Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -371,33 +371,53 @@ public Response get(
@PathParam(BUCKET) String bucketName,
@PathParam(PATH) String keyPath
) throws IOException, OS3Exception {
ObjectRequestContext context =
new ObjectRequestContext(S3GAction.GET_KEY, bucketName);
try {
return handler.handleGetRequest(context, keyPath);
} catch (OMException ex) {
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex);
} else if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
} else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
} else {
throw ex;
}
}
}

@Override
@SuppressWarnings("checkstyle:MethodLength")
Response handleGetRequest(ObjectRequestContext context, String keyPath)
throws IOException, OS3Exception {

final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000);
final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0);
final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER);
final String taggingMarker = queryParams().get(QueryParams.TAGGING);
final String uploadId = queryParams().get(QueryParams.UPLOAD_ID);

long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.GET_KEY;
PerformanceStringBuilder perf = new PerformanceStringBuilder();
final long startNanos = context.getStartNanos();
final PerformanceStringBuilder perf = context.getPerf();

try {
OzoneBucket bucket = getBucket(bucketName);
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
if (taggingMarker != null) {
s3GAction = S3GAction.GET_OBJECT_TAGGING;
return getObjectTagging(bucket, keyPath);
}
final String bucketName = context.getBucketName();
final OzoneBucket bucket = context.getBucket();

if (uploadId != null) {
// When we have uploadId, this is the request for list Parts.
s3GAction = S3GAction.LIST_PARTS;
// list parts
context.setAction(S3GAction.LIST_PARTS);

int partMarker = parsePartNumberMarker(partNumberMarker);
Response response = listParts(bucket, keyPath, uploadId,
partMarker, maxParts, perf);
auditReadSuccess(s3GAction, perf);

return response;
}

context.setAction(S3GAction.GET_KEY);

OzoneKeyDetails keyDetails = (partNumber != 0) ?
getClientProtocol().getS3KeyDetails(bucketName, keyPath, partNumber) :
getClientProtocol().getS3KeyDetails(bucketName, keyPath);
Expand All @@ -414,13 +434,13 @@ public Response get(
LOG.debug("range Header provided value: {}", rangeHeaderVal);

if (rangeHeaderVal != null) {
rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal,
length);
rangeHeader = RangeHeaderParserUtil.parseRangeHeader(rangeHeaderVal, length);
LOG.debug("range Header provided: {}", rangeHeader);
if (rangeHeader.isInValidRange()) {
throw newError(S3ErrorTable.INVALID_RANGE, rangeHeaderVal);
}
}

ResponseBuilder responseBuilder;

if (rangeHeaderVal == null || rangeHeader.isReadFull()) {
Expand All @@ -430,21 +450,18 @@ public Response get(
getMetrics().incGetKeySuccessLength(readLength);
perf.appendSizeBytes(readLength);
}
long opLatencyNs = getMetrics().updateGetKeySuccessStats(startNanos);
long opLatencyNs = getMetrics().updateGetKeySuccessStats(startNanos);
perf.appendOpLatencyNanos(opLatencyNs);
auditReadSuccess(S3GAction.GET_KEY, perf);
};
responseBuilder = Response
.ok(output)

responseBuilder = Response.ok(output)
.header(HttpHeaders.CONTENT_LENGTH, keyDetails.getDataSize());

} else {

long startOffset = rangeHeader.getStartOffset();
long endOffset = rangeHeader.getEndOffset();
// eg. if range header is given as bytes=0-0, then we should return 1
// byte from start offset
long copyLength = endOffset - startOffset + 1;

StreamingOutput output = dest -> {
try (OzoneInputStream ozoneInputStream = keyDetails.getContent()) {
ozoneInputStream.seek(startOffset);
Expand All @@ -455,21 +472,18 @@ public Response get(
}
long opLatencyNs = getMetrics().updateGetKeySuccessStats(startNanos);
perf.appendOpLatencyNanos(opLatencyNs);
auditReadSuccess(S3GAction.GET_KEY, perf);
};
responseBuilder = Response
.status(Status.PARTIAL_CONTENT)

responseBuilder = Response.status(Status.PARTIAL_CONTENT)
.entity(output)
.header(HttpHeaders.CONTENT_LENGTH, copyLength);

String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " +
rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() +
"/" + length;

startOffset + "-" + endOffset + "/" + length;
responseBuilder.header(CONTENT_RANGE_HEADER, contentRangeVal);
}
responseBuilder
.header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT);

responseBuilder.header(ACCEPT_RANGE_HEADER, RANGE_HEADER_SUPPORTED_UNIT);

String eTag = keyDetails.getMetadata().get(OzoneConsts.ETAG);
if (eTag != null) {
Expand All @@ -480,21 +494,11 @@ public Response get(
}
}

// If multiple query parameters have the same name, only the first
// occurrence is recognized.
// e.g. for
// http://localhost:9878/bucket/key?response-expires=1&response-expires=2
// only response-expires=1 takes effect.
MultivaluedMap<String, String> queryParams = getContext()
.getUriInfo().getQueryParameters();
MultivaluedMap<String, String> queryParams =
getContext().getUriInfo().getQueryParameters();

for (Map.Entry<String, String> entry :
overrideQueryParameter.entrySet()) {
for (Map.Entry<String, String> entry : overrideQueryParameter.entrySet()) {
String headerValue = getHeaders().getHeaderString(entry.getKey());

/* "Overriding Response Header" by query parameter, See:
https://docs.aws.amazon.com/de_de/AmazonS3/latest/API/API_GetObject.html
*/
String queryValue = queryParams.getFirst(entry.getValue());
if (queryValue != null) {
headerValue = queryValue;
Expand All @@ -503,32 +507,19 @@ public Response get(
responseBuilder.header(entry.getKey(), headerValue);
}
}

addLastModifiedDate(responseBuilder, keyDetails);
addTagCountIfAny(responseBuilder, keyDetails);
long metadataLatencyNs =
getMetrics().updateGetKeyMetadataStats(startNanos);

long metadataLatencyNs = getMetrics().updateGetKeyMetadataStats(startNanos);
perf.appendMetaLatencyNanos(metadataLatencyNs);

return responseBuilder.build();
} catch (OMException ex) {
auditReadFailure(s3GAction, ex);
if (taggingMarker != null) {
getMetrics().updateGetObjectTaggingFailureStats(startNanos);
} else if (uploadId != null) {
getMetrics().updateListPartsFailureStats(startNanos);
} else {

} catch (IOException | RuntimeException ex) {
if (uploadId == null) {
getMetrics().updateGetKeyFailureStats(startNanos);
}
if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_KEY, keyPath, ex);
} else if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
} else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
} else {
throw ex;
}
} catch (Exception ex) {
auditReadFailure(s3GAction, ex);
throw ex;
}
}
Expand Down Expand Up @@ -685,46 +676,23 @@ public Response delete(
@PathParam(BUCKET) String bucketName,
@PathParam(PATH) String keyPath
) throws IOException, OS3Exception {
final String taggingMarker = queryParams().get(QueryParams.TAGGING);
final String uploadId = queryParams().get(QueryParams.UPLOAD_ID);

long startNanos = Time.monotonicNowNanos();
S3GAction s3GAction = S3GAction.DELETE_KEY;

ObjectRequestContext context =
new ObjectRequestContext(S3GAction.DELETE_KEY, bucketName);
try {
OzoneVolume volume = getVolume();
if (S3Owner.hasBucketOwnershipVerificationConditions(getHeaders())) {
OzoneBucket bucket = volume.getBucket(bucketName);
S3Owner.verifyBucketOwnerCondition(getHeaders(), bucketName, bucket.getOwner());
}
if (taggingMarker != null) {
s3GAction = S3GAction.DELETE_OBJECT_TAGGING;
return deleteObjectTagging(volume, bucketName, keyPath);
}

if (uploadId != null && !uploadId.equals("")) {
s3GAction = S3GAction.ABORT_MULTIPART_UPLOAD;
return abortMultipartUpload(volume, bucketName, keyPath, uploadId);
}
getClientProtocol().deleteKey(volume.getName(), bucketName,
keyPath, false);
return handler.handleDeleteRequest(context, keyPath);
} catch (OMException ex) {
auditWriteFailure(s3GAction, ex);
if (uploadId != null && !uploadId.equals("")) {
getMetrics().updateAbortMultipartUploadFailureStats(startNanos);
} else {
getMetrics().updateDeleteKeyFailureStats(startNanos);
}
if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName, ex);
} else if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
//NOT_FOUND is not a problem, AWS doesn't throw exception for missing
// keys. Just return 204
return Response.status(Status.NO_CONTENT).build();
} else if (ex.getResult() == ResultCodes.DIRECTORY_NOT_EMPTY) {
// With the PREFIX metadata layout, deleting a non-empty directory
// without the recursive flag set to true throws DIRECTORY_NOT_EMPTY.
// As with NOT_FOUND, AWS does not treat this as an error for delete;
// just return 204.
return Response.status(Status.NO_CONTENT).build();
} else if (isAccessDenied(ex)) {
throw newError(S3ErrorTable.ACCESS_DENIED, keyPath, ex);
} else if (ex.getResult() == ResultCodes.NOT_SUPPORTED_OPERATION) {
Expand All @@ -733,22 +701,40 @@ public Response delete(
} else {
throw ex;
}
}
}

@Override
Response handleDeleteRequest(ObjectRequestContext context, String keyPath)
throws IOException, OS3Exception {

final String bucketName = context.getBucketName();
final long startNanos = context.startNanos;
final String uploadId = queryParams().get(QueryParams.UPLOAD_ID);

try {
OzoneVolume volume = context.getVolume();

if (uploadId != null && !uploadId.isEmpty()) {
context.setAction(S3GAction.ABORT_MULTIPART_UPLOAD);
Response r = abortMultipartUpload(volume, bucketName, keyPath, uploadId);

return r;
}

getClientProtocol().deleteKey(volume.getName(), context.getBucketName(), keyPath, false);

getMetrics().updateDeleteKeySuccessStats(startNanos);
return Response.status(Status.NO_CONTENT).build();

} catch (Exception ex) {
auditWriteFailure(s3GAction, ex);
if (taggingMarker != null) {
getMetrics().updateDeleteObjectTaggingFailureStats(startNanos);
} else if (uploadId != null && !uploadId.equals("")) {
if (uploadId != null && !uploadId.isEmpty()) {
getMetrics().updateAbortMultipartUploadFailureStats(startNanos);
} else {
getMetrics().updateDeleteKeyFailureStats(startNanos);
}
throw ex;
}
getMetrics().updateDeleteKeySuccessStats(startNanos);
auditWriteSuccess(s3GAction);
return Response
.status(Status.NO_CONTENT)
.build();
}

/**
Expand Down Expand Up @@ -1273,35 +1259,7 @@ private CopyObjectResponse copyObject(OzoneVolume volume,
}
}
}

/**
 * Serves an S3 GetObjectTagging request: reads the tag set of the given key
 * and renders it as an XML {@code Tagging} document.
 *
 * @param bucket  bucket holding the key
 * @param keyName key whose tags are returned
 * @return 200 OK with the tag set serialized as XML
 * @throws IOException if reading the tags from Ozone fails
 */
private Response getObjectTagging(OzoneBucket bucket, String keyName) throws IOException {
  final long requestStartNanos = Time.monotonicNowNanos();

  final Map<String, String> tags = bucket.getObjectTagging(keyName);

  // Record latency only on the success path, matching the other tagging ops.
  getMetrics().updateGetObjectTaggingSuccessStats(requestStartNanos);
  return Response
      .ok(S3Tagging.fromMap(tags), MediaType.APPLICATION_XML_TYPE)
      .build();
}

/**
 * Serves an S3 DeleteObjectTagging request: removes the tag set of the given
 * key and answers with 204 No Content.
 *
 * @param volume     volume containing the bucket
 * @param bucketName bucket holding the key
 * @param keyName    key whose tags are removed
 * @return 204 No Content on success
 * @throws IOException  if the delete fails in Ozone
 * @throws OS3Exception NO_SUCH_KEY when the key does not exist
 */
private Response deleteObjectTagging(OzoneVolume volume, String bucketName, String keyName)
    throws IOException, OS3Exception {
  final long requestStartNanos = Time.monotonicNowNanos();

  try {
    final OzoneBucket bucket = volume.getBucket(bucketName);
    bucket.deleteObjectTagging(keyName);
  } catch (OMException ex) {
    // Plain key deletion ignores a missing key, but DeleteObjectTagging
    // must surface NO_SUCH_KEY when the key does not exist.
    if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
      throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName);
    }
    throw ex;
  }

  getMetrics().updateDeleteObjectTaggingSuccessStats(requestStartNanos);
  return Response.noContent().build();
}


/** Request context shared among {@code ObjectOperationHandler}s. */
final class ObjectRequestContext {
private final String bucketName;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,11 @@
import java.io.InputStream;
import java.util.Map;
import javax.ws.rs.HttpMethod;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import org.apache.hadoop.ozone.audit.S3GAction;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint.ObjectRequestContext;
import org.apache.hadoop.ozone.s3.exception.OS3Exception;
import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
Expand Down Expand Up @@ -65,6 +68,41 @@ Response handlePutRequest(ObjectRequestContext context, String keyName, InputStr
}
}

@Override
Response handleDeleteRequest(ObjectRequestContext context, String keyName)
    throws IOException, OS3Exception {
  // Not a ?tagging delete — defer to the next handler in the chain.
  if (context.ignore(getAction())) {
    return null;
  }
  final long startNanos = context.getStartNanos();
  try {
    final OzoneBucket bucket = context.getBucket();
    bucket.deleteObjectTagging(keyName);
  } catch (OMException ex) {
    getMetrics().updateDeleteObjectTaggingFailureStats(startNanos);
    // Unlike plain key deletion (which ignores a missing key),
    // DeleteObjectTagging must report NO_SUCH_KEY for an absent key.
    if (ex.getResult() == ResultCodes.KEY_NOT_FOUND) {
      throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_KEY, keyName);
    }
    throw ex;
  }
  getMetrics().updateDeleteObjectTaggingSuccessStats(startNanos);
  return Response.noContent().build();
}

@Override
Response handleGetRequest(ObjectRequestContext context, String keyName)
    throws IOException, OS3Exception {
  // Not a ?tagging read — defer to the next handler in the chain.
  if (context.ignore(getAction())) {
    return null;
  }
  final long startNanos = context.getStartNanos();
  try {
    final Map<String, String> tags = context.getBucket().getObjectTagging(keyName);
    getMetrics().updateGetObjectTaggingSuccessStats(startNanos);
    return Response
        .ok(S3Tagging.fromMap(tags), MediaType.APPLICATION_XML_TYPE)
        .build();
  } catch (Exception e) {
    // Count every failure (checked or unchecked) before rethrowing.
    getMetrics().updateGetObjectTaggingFailureStats(startNanos);
    throw e;
  }
}

private S3GAction getAction() {
if (queryParams().get(S3Consts.QueryParams.TAGGING) == null) {
return null;
Expand Down
Loading