Add tests for needOplogUpdate #5727

Merged
1 commit merged on Jan 31, 2025
146 changes: 109 additions & 37 deletions tests/unit/api/multipartUpload.js
@@ -144,9 +144,47 @@ function _createCompleteMpuRequest(uploadId, parts) {
    };
}

const _bucketPut = util.promisify(bucketPut);

const _initiateMultipartUpload = async (...params) => {
    const result = await util.promisify(initiateMultipartUpload)(...params);
    const json = await parseStringPromise(result);
    return json.InitiateMultipartUploadResult.UploadId[0];
};
const _objectPutPart = util.promisify(objectPutPart);
const _completeMultipartUpload = (...params) => util.promisify(cb =>
    completeMultipartUpload(...params, (err, xml, headers) => cb(err, { xml, headers })))();

async function _uploadMpuObject(params = {}) {
    const headers = { ...initiateRequest.headers };
    if (params.location) {
        headers[constants.objectLocationConstraintHeader] = params.location;
    }
    if (params.versionID) {
        headers['x-scal-s3-version-id'] = params.versionID;
    }

    const uploadId = await _initiateMultipartUpload(authInfo, { ...initiateRequest, headers }, log);

    const partRequest = _createPutPartRequest(uploadId, 1, Buffer.from('I am a part\n', 'utf8'));
    partRequest.headers = headers;
    const eTag = await _objectPutPart(authInfo, partRequest, undefined, log);

    const completeRequest = _createCompleteMpuRequest(uploadId, [{ partNumber: 1, eTag }]);
    const resp = await _completeMultipartUpload(authInfo, { ...completeRequest, headers }, log);

    return resp.headers;
}
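Note on the helpers above, which this PR hoists to module scope (the copies previously defined inside the ingestion-bucket describe block appear as removed further down): util.promisify only surfaces the first success argument of a callback, so completeMultipartUpload, whose callback receives (err, xml, headers), is wrapped by hand to bundle both values into one object. A minimal standalone sketch of the same pattern, where doWork is a hypothetical callback-style API, not part of this codebase:

const util = require('util');

// Hypothetical callback API returning two success values.
function doWork(input, cb) {
    cb(null, input.toUpperCase(), { length: input.length });
}

// Wrap the call in a single-value callback that bundles both results,
// since util.promisify would otherwise drop the second one.
const doWorkAsync = input => util.promisify(cb =>
    doWork(input, (err, result, meta) => cb(err, { result, meta })))();

doWorkAsync('part').then(({ result, meta }) => console.log(result, meta.length)); // PART 4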

describe('Multipart Upload API', () => {
    beforeEach(() => {
        cleanup();

        sinon.spy(metadataswitch, 'putObjectMD');
    });

    afterEach(() => {
        sinon.restore();
    });
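The spy installed in beforeEach wraps metadataswitch.putObjectMD without changing its behavior, so the needOplogUpdate tests below can inspect the options that were passed to metadata. Assuming the usual putObjectMD(bucketName, objName, objVal, params, log, cb) signature (consistent with the calledWith assertions in objectCopy.js further down), lastCall.args[3] is the params/options object. A minimal sketch of the pattern, using a stand-in object:

const sinon = require('sinon');

const metadata = {
    putObjectMD: (bucket, key, objVal, params, log, cb) => cb(null),
};

sinon.spy(metadata, 'putObjectMD'); // records calls, still calls through

metadata.putObjectMD('b', 'k', {}, { needOplogUpdate: true }, null, () => {});
console.log(metadata.putObjectMD.lastCall.args[3]); // { needOplogUpdate: true }

sinon.restore(); // undoes the wrap, as in afterEach above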

    it('mpuBucketPrefix should be a defined constant', () => {
@@ -2071,6 +2109,70 @@ describe('Multipart Upload API', () => {
});
});
});

    it('should not pass needOplogUpdate when writing new object', done => {
        async.series([
            next => bucketPut(authInfo, bucketPutRequest, log, next),
            async () => _uploadMpuObject(),
            async () => {
                const options = metadataswitch.putObjectMD.lastCall.args[3];
                assert.strictEqual(options.needOplogUpdate, undefined);
                assert.strictEqual(options.originOp, undefined);
            },
        ], done);
    });

    it('should not pass needOplogUpdate when replacing object', done => {
        async.series([
            next => bucketPut(authInfo, bucketPutRequest, log, next),
            async () => _uploadMpuObject(),
            async () => _uploadMpuObject(),
            async () => {
                const options = metadataswitch.putObjectMD.lastCall.args[3];
                assert.strictEqual(options.needOplogUpdate, undefined);
                assert.strictEqual(options.originOp, undefined);
            },
        ], done);
    });

    it('should pass needOplogUpdate to metadata when replacing archived object', done => {
        const archived = {
            archiveInfo: { foo: 0, bar: 'stuff' }
        };

        async.series([
            next => bucketPut(authInfo, bucketPutRequest, log, next),
            async () => _uploadMpuObject(),
            next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next),
            async () => _uploadMpuObject(),
            async () => {
                const options = metadataswitch.putObjectMD.lastCall.args[3];
                assert.strictEqual(options.needOplogUpdate, true);
                assert.strictEqual(options.originOp, 's3:ReplaceArchivedObject');
            },
        ], done);
    });
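For context, a rough sketch of the decision these tests pin down. This is illustrative, not the actual cloudserver implementation, and the field layout (objMD.archive.archiveInfo) is inferred from how fakeMetadataArchive is used in these tests:

// Hypothetical helper: how the put path presumably chooses the options.
function buildPutOptions(existingObjectMD) {
    const options = {};
    if (existingObjectMD && existingObjectMD.archive && existingObjectMD.archive.archiveInfo) {
        // Replacing an archived object must reach the oplog so downstream
        // consumers see it as a replacement of archived data.
        options.needOplogUpdate = true;
        options.originOp = 's3:ReplaceArchivedObject';
    }
    // New objects and plain overwrites leave both fields undefined.
    return options;
}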

    it('should pass needOplogUpdate to metadata when replacing archived object in version suspended bucket', done => {
        const archived = {
            archiveInfo: { foo: 0, bar: 'stuff' }
        };

        const suspendVersioningRequest = versioningTestUtils
            .createBucketPutVersioningReq(bucketName, 'Suspended');
        async.series([
            next => bucketPut(authInfo, bucketPutRequest, log, next),
            next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next),
            async () => _uploadMpuObject(),
            next => fakeMetadataArchive(bucketName, objectKey, undefined, archived, next),
            async () => _uploadMpuObject(),
            async () => {
                const options = metadataswitch.putObjectMD.lastCall.args[3];
                assert.strictEqual(options.needOplogUpdate, true);
                assert.strictEqual(options.originOp, 's3:ReplaceArchivedObject');
            },
        ], done);
    });
});

describe('complete mpu with versioning', () => {
@@ -2701,47 +2803,17 @@ describe('multipart upload in ingestion bucket', () => {
        restoreRequestedDays: 5,
    };

    const _bucketPut = util.promisify(bucketPut);
    const _initiateMultipartUpload = async (...params) => {
        const result = await util.promisify(initiateMultipartUpload)(...params);
        const json = await parseStringPromise(result);
        return json.InitiateMultipartUploadResult.UploadId[0];
    };
    const _objectPutPart = util.promisify(objectPutPart);
    const _completeMultipartUpload = (...params) => util.promisify(cb =>
        completeMultipartUpload(...params, (err, xml, headers) => cb(err, { xml, headers })))();
    const uploadMpuObject = async (params = {}) => {
        const headers = { ...initiateRequest.headers };
        if (params.location) {
            headers[constants.objectLocationConstraintHeader] = params.location;
        }
        if (params.versionID) {
            headers['x-scal-s3-version-id'] = params.versionID;
        }

        const uploadId = await _initiateMultipartUpload(authInfo, { ...initiateRequest, headers }, log);

        const partRequest = _createPutPartRequest(uploadId, 1, Buffer.from('I am a part\n', 'utf8'));
        partRequest.headers = headers;
        const eTag = await _objectPutPart(authInfo, partRequest, undefined, log);

        const completeRequest = _createCompleteMpuRequest(uploadId, [{ partNumber: 1, eTag }]);
        const resp = await _completeMultipartUpload(authInfo, { ...completeRequest, headers }, log);

        return resp.headers;
    };

    it('should use the versionID from the backend', async () => {
        await _bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log);

        const headers = await uploadMpuObject();
        const headers = await _uploadMpuObject();
        assert.strictEqual(headers['x-amz-version-id'], versionID);
    });

    it('should not use the versionID from the backend when writing in another location', async () => {
        await _bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log);

        const headers = await uploadMpuObject({ location: 'us-east-2' });
        const headers = await _uploadMpuObject({ location: 'us-east-2' });
        assert.notEqual(headers['x-amz-version-id'], versionID);
    });
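The two tests above encode the ingestion contract: completing an MPU in the bucket's ingestion location reuses the version ID minted by the backing store, while writing to any other location gets a locally generated one. An illustrative one-liner of that rule (hypothetical helper, not cloudserver code):

function pickVersionId(writeLocation, ingestionLocation, backendVersionId, localVersionId) {
    // Ingestion buckets mirror an external store, so both sides must
    // agree on the version identity of the object.
    return writeLocation === ingestionLocation ? backendVersionId : localVersionId;
}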

@@ -2755,7 +2827,7 @@

        await _bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log);

        const headers = await uploadMpuObject();
        const headers = await _uploadMpuObject();
        assert.notEqual(headers['x-amz-version-id'], versionID);
    });

@@ -2771,13 +2843,13 @@

        await _bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log);

        let headers = await uploadMpuObject();
        let headers = await _uploadMpuObject();
        assert.strictEqual(headers['x-amz-version-id'], versionID);
        assert.strictEqual(dataClient.createMPU.firstCall.args[1]['x-amz-meta-scal-version-id'], undefined);

        await util.promisify(fakeMetadataArchive)(bucketName, objectKey, versionID, archiveRestoreRequested);

        headers = await uploadMpuObject({
        headers = await _uploadMpuObject({
            versionID,
        });
        assert.strictEqual(headers['x-amz-version-id'], versionID);
@@ -2796,13 +2868,13 @@

        await _bucketPut(authInfo, newPutIngestBucketRequest('us-east-1:ingest'), log);

        let headers = await uploadMpuObject();
        let headers = await _uploadMpuObject();
        assert.strictEqual(headers['x-amz-version-id'], versionID);
        assert.strictEqual(dataClient.createMPU.firstCall.args[1]['x-amz-meta-scal-version-id'], undefined);

        await util.promisify(fakeMetadataArchive)(bucketName, objectKey, versionID, archiveRestoreRequested);

        headers = await uploadMpuObject({
        headers = await _uploadMpuObject({
            versionID,
            location: 'us-east-2',
        });
73 changes: 73 additions & 0 deletions tests/unit/api/objectCopy.js
@@ -14,6 +14,7 @@ const mpuUtils = require('../utils/mpuUtils');
const metadata = require('../metadataswitch');
const { data } = require('../../../lib/data/wrapper');
const { objectLocationConstraintHeader } = require('../../../constants');
const { fakeMetadataArchive } = require('../../functional/aws-node-sdk/test/utils/init');

const any = sinon.match.any;

@@ -129,6 +130,8 @@ describe('objectCopy with versioning', () => {
describe('non-versioned objectCopy', () => {
    const testPutObjectRequest = versioningTestUtils
        .createPutObjectRequest(sourceBucketName, objectKey, objData[0]);
    const testPutDestObjectRequest = versioningTestUtils
        .createPutObjectRequest(destBucketName, objectKey, objData[1]);

    before(done => {
        cleanup();
@@ -171,6 +174,76 @@ describe('non-versioned objectCopy', () => {
});
});
});
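fakeMetadataArchive, imported at the top of this file, is a test utility from tests/functional/aws-node-sdk/test/utils/init.js. Judging by how it is called here (bucket, key, versionId, archive, callback), it stamps an archive state directly onto the stored object metadata so the next write sees an archived object. A hedged sketch of that behavior:

// Sketch only -- the real helper may differ in its details.
function fakeMetadataArchiveSketch(bucketName, objectKey, versionId, archive, cb) {
    metadata.getObjectMD(bucketName, objectKey, { versionId }, log, (err, objMD) => {
        if (err) {
            return cb(err);
        }
        objMD.archive = archive; // e.g. { archiveInfo: { foo: 0, bar: 'stuff' } }
        return metadata.putObjectMD(bucketName, objectKey, objMD, { versionId }, log, cb);
    });
}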

    it('should not pass needOplogUpdate when creating object', done => {
        async.series([
            next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey,
                undefined, log, next),
            async () => {
                sinon.assert.calledWith(metadata.putObjectMD.lastCall,
                    destBucketName, objectKey, any, sinon.match({
                        needOplogUpdate: undefined,
                        originOp: undefined,
                    }), any, any);
            },
        ], done);
    });
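Unlike the MPU tests, which read lastCall.args[3] directly, these assertions use sinon.match, which performs partial, property-wise matching on the options argument: listed properties must match, extra ones are ignored, and matching a property against undefined passes when it is absent or explicitly undefined. A small self-contained example:

const sinon = require('sinon');

const spy = sinon.spy();
spy({ needOplogUpdate: true, originOp: 's3:ReplaceArchivedObject', other: 1 });

sinon.assert.calledWith(spy, sinon.match({ needOplogUpdate: true })); // passes
sinon.assert.calledWith(spy, sinon.match({ needOplogUpdate: false })); // throws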

    it('should not pass needOplogUpdate when replacing object', done => {
        async.series([
            next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next),
            next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey,
                undefined, log, next),
            async () => {
                sinon.assert.calledWith(metadata.putObjectMD.lastCall,
                    destBucketName, objectKey, any, sinon.match({
                        needOplogUpdate: undefined,
                        originOp: undefined,
                    }), any, any);
            },
        ], done);
    });

    it('should pass needOplogUpdate to metadata when replacing archived object', done => {
        const archived = {
            archiveInfo: { foo: 0, bar: 'stuff' }
        };

        async.series([
            next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next),
            next => fakeMetadataArchive(destBucketName, objectKey, undefined, archived, next),
            next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey,
                undefined, log, next),
            async () => {
                sinon.assert.calledWith(metadata.putObjectMD.lastCall,
                    destBucketName, objectKey, any, sinon.match({
                        needOplogUpdate: true,
                        originOp: 's3:ReplaceArchivedObject',
                    }), any, any);
            },
        ], done);
    });

    it('should pass needOplogUpdate to metadata when replacing archived object in version suspended bucket', done => {
        const archived = {
            archiveInfo: { foo: 0, bar: 'stuff' }
        };

        async.series([
            next => bucketPutVersioning(authInfo, suspendVersioningRequest, log, next),
            next => objectPut(authInfo, testPutDestObjectRequest, undefined, log, next),
            next => fakeMetadataArchive(destBucketName, objectKey, undefined, archived, next),
            next => objectCopy(authInfo, testObjectCopyRequest, sourceBucketName, objectKey,
                undefined, log, next),
            async () => {
                sinon.assert.calledWith(metadata.putObjectMD.lastCall,
                    destBucketName, objectKey, any, sinon.match({
                        needOplogUpdate: true,
                        originOp: 's3:ReplaceArchivedObject',
                    }), any, any);
            },
        ], done);
    });
});

describe('objectCopy overheadField', () => {