Skip to content

Commit

Permalink
Merge pull request #8106 from tangledbytes/utkarsh/feat/list-objects-…
Browse files Browse the repository at this point in the history
…glacier-status

[NSFS | Glacier] Add support for returning RestoreStatus in listObjects
  • Loading branch information
tangledbytes authored Jun 6, 2024
2 parents f9a3444 + ee4b946 commit d189ae5
Show file tree
Hide file tree
Showing 2 changed files with 106 additions and 6 deletions.
30 changes: 28 additions & 2 deletions src/endpoint/s3/ops/s3_get_bucket.js
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ const S3Error = require('../s3_errors').S3Error;
const s3_utils = require('../s3_utils');

/**
* list objects and list objects V2:
* list objects and list objects V2:
* https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html
* https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html
*
*
* note: the original documentation was in the below link:
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
* (but anyway it is permanently redirected to list object link above)
Expand All @@ -27,6 +27,16 @@ async function get_bucket(req) {
const cont_tok = req.query['continuation-token'];
const start_after = req.query['start-after'];

const optional_object_attributes = req.headers['x-amz-optional-object-attributes'];
const restore_status_requested = optional_object_attributes === 'RestoreStatus';

// Only RestoreStatus is a valid attribute for now
if (optional_object_attributes && !restore_status_requested) {
// S3 API fails with `InvalidArgument` and this message
throw new S3Error({ ...S3Error.InvalidArgument, message: 'Invalid attribute name specified' });
}


const params = {
bucket: req.params.bucket,
prefix: req.query.prefix,
Expand Down Expand Up @@ -73,6 +83,7 @@ async function get_bucket(req) {
Size: obj.size,
Owner: (!list_type || req.query['fetch-owner']) && s3_utils.DEFAULT_S3_USER,
StorageClass: s3_utils.parse_storage_class(obj.storage_class),
RestoreStatus: get_object_restore_status(obj, restore_status_requested)
}
})),
_.map(reply.common_prefixes, prefix => ({
Expand Down Expand Up @@ -103,6 +114,21 @@ function key_marker_to_cont_tok(key_marker, objects_arr, is_truncated) {
return Buffer.from(j).toString('base64');
}

/**
 * Builds the RestoreStatus entry for a listed object, mirroring the AWS
 * `x-amz-optional-object-attributes: RestoreStatus` response shape.
 * @param {Object} obj - object entry from the list_objects reply
 * @param {boolean} restore_status_requested - whether the client asked for RestoreStatus
 * @returns {{IsRestoreInProgress: boolean, RestoreExpiryDate?: string} | undefined}
 *          undefined when not requested or the object has no restore status
 */
function get_object_restore_status(obj, restore_status_requested) {
    const status = restore_status_requested ? obj.restore_status : undefined;
    if (!status) return;

    const result = { IsRestoreInProgress: status.ongoing };

    // An expiry date is only meaningful once the restore has finished
    if (status.expiry_time && !status.ongoing) {
        result.RestoreExpiryDate = new Date(status.expiry_time).toUTCString();
    }

    return result;
}

module.exports = {
handler: get_bucket,
body: {
Expand Down
82 changes: 78 additions & 4 deletions src/test/unit_tests/test_nsfs_glacier_backend.js
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ const { TapeCloudGlacierBackend, TapeCloudUtils } = require('../../sdk/nsfs_glac
const { PersistentLogger } = require('../../util/persistent_logger');
const { GlacierBackend } = require('../../sdk/nsfs_glacier_backend/backend');
const nb_native = require('../../util/nb_native');
const { handler: s3_get_bucket } = require('../../endpoint/s3/ops/s3_get_bucket');

const inspect = (x, max_arr = 5) => util.inspect(x, { colors: true, depth: null, maxArrayLength: max_arr });

Expand All @@ -36,6 +37,14 @@ function make_dummy_object_sdk() {
}
};
}
/**
 * Creates a minimal request-like object (empty query/params/headers plus a
 * dummy object_sdk) suitable for invoking S3 op handlers directly in tests.
 * @returns {{query: Object, params: Object, headers: Object, object_sdk: Object}}
 */
function generate_noobaa_req_obj() {
    const object_sdk = make_dummy_object_sdk();
    return { query: {}, params: {}, headers: {}, object_sdk };
}

/**
* @param {Date} date - the date to be asserted
Expand Down Expand Up @@ -67,11 +76,11 @@ function assert_date(date, from, expected, tz = 'LOCAL') {
}

mocha.describe('nsfs_glacier', async () => {
const src_bkt = 'src';
const src_bkt = 'nsfs_glacier_src';

const dummy_object_sdk = make_dummy_object_sdk();
const upload_bkt = 'test_ns_uploads_object';
const ns_src_bucket_path = `./${src_bkt}`;
const ns_src_bucket_path = src_bkt;

const glacier_ns = new NamespaceFS({
bucket_path: ns_src_bucket_path,
Expand All @@ -83,10 +92,11 @@ mocha.describe('nsfs_glacier', async () => {
stats: endpoint_stats_collector.instance(),
});


glacier_ns._is_storage_class_supported = async () => true;

mocha.before(async () => {
await fs.mkdir(ns_src_bucket_path, { recursive: true });

config.NSFS_GLACIER_LOGS_DIR = await fs.mkdtemp(path.join(os.tmpdir(), 'nsfs-wal-'));

// Replace the logger by custom one
Expand Down Expand Up @@ -313,8 +323,72 @@ mocha.describe('nsfs_glacier', async () => {
});
});

// Exercises the S3 list-objects flow end to end through the op handler,
// verifying the `x-amz-optional-object-attributes: RestoreStatus` behavior.
mocha.describe('nsfs_glacier_s3_flow', async () => {
    mocha.it('list_objects should throw error with incorrect optional object attributes', async () => {
        const req = generate_noobaa_req_obj();
        req.params.bucket = src_bkt;

        // Only 'RestoreStatus' (case sensitive) is accepted by the handler
        req.headers['x-amz-optional-object-attributes'] = 'restorestatus';

        // FIX: assert.rejects returns a promise — it must be awaited,
        // otherwise this test passes even when no rejection occurs and any
        // rejection surfaces as an unhandled promise rejection.
        await assert.rejects(async () => s3_get_bucket(req));
    });

    mocha.it('list_objects should not return restore status when optional object attr header isn\'t given', async () => {
        const req = generate_noobaa_req_obj();
        req.params.bucket = src_bkt;
        req.object_sdk.list_objects = params => glacier_ns.list_objects(params, dummy_object_sdk);

        const res = await s3_get_bucket(req);
        const objs = res.ListBucketResult[1];
        assert.strictEqual(objs instanceof Array, true);

        // Without the header, no object entry may carry RestoreStatus
        // @ts-ignore
        objs.forEach(obj => {
            assert.strictEqual(obj.RestoreStatus, undefined);
        });
    });

    mocha.it('list_objects should return restore status for the objects when requested', async () => {
        const req = generate_noobaa_req_obj();
        req.params.bucket = src_bkt;
        req.headers['x-amz-optional-object-attributes'] = 'RestoreStatus';
        req.object_sdk.list_objects = params => glacier_ns.list_objects(params, dummy_object_sdk);

        const res = await s3_get_bucket(req);
        const objs = res.ListBucketResult[1];
        assert.strictEqual(objs instanceof Array, true);

        const fs_context = glacier_ns.prepare_fs_context(dummy_object_sdk);

        // Cross-check each reported RestoreStatus against the xattr-derived
        // status of the underlying file on disk.
        await Promise.all(
            // @ts-ignore
            objs.map(async obj => {
                // obj.Key will be the same as original key for as long as
                // no custom encoding is provided
                const file_path = glacier_ns._get_file_path({ key: obj.Contents.Key });
                const stat = await nb_native().fs.stat(fs_context, file_path);

                const glacier_status = GlacierBackend.get_restore_status(stat.xattr, new Date(), file_path);
                if (glacier_status === undefined) {
                    assert.strictEqual(obj.Contents.RestoreStatus, undefined);
                } else {
                    assert.strictEqual(obj.Contents.RestoreStatus.IsRestoreInProgress, glacier_status.ongoing);
                    if (glacier_status.expiry_time === undefined) {
                        assert.strictEqual(obj.Contents.RestoreStatus.RestoreExpiryDate, undefined);
                    }
                }
            })
        );
    });
});

mocha.after(async () => {
    // Remove the bucket directory and the WAL logs dir created during setup
    const dirs = [ns_src_bucket_path, config.NSFS_GLACIER_LOGS_DIR];
    await Promise.all(dirs.map(dir => fs.rm(dir, { recursive: true, force: true })));
});

mocha.describe('tapecloud_utils', () => {
const MOCK_TASK_SHOW_DATA = `Random irrelevant data to
const MOCK_TASK_SHOW_DATA = `Random irrelevant data to
Result Failure Code Failed time Node -- File name
Fail GLESM451W 2023/11/08T02:38:47 1 -- /ibm/gpfs/NoobaaTest/file.aaai
Fail GLESM451W 2023/11/08T02:38:47 1 -- /ibm/gpfs/NoobaaTest/file.aaaj
Expand Down

0 comments on commit d189ae5

Please sign in to comment.