added integration testing
marythedev committed Nov 28, 2023
1 parent 573fc60 commit 44d0f91
Showing 14 changed files with 1,891 additions and 199 deletions.
1,790 changes: 1,599 additions & 191 deletions package-lock.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions package.json
@@ -31,6 +31,7 @@
"supertest": "^6.3.3"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.458.0",
"aws-jwt-verify": "^4.0.0",
"compression": "^1.7.4",
"content-type": "^1.0.5",
142 changes: 142 additions & 0 deletions src/model/data/aws/index.js
@@ -0,0 +1,142 @@
// temporary use of memory-db until we add DynamoDB
const MemoryDB = require('../memory/memory-db');
const s3Client = require('./s3Client');
const { PutObjectCommand, GetObjectCommand, DeleteObjectCommand } = require('@aws-sdk/client-s3');
const logger = require('../../../logger');

// Create an in-memory database for fragment metadata (raw fragment data is stored in S3)
const metadata = new MemoryDB();

// Write a fragment's metadata to the memory db. Returns a Promise
function writeFragment(fragment) {
  return metadata.put(fragment.ownerId, fragment.id, fragment);
}

// Read a fragment's metadata from memory db. Returns a Promise
function readFragment(ownerId, id) {
return metadata.get(ownerId, id);
}

// Write a fragment's data buffer to S3 Object in a Bucket
// https://github.com/awsdocs/aws-sdk-for-javascript-v3/blob/main/doc_source/s3-example-creating-buckets.md#upload-an-existing-object-to-an-amazon-s3-bucket
async function writeFragmentData(ownerId, id, data) {
// Create the PUT API params from our details
const params = {
Bucket: process.env.AWS_S3_BUCKET_NAME,
Key: `${ownerId}/${id}`,
Body: data,
};

// Create a PUT Object command to send to S3
const command = new PutObjectCommand(params);

try {
// Use our client to send the command
await s3Client.send(command);
} catch (err) {
// If anything goes wrong, log info to debug
const { Bucket, Key } = params;
logger.error({ err, Bucket, Key }, 'Error uploading fragment data to S3');
throw new Error('unable to upload fragment data');
}
}

// Read a fragment's data from S3. Returns a Promise
// https://github.com/awsdocs/aws-sdk-for-javascript-v3/blob/main/doc_source/s3-example-creating-buckets.md#getting-a-file-from-an-amazon-s3-bucket
async function readFragmentData(ownerId, id) {
// Create the GET API params from our details
const params = {
Bucket: process.env.AWS_S3_BUCKET_NAME,
Key: `${ownerId}/${id}`,
};

// Create a GET Object command to send to S3
const command = new GetObjectCommand(params);

try {
// Get the object from the Amazon S3 bucket. It is returned as a ReadableStream.
const data = await s3Client.send(command);
// Convert the ReadableStream to a Buffer
return streamToBuffer(data.Body);
} catch (err) {
const { Bucket, Key } = params;
logger.error({ err, Bucket, Key }, 'Error streaming fragment data from S3');
throw new Error('unable to read fragment data');
}
}

// Convert a stream of data into a Buffer, by collecting chunks of data until finished, then assembling them together.
// Wrapping the whole thing in a Promise so it's easier to consume.
const streamToBuffer = (stream) =>
new Promise((resolve, reject) => {
// Collect data stream into an array.
const chunks = [];

// When there's data, add the chunk to our chunks list
stream.on('data', (chunk) => chunks.push(chunk));
// When there's an error, reject the Promise
stream.on('error', reject);
// When the stream is done, resolve with a new Buffer of our chunks
stream.on('end', () => resolve(Buffer.concat(chunks)));
});

// Get a list of fragment ids/objects for the given user from memory db. Returns a Promise
async function listFragments(ownerId, expand = false) {
const fragments = await metadata.query(ownerId);

// If nothing came back, or expanded fragments were requested, return as-is
if (expand || !fragments) {
return fragments;
}

// Otherwise, map to only send back the ids
return fragments.map((fragment) => fragment.id);
}

// Delete a fragment's metadata (from the memory db) and data (from S3). Returns a Promise
async function deleteFragment(ownerId, id) {
  // Create the DELETE API params from our details
  const params = {
    Bucket: process.env.AWS_S3_BUCKET_NAME,
    Key: `${ownerId}/${id}`,
  };

  // Create a DELETE Object command to send to S3
  const command = new DeleteObjectCommand(params);

  try {
    // Make sure the fragment exists first (S3 DeleteObject succeeds even for missing keys)
    await readFragmentData(ownerId, id);
    // Use our client to send the command
    await s3Client.send(command);
    // Remove the metadata entry as well
    await metadata.del(ownerId, id);
  } catch (err) {
    // If anything goes wrong, log info to debug
    const { Bucket, Key } = params;
    logger.error({ err, Bucket, Key }, 'Error deleting fragment data from S3');
    throw new Error('unable to delete fragment data');
  }
}

module.exports.listFragments = listFragments;
module.exports.writeFragment = writeFragment;
module.exports.readFragment = readFragment;
module.exports.writeFragmentData = writeFragmentData;
module.exports.readFragmentData = readFragmentData;
module.exports.deleteFragment = deleteFragment;
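
For context, a minimal, hypothetical sketch of how this data layer could be exercised end to end. It only uses the functions exported above; the require path, owner id, and fragment values are illustrative (not part of the commit), and it assumes AWS_S3_BUCKET_NAME plus either S3 credentials or a local endpoint are configured.

// Illustrative only: round-trip a fragment through the AWS-backed data layer above.
const {
  writeFragment,
  writeFragmentData,
  readFragment,
  readFragmentData,
  deleteFragment,
} = require('./src/model/data/aws'); // path is a placeholder

async function roundTrip() {
  // A minimal fragment-like object (all field values are placeholders)
  const fragment = { ownerId: 'user1', id: 'abc123', type: 'text/plain', size: 9 };

  await writeFragment(fragment); // metadata -> in-memory db
  await writeFragmentData(fragment.ownerId, fragment.id, Buffer.from('Hello S3!')); // data -> S3

  const meta = await readFragment(fragment.ownerId, fragment.id); // metadata from memory db
  const data = await readFragmentData(fragment.ownerId, fragment.id); // Buffer from S3
  console.log(meta.type, data.toString()); // 'text/plain' 'Hello S3!'

  await deleteFragment(fragment.ownerId, fragment.id); // remove data (S3) and metadata
}

roundTrip().catch((err) => console.error(err));
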
48 changes: 48 additions & 0 deletions src/model/data/aws/s3Client.js
@@ -0,0 +1,48 @@
/**
* S3 specific config and objects. See:
* https://www.npmjs.com/package/@aws-sdk/client-s3
*/
const { S3Client } = require('@aws-sdk/client-s3');
const logger = require('../../../logger');

/**
* If AWS credentials are configured in the environment, use them.
* You'll need these when testing locally against LocalStack or MinIO, or when connecting to AWS from your own machine.
* @returns Object | undefined
*/
const getCredentials = () => {
if (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) {
// See https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/clients/client-s3/modules/credentials.html
const credentials = {
accessKeyId: process.env.AWS_ACCESS_KEY_ID,
secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY,
// Optionally include the AWS Session Token, too (e.g., if you're connecting to AWS from your laptop).
sessionToken: process.env.AWS_SESSION_TOKEN,
};
logger.debug('Using extra S3 Credentials AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY');
return credentials;
}
};

/**
* If an AWS S3 Endpoint is configured in the environment, use it.
* @returns string | undefined
*/
const getS3Endpoint = () => {
if (process.env.AWS_S3_ENDPOINT_URL) {
logger.debug({ endpoint: process.env.AWS_S3_ENDPOINT_URL }, 'Using alternate S3 endpoint');
return process.env.AWS_S3_ENDPOINT_URL;
}
};

/**
* Configure and export a new s3Client to use for all API calls.
* NOTE: this client needs to work with AWS S3 as well as with MinIO and LocalStack in development and testing.
* Pass `undefined` for any configuration settings that aren't present (i.e., they'll be ignored).
*/
module.exports = new S3Client({
region: process.env.AWS_REGION,
credentials: getCredentials(), // Credentials are optional (only MinIO needs them, or if you connect to AWS remotely from your laptop)
endpoint: getS3Endpoint(), // The endpoint URL is optional
forcePathStyle: true,
});
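
For local development against MinIO or LocalStack, the environment would typically supply values along these lines. Only the variable names come from the code above; every value below is a placeholder (for example, 9000 is MinIO's default API port and minioadmin its default credentials):

AWS_REGION=us-east-1
AWS_S3_BUCKET_NAME=fragments
AWS_S3_ENDPOINT_URL=http://localhost:9000
AWS_ACCESS_KEY_ID=minioadmin
AWS_SECRET_ACCESS_KEY=minioadmin
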
4 changes: 3 additions & 1 deletion src/model/data/index.js
@@ -1 +1,3 @@
module.exports = require('./memory/memory.js');
// If the AWS_REGION env variable is set, use AWS backend services (S3, DynamoDB);
// otherwise, use an in-memory db.
module.exports = process.env.AWS_REGION ? require('./aws') : require('./memory');
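
Callers that require the data index transparently get whichever backend was selected; a one-line sketch (the relative path and the destructured names are illustrative, taken from the functions exported by both backends):

// Resolves to ./aws when AWS_REGION is set, otherwise to ./memory
const { writeFragment, readFragment, listFragments } = require('../../model/data');
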
File renamed without changes.
14 changes: 14 additions & 0 deletions src/model/fragment.js
@@ -125,6 +125,20 @@ class Fragment {
return readFragmentData(this.ownerId, this.id);
}

/**
 * Updates the fragment's size (and `updated` timestamp) from a data buffer
 * @param {Buffer} data
 * @returns nothing
 */
updateSize(data) {
  if (data == undefined) {
    this.size = 0;
  } else {
    this.updated = new Date();
    this.size = data.length;
  }
}

/**
* Sets the fragment's data in the database
* @param {Buffer} data
2 changes: 1 addition & 1 deletion src/routes/api/byId.js
@@ -1,4 +1,4 @@
const { readFragment, readFragmentData } = require('../../model/data/memory/memory');
const { readFragment, readFragmentData } = require('../../model/data/memory');
const { createErrorResponse } = require('../../response');
const logger = require('../../logger');
var MarkdownIt = require('markdown-it');
17 changes: 17 additions & 0 deletions src/routes/api/delete.js
@@ -0,0 +1,17 @@
const { createSuccessResponse, createErrorResponse } = require('../../response');
const { deleteFragment } = require('../../model/data/memory');
const logger = require('../../logger');

// Delete fragment for the current user
module.exports = async (req, res) => {
const ownerId = req.user;
let id = req.params.id;

try {
await deleteFragment(ownerId, id);
res.status(200).json(createSuccessResponse());
} catch (err) {
logger.info({ err }, 'Unable to delete fragment (it may not exist)');
res.status(404).json(createErrorResponse(404, 'unable to delete fragment'));
}
};
2 changes: 1 addition & 1 deletion src/routes/api/get.js
@@ -1,5 +1,5 @@
const { createSuccessResponse } = require('../../response');
const { listFragments } = require('../../model/data/memory/memory');
const { listFragments } = require('../../model/data/memory');

// Gets a list of fragments for the current user
module.exports = async (req, res) => {
3 changes: 3 additions & 0 deletions src/routes/api/index.js
@@ -30,4 +30,7 @@ router.get('/fragments/:id', require('./byId'));
// GET /v1/fragments/:id/info - get a fragment's metadata by id
router.get('/fragments/:id/info', require('./info'));

// DELETE /v1/fragments/:id - delete a fragment by id
router.delete('/fragments/:id', require('./delete'));

module.exports = router;
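
Assuming the API is mounted under /v1 and the server is running locally with HTTP Basic Auth (as in the Hurl test below), the new delete route could be exercised with something like the following; the fragment id and credentials are placeholders:

curl -i -u user1@email.com:password1 -X DELETE http://localhost:8080/v1/fragments/<fragment-id>
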
2 changes: 1 addition & 1 deletion src/routes/api/info.js
@@ -1,4 +1,4 @@
const { readFragment } = require('../../model/data/memory/memory');
const { readFragment } = require('../../model/data/memory');
const { createErrorResponse } = require('../../response');
const logger = require('../../logger');

9 changes: 5 additions & 4 deletions src/routes/api/post.js
@@ -1,7 +1,7 @@
const { createSuccessResponse, createErrorResponse } = require('../../response');
const { Fragment } = require('../../model/fragment');
const contentType = require('content-type');

const { writeFragment, writeFragmentData } = require('../../model/data/memory');
const logger = require('../../logger');

// Creates a fragment for the current user
@@ -20,9 +20,10 @@ module.exports = async (req, res) => {

const ownerId = req.user;
const fragment = new Fragment({ ownerId, type });
await fragment.save();
await fragment.setData(req.body);

fragment.updateSize(req.body);
await writeFragment(fragment);
await writeFragmentData(ownerId, fragment.id, req.body);

logger.info(`Created fragment: ${JSON.stringify(fragment)} `);

let data = {
56 changes: 56 additions & 0 deletions tests/integration/lab-9-s3.hurl
@@ -0,0 +1,56 @@
# POST a new text fragment to http://localhost:8080 as an authorized user.
# The fragment's body should be the string, Hello S3!
POST http://localhost:8080/v1/fragments
# Send a plain text fragment
Content-Type: text/plain
# Include HTTP Basic Auth credentials using the [BasicAuth] section
[BasicAuth]
user1@email.com:password1

`Hello S3!`


# Confirm that the server returns a 201, and capture the Location header value to a variable named url
HTTP/1.1 201
[Captures]
url: header "Location"


# GET the fragment you just created using the url as an authorized user.
GET {{url}}
[BasicAuth]
user1@email.com:password1


# Confirm that the server returns a 200, that the type of the fragment is text/plain, and that the body is equal to Hello S3!
HTTP/1.1 200
Content-Type: text/plain
[Asserts]
body == "Hello S3!"


# DELETE the fragment using the url as an authorized user.
DELETE {{url}}
[BasicAuth]
user1@email.com:password1


# Confirm that the server returns a 200.
HTTP/1.1 200


# Try to GET the fragment again using the url as an authorized user.
GET {{url}}
[BasicAuth]
user1@email.com:password1


# Confirm that the server returns a 404, since the fragment should be deleted.
HTTP/1.1 404


#DELETE $url
#Authorization: Bearer <your_token>

# Confirm that the server returns a 404 since the fragment should be deleted
#ASSERT status 404
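
With the Hurl CLI installed and the server running on localhost:8080, this file can be run in Hurl's test mode roughly as follows (the exact invocation may differ depending on how Hurl is installed and how the project scripts are set up):

hurl --test tests/integration/lab-9-s3.hurl
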
