AWS Lambda S3 GET/POST - SignatureDoesNotMatch error

I have had a Lambda node.js function up and running for about 6 months without any issue. The function simply takes an object and copies it from one bucket to another.
Today, I have started getting:
"SignatureDoesNotMatch: The request signature we calculated does not
match the signature you provided. Check your key and signing method."
The code I am using is pretty simple; any suggestions on how I could fix this?
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});

exports.handler = function(event, context) {
  var to_bucket = 'my_to_bucket/test';
  var from_bucket = event.Records[0].s3.bucket.name;
  var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
  var size = Math.floor(event.Records[0].s3.object.size / 1024);

  s3.getObject({Bucket: from_bucket, Key: key}, function(err, data) {
    if (err) {
      // send a webhook
    }
    else {
      s3.putObject({Bucket: to_bucket, Key: key, Body: data.Body, ContentType: data.ContentType},
        function(err, data) {
          if (err) {
            // send a webhook
          }
          else {
            // send a webhook
          }
        });
    } // end else
  }); // end getobject
};
UPDATE:
I have discovered that if I send to the bucket root, it works fine. If I send to any subfolder of the same bucket, it fails. I do send to a subfolder and had initially simplified the code above, but I've updated it to show the subfolder in to_bucket.

I found a fix for this. After realising it was due to the folder inside the bucket, and not just sending to a bucket root, I searched and found the following post: https://github.com/aws/aws-sdk-go/issues/562
It looks like the Bucket should not include the subfolder; the Key should include it instead. Why this has worked until now is a mystery. Here is the replacement code for the above:
var aws = require('aws-sdk');
var s3 = new aws.S3({apiVersion: '2006-03-01'});

exports.handler = function(event, context) {
  var to_bucket = 'my_to_bucket';
  var from_bucket = event.Records[0].s3.bucket.name;
  var key = decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
  var size = Math.floor(event.Records[0].s3.object.size / 1024);

  s3.getObject({Bucket: from_bucket, Key: key}, function(err, data) {
    if (err) {
      // send a webhook
    }
    else {
      key = 'subfolder/' + key;
      s3.putObject({Bucket: to_bucket, Key: key, Body: data.Body, ContentType: data.ContentType},
        function(err, data) {
          if (err) {
            // send a webhook
          }
          else {
            // send a webhook
          }
        });
    } // end else
  }); // end getobject
};

Related

Can't get AWS Lambda/DynamoDB API to work

It is my first time trying AWS DynamoDB and Lambda with Node.js. I followed the exact instructions in the AWS documentation here: https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-dynamo-db.html#http-api-dynamo-db-attach-integrations
Here's the function code I used, taken from step 10 of the documentation:
const AWS = require("aws-sdk");

const dynamo = new AWS.DynamoDB.DocumentClient();

exports.handler = async (event, context) => {
  let body;
  let statusCode = 200;
  const headers = {
    "Content-Type": "application/json"
  };

  try {
    switch (event.routeKey) {
      case "DELETE /items/{id}":
        await dynamo
          .delete({
            TableName: "http-crud-tutorial-items",
            Key: {
              id: event.pathParameters.id
            }
          })
          .promise();
        body = `Deleted item ${event.pathParameters.id}`;
        break;
      case "GET /items/{id}":
        body = await dynamo
          .get({
            TableName: "http-crud-tutorial-items",
            Key: {
              id: event.pathParameters.id
            }
          })
          .promise();
        break;
      case "GET /items":
        body = await dynamo.scan({ TableName: "http-crud-tutorial-items" }).promise();
        break;
      case "PUT /items":
        let requestJSON = JSON.parse(event.body);
        await dynamo
          .put({
            TableName: "http-crud-tutorial-items",
            Item: {
              id: requestJSON.id,
              price: requestJSON.price,
              name: requestJSON.name
            }
          })
          .promise();
        body = `Put item ${requestJSON.id}`;
        break;
      default:
        throw new Error(`Unsupported route: "${event.routeKey}"`);
    }
  } catch (err) {
    statusCode = 400;
    body = err.message;
  } finally {
    body = JSON.stringify(body);
  }

  return {
    statusCode,
    body,
    headers
  };
};
But each time I try the APIs as structured in the documentation, I get 'internal server error'. I have tried it from both the terminal and Postman, same error. I couldn't find any helpful links elsewhere to solve this. Is there something I need to do differently?
There are many reasons a Lambda would return a 500 exception to APIGW, for example:
https://aws.amazon.com/premiumsupport/knowledge-center/api-gateway-lambda-stage-variable-500/
https://aws.amazon.com/premiumsupport/knowledge-center/api-gateway-internal-server-error/
My suggestion is to strip back your function code so it only returns a response to APIGW with no other logic, and test that. Then you can decide whether to work forwards or backwards from there.
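As a minimal sketch of that stripping-back step (assuming the HTTP API payload, so event.routeKey is present), a handler like the one below returns a well-formed response and nothing else. If this still produces 'internal server error', the problem is in the integration, route, or permissions rather than in the function logic.

// Minimal test handler: no DynamoDB calls, just a well-formed HTTP API response.
exports.handler = async (event) => {
  return {
    statusCode: 200,
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ routeKey: event.routeKey || "none" }),
  };
};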

How to serve MP4 using an express Rest API server so it's compatible with mobile?

I have an Express REST API server, and I am trying to use it to get an MP4 file from an S3 bucket and serve it.
The video plays fine on my desktop, but it doesn't work on my iPhone 10.
It's not a problem with my phone's ability to play this specific file, because when I access it on my phone directly from the bucket, it works. So it must be something about how my server is getting or serving the file.
Any suggestions?
app.get("/video/:userId", async (request, response) => {
  s3.getObject({ Bucket: 'user-videos', Key: request.params.userId }, function(err, data) {
    if (err) {
      console.log(err);
    } else {
      response.set("Content-Type", "video/mp4");
      response.send(data.Body);
    }
  });
});
Safari on iOS requests video with a Range header and expects a 206 partial response, so you must check whether the request includes a Range header and, if it does, serve the content accordingly:
app.get("/video/:userId", async (request, response) => {
  // Forward the browser's Range header to S3 so only the requested bytes are returned.
  const range = request.get("Range");
  const params = { Bucket: 'user-videos', Key: request.params.userId };
  if (range) {
    params.Range = range;
  }

  s3.getObject(params, function(err, imgData) {
    if (err) {
      console.log(err);
    } else {
      response.set("Content-Type", "video/mp4");
      response.set("Accept-Ranges", "bytes");
      response.set("Content-Length", imgData["ContentLength"]);
      response.set("ETag", imgData["ETag"]);
      response.set("Last-Modified", imgData["LastModified"]);
      if (range) {
        // S3 reports the satisfied byte range back in ContentRange for partial requests.
        response.set("Content-Range", imgData["ContentRange"]);
        response.status(206);
      } else {
        response.status(200);
      }
      response.send(imgData.Body);
    }
  });
});

Uploading pdfkit pdf stream to S3 bucket from Lambda function gives Error: Cannot determine length of [object PDFDocument]

I'm using pdfkit in a Lambda function which creates a PDF and is then supposed to upload the PDF to an S3 bucket. But when I test the function I get Error: Cannot determine length of [object PDFDocument]
Here is my function:
var PDFDocument = require('pdfkit');
var AWS = require('aws-sdk');

process.env['PATH'] = process.env['PATH'] + ':' + process.env['LAMBDA_TASK_ROOT'];

exports.handler = function(event, context) {
  // create a document and pipe to a blob
  var doc = new PDFDocument();

  // draw some text
  doc.fontSize(25)
     .text('Hello World', 100, 80);

  var params = {
    Bucket: "test-bucket",
    Key: event.pdf_name + ".pdf",
    Body: doc
  };

  var s3 = new AWS.S3();
  s3.putObject(params, function(err, data) {
    if (err) {
      console.log(err);
    } else {
      context.done(null, { status: 'pdf created' });
      doc.end();
    }
  });
};
What am I doing wrong? How do I provide the file size if that is needed? Is this a good way to do this, or is there a better way to upload a stream of a PDF file to an S3 bucket?
Here is my solution:
const PDFDocument = require('pdfkit');
const fs = require("fs");
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = function (event, context, callback) {
  let doc = new PDFDocument;
  let fileName = "yourfile.pdf";

  // We use Lambda's temp folder to store the file temporarily.
  // When Lambda execution ends, /tmp is flushed.
  let file = fs.createWriteStream("/tmp/" + fileName);
  doc.pipe(file);
  doc.text("hello");

  // Finalize the PDF file
  doc.end();

  // Send the PDF file to S3 once writing has finished
  file.on("finish", function () {
    // get the file size
    const stats = fs.statSync("/tmp/" + fileName);
    console.log("filesize: " + stats.size);
    console.log("starting s3 putObject");

    s3.putObject({
      Bucket: "[your-bucket]",
      Key: fileName,
      Body: fs.createReadStream("/tmp/" + fileName),
      ContentType: "application/pdf",
      ContentLength: stats.size,
    }, function (err) {
      if (err) {
        console.log(err, err.stack);
        callback(err);
      } else {
        console.log("Done");
        callback(null, "done");
      }
    });
  });
};
Key elements of this solution were the use of file streams and Lambda's temp folder. file.on("finish") is used to check that the file write has actually completed.
If you want the PDF to be publicly accessible, remember to add the ACL: 'public-read' attribute. When using the S3 client with DigitalOcean Spaces, this is what worked for me:
s3Client.putObject({
  Bucket: bucketName,
  Key: fileName,
  Body: fs.createReadStream("/tmp/" + fileName),
  ContentType: "application/pdf",
  ContentLength: stats.size,
  ACL: 'public-read',
}, function (err) {
  if (err) {
    console.log(err, err.stack);
    callback(err);
  } else {
    console.log("Done");
    callback(null, "done");
  }
});
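The question also asks whether there is a better way to upload the stream directly. As a hedged alternative sketch (not part of the answers above, and assuming the v2 SDK's s3.upload, which accepts a stream without a known ContentLength), the PDFDocument stream can be passed straight to upload() instead of being staged in /tmp. Bucket and key names are placeholders.

const PDFDocument = require('pdfkit');
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = async (event) => {
  const doc = new PDFDocument();
  doc.text('hello');
  doc.end(); // finalize the PDF; the stream flushes its remaining data

  // s3.upload() (unlike putObject) streams bodies of unknown length via multipart upload.
  await s3.upload({
    Bucket: '[your-bucket]',
    Key: 'yourfile.pdf',
    Body: doc,
    ContentType: 'application/pdf',
  }).promise();

  return { status: 'pdf created' };
};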

Lambda File Write to S3

For the past six months I have been downloading the NASA APOD and saving it to an S3 bucket using a Lambda function. Up until 12/23/2016 all was working as expected. Now when I check my bucket, the images are there but they are 0 bytes in size. I have included my code below. Does anyone know if there has been a change? Thanks!
var AWS = require("aws-sdk");
var https = require('https');
var http = require('http');
var fs = require('fs');

// Incoming Handler
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
exports.handler = (event, context, callback) => {
  GetAPOD();
};
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>

// Functions
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
function GetAPOD() {
  var nasa_api_key = 'MY KEY GOES HERE'
    , nasa_api_path = '/planetary/apod?api_key=' + nasa_api_key;

  var options = {
    host: 'api.nasa.gov',
    port: 443,
    path: nasa_api_path,
    method: 'GET'
  };

  // Connect to the NASA API and get the APOD.
  var req = https.request(options, function (res) {
    console.log('Open connection to NASA.');
    res.setEncoding('utf-8');

    var responseString = '';
    res.on('data', function (data) {
      responseString = data;
    });

    res.on('end', function () {
      console.log('API Response: ' + responseString);
      var responseObject = JSON.parse(responseString)
        , image_date = responseObject['date']
        , image_url = responseObject['url']
        , image_hdurl = responseObject['hdurl']
        , media_type = responseObject['media_type'];

      if (media_type == 'image') {
        var image_name = image_date + '.jpg';
        var s3 = new AWS.S3();
        var s3Bucket = new AWS.S3( { params: {Bucket: 'nasa-apod'} } );

        // Check to see if the image already exists in the S3 bucket.
        // If not we will upload the image to S3.
        var head_data = {Key: image_name};
        s3Bucket.headObject(head_data, function(err, output_head_data) {
          if (output_head_data) {
            console.log("Image exists on S3.");
          }
          else {
            console.log("Image does not exist on S3.");
            // Image has not been uploaded to S3, open a stream and download the image to the /tmp folder.
            var file = fs.createWriteStream("/tmp/" + image_name);
            var request = http.get(image_url, function(response) {
              console.log("Opening file stream.");
              // Pipe the data into the file stream and save to disk.
              response.pipe(file);
              response.on('end', function () {
                // File is written to disk, we are going to check that it exists.
                var fileName = "/tmp/" + image_name;
                fs.exists(fileName, function(exists) {
                  if (exists) {
                    console.log("File exists in /tmp folder.");
                    // Get the stats for the image, will need this for the ContentLength
                    fs.stat(fileName, function(error, stats) {
                      if (error) {
                        console.log("Stat Error: " + error);
                      }
                      else {
                        console.log("Opening file stream.");
                        var image_stream = fs.createReadStream(fileName);
                        // Begin the upload process to S3.
                        var param_data = {Key: image_name, Body: image_stream, ContentType: "image/jpeg", ContentLength: stats.size, ACL: "public-read"};
                        s3Bucket.putObject(param_data, function(err, output_data) {
                          if (err) {
                            console.log('Error uploading data to S3: ' + err);
                          }
                          else {
                            console.log('Image successfully uploaded.');
                          }
                        });
                      }
                    });
                  }
                  else {
                    console.log('File does not exist in the /tmp folder.');
                  }
                });
              });
            });
          }
        });
      }
      else {
        console.log("Media Type: " + media_type);
      }
    });
  });

  req.on('error', function (e) {
    console.error('HTTP error: ' + e.message);
  });

  req.end();
}
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
I found out that the NASA APOD API now serves images over https rather than http. I had to adjust my code to request the image over https.
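For reference, a minimal sketch of that adjustment, using the same variable names as the code above (image_url and file are as defined there): download the image with the https module instead of http, since the API now returns https image URLs.

// Download the image over https; the rest of the flow is unchanged.
var request = https.get(image_url, function (response) {
  console.log("Opening file stream.");
  // Pipe the data into the file stream and save to disk.
  response.pipe(file);
});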

How do I serve a file from S3 through Meteor Iron Router

My question is very similar to this one which describes how to serve a local file using Iron Router. I need to do the same, but instead of reading the file synchronously from disk, I need to get the file from S3 which is an asynchronous call.
The problem appears to be that the action method has returned before the asynchronous s3.getObject call completes, giving me the following error.
Error: Can't render headers after they are sent to the client.
I'm assuming that Iron Router is generating the response for me when it realizes that I haven't handled the response in my action method, but I'm stumped about how to tell it to wait for my asynchronous call to finish.
Here is my code.
Router.map(function () {
  this.route('resumeDownload', {
    where: 'server',
    path: '/resume/:_id',
    action: function () {
      var response = this.response;
      var candidate = Candidates.findOne(this.params._id);

      if (!candidate || !candidate.resumeS3Key) {
        // this works fine because the method hasn't returned yet.
        response.writeHead(404);
        return response.end();
      }

      var s3 = new AWS.S3();
      s3.getObject({Bucket: 'myBucket', Key: candidate.resumeS3Key}, function (err, data) {
        if (err) {
          // this will cause the error to be displayed
          response.writeHead(500);
          return response.end();
        }
        // this will also cause the error to be displayed
        response.writeHead(200, {'Content-Type': data.ContentType});
        response.end(data.Body);
      });
    }
  });
});
I was able to solve this one myself. I needed to use a future in my action method.
Here is the working code.
Router.map(function () {
  this.route('resumeDownload', {
    where: 'server',
    path: '/resume/:_id',
    action: function () {
      var response = this.response,
          candidate = Candidates.findOne(this.params._id);

      if (!candidate || !candidate.resumeS3Key) {
        response.writeHead(404);
        return response.end();
      }

      // Future.wrap turns the callback-style s3.getObject into a function that
      // returns a future; .wait() blocks this fiber until S3 responds, so the
      // action method does not return before the data is available.
      var Future = Npm.require('fibers/future'),
          s3 = new AWS.S3(),
          futureGetObject = Future.wrap(s3.getObject.bind(s3)),
          data = futureGetObject({Bucket: 'myBucket', Key: candidate.resumeS3Key}).wait();

      response.writeHead(200, {'Content-Type': data.ContentType});
      response.end(data.Body);
    }
  });
});