For the past six months I have been downloading the NASA APOD and saving it to an S3 bucket using a Lambda function. Up until 12/23/2016 everything was working as expected. Now when I check my bucket, the images are there, but they are 0 bytes in size. I have included my code below. Does anyone know if there has been a change? Thanks!
var AWS = require("aws-sdk");
var https = require('https');
var http = require('http');
var fs = require('fs');
// Incoming Handler
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
exports.handler = (event, context, callback) => {
GetAPOD();
};
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
// Functions
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
function GetAPOD() {
var nasa_api_key = 'MY KEY GOES HERE'
, nasa_api_path = '/planetary/apod?api_key=' + nasa_api_key;
var options = {
host: 'api.nasa.gov',
port: 443,
path: nasa_api_path,
method: 'GET'
};
// Connect to the NASA API and get the APOD.
var req = https.request(options, function (res) {
console.log('Open connection to NASA.');
res.setEncoding('utf-8');
var responseString = '';
res.on('data', function (data) {
responseString += data;
});
res.on('end', function () {
console.log('API Response: ' + responseString);
var responseObject = JSON.parse(responseString)
, image_date = responseObject['date']
, image_url = responseObject['url']
, image_hdurl = responseObject['hdurl']
, media_type = responseObject['media_type'];
if (media_type == 'image') {
var image_name = image_date + '.jpg';
var s3Bucket = new AWS.S3( { params: {Bucket: 'nasa-apod'} } );
// Check to see if the image already exists in the S3 bucket.
// If not we will upload the image to S3.
var head_data = {Key: image_name};
s3Bucket.headObject(head_data, function(err, output_head_data) {
if (output_head_data) {
console.log("Image exists on S3.");
}
else {
console.log("Image does not exists on S3.");
// Image has not been uploaded to S3, open a stream and download the image to the /tmp folder.
var file = fs.createWriteStream("/tmp/" + image_name);
var request = http.get(image_url, function(response) {
console.log("Opening file stream.");
// Pipe the data into the file stream and save to disk.
response.pipe(file);
response.on('end', function () {
// File is written to disk, we are going to check that it exists.
var fileName = "/tmp/" + image_name;
fs.exists(fileName, function(exists) {
if (exists) {
console.log("File exits in /tmp folder.");
// Get the stats for the image, will need this for the ContentLength
fs.stat(fileName, function(error, stats) {
if (error) {
console.log("Stat Error: " + error);
}
else {
console.log("Opening file stream.");
var image_stream = fs.createReadStream(fileName);
// Begin the upload process to S3.
var param_data = {Key: image_name, Body: image_stream, ContentType: "image/jpeg", ContentLength: stats.size, ACL: "public-read"};
s3Bucket.putObject(param_data, function(err, output_data) {
if (err) {
console.log('Error uploading data to S3: ' + err);
}
else {
console.log('Image successfully uploaded.');
}
});
}
});
}
else {
console.log('File does not exist in the /tmp folder.');
}
});
});
});
}
});
}
else {
console.log("Media Type: " + media_type);
}
});
});
req.on('error', function (e) {
console.error('HTTP error: ' + e.message);
});
req.end();
}
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
Found out that the NASA APOD API is now serving images over https rather than http. I had to adjust my code to use https for the image download.
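In the code above that is a one-line change, since the https module is already required (and the now-unused require of http can be dropped): replace the http.get call with
var request = https.get(image_url, function(response) {
and leave the body of the download callback unchanged.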
Using react-native-vision-camera, I get a local path for the captured image, and it renders fine in a React Native Image tag.
I attempted to upload the file using this path (with the file:// prefix, on both Android and iOS), but it failed. Each time the uploaded object was detected as "jpeg" or "jpg", but I couldn't open it as an image.
After downloading the object from S3 (where I uploaded it) and converting the .jpg to .txt, all I find inside is the "file://..." path string.
import { Storage } from "aws-amplify";
import moment from "moment";
export default async function s3UploadBackup(file, user) {
let formatted_date = moment().format("DD-MM-YYYY");
let filePath = file.split("/");
let fileImageName = filePath[filePath.length - 1];
try {
console.log("Files contains :" + JSON.stringify(file));
// example of one of the URL I used "file:///storage/emulated/0/Android/data/com.app/files/Pictures/image-c64a66b3-489d-4af6-bf93-7adb507ceda1790666367.jpg"
const fileName = `${formatted_date}---${user.businessName}---${user.phoneNumber}---${user.location}---${fileImageName}`;
return Storage.put(uploadBackup.path + user.sub + "/" + user.phoneNumber + "/" + fileName, file, {
// contentType: "image/jpeg"
contentType: file.mime
})
} catch(error) {
console.log(error);
}
}
AWS Amplify supports uploading a file as a Blob and converting it to the specified file type (JPEG, PNG, ...).
Assume we have a local file URI - file:///storage/emulated/0/Android/data/com.app/files/Pictures/image-c64a66b3-489d-4af6-bf93-7adb507ceda1790666367.jpg
Let's refactor the s3UploadBackup function:
import { Storage } from "aws-amplify";
import moment from "moment";
export default async function s3UploadBackup(file, user) {
let formatted_date = moment().format("DD-MM-YYYY");
let filePath = file.split("/");
let fileImageName = filePath[filePath.length - 1];
try {
console.log("Files contains :" + JSON.stringify(file));
// example of one of the URL I used "file:///storage/emulated/0/Android/data/com.app/files/Pictures/image-c64a66b3-489d-4af6-bf93-7adb507ceda1790666367.jpg"
const fileName = `${formatted_date}---${user.businessName}---${user.phoneNumber}---${user.location}---${fileImageName}`;
const blob = await new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.onload = function () {
resolve(xhr.response);
};
xhr.onerror = function (e) {
reject(new TypeError("Network request failed"));
};
xhr.responseType = "blob";
xhr.open("GET", localPath, true);
xhr.send(null);
});
await Storage.put("yourKeyHere", blob, {
contentType: "image/jpeg", // contentType is optional
});
// We're done with the blob, close and release it
blob.close();
} catch(error) {
console.log(error);
}
}
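As an aside, the same path-to-Blob conversion can be done with fetch, which React Native supports for local file:// URIs. A minimal sketch, assuming file holds the local URI as above (uploadLocalFile and its key argument are hypothetical names, not part of the code above):
import { Storage } from "aws-amplify";

async function uploadLocalFile(file, key) {
  // fetch can read a local file:// URI in React Native and expose it as a Blob
  const response = await fetch(file);
  const blob = await response.blob();
  // Hand the Blob, not the path string, to Amplify Storage
  return Storage.put(key, blob, { contentType: "image/jpeg" });
}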
I am new to Node.js and Express.
I use the following function to upload an image to S3:
var aws = require('aws-sdk');
function defaultContentType(req, file, cb) {
setImmediate(function () {
var ct = file.contentType || file.mimetype || 'application/octet-stream'
cb(null, ct);
});
}
module.exports = async function (fileName, file) {
aws.config.update({
secretAccessKey: process.env.AWSSecretKey,
accessKeyId: process.env.AWSAccessKeyId,
contentType: defaultContentType,
});
var s3bucket = new aws.S3({
params: {
Bucket: process.env.S3_Bucket_Name,
}
});
var params = {
Key: fileName,
Body: file
};
var fileData = await s3bucket.upload(params, function (err, data) {
if (err) {
throw err;
} else {
return data;
}
});
return fileData;
}
Before uploading the image, I resize it using:
request(req.file.location, async function (err, response, body) {
var fileInstance = await sharp(body);
var resizeFile = await fileInstance.resize({
height: 150,
fit: 'inside'
});
var data = await s3Upload('mobile_' + req.file.key, resizeFile);
req.mobile = data.Location;
next();
});
The problem I have is:
The image does get resized and saved to S3, but the "s3Upload" function does not return the file location.
It seems the upload takes some time to complete; before it completes, undefined is returned.
Can anyone suggest a way to fix this?
Modified method
module.exports = function (fileName, file, finishCallback) {
// more code
s3bucket.upload(params, function (err, data) {
if (err) {
throw err;
} else {
finishCallback(data);
}
});
}
and modified the calling code as:
s3Upload('mobile_' + req.file.key, resizeFile, (data) => {
req.mobile = data.Location;
next();
});
This seems to be working as expected.
I am not really sure this is the correct way to do things.
Is there a way to do this correctly?
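For what it's worth, the v2 AWS SDK also exposes a .promise() method on upload(), which fits the async/await style of the original module without a hand-rolled callback. A minimal sketch, using the same environment variables as above:
var aws = require('aws-sdk');

module.exports = async function (fileName, file) {
  var s3bucket = new aws.S3({
    params: { Bucket: process.env.S3_Bucket_Name }
  });
  // upload() returns a ManagedUpload; .promise() makes it awaitable
  var data = await s3bucket.upload({ Key: fileName, Body: file }).promise();
  return data; // data.Location holds the uploaded file's URL
};
The caller can then write req.mobile = (await s3Upload('mobile_' + req.file.key, resizeFile)).Location; with no callback argument.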
I have stored and retrieved images in MongoDB using Node. Can I use the same code with some adjustments? Please advise.
upload.ts
var multer = require("multer");
export let UPLOAD_PATH = "uploads";
const storage = multer.diskStorage({
destination: function(req, file, cb) {
cb(null, UPLOAD_PATH);
},
filename: function(req, file, cb) {
cb(null, file.fieldname + "-" + Date.now() + ".jpg");
}
});
export const upload = multer({ storage: storage }).single("avatar");
image.controller.ts
Upload
this._model.findOne(
{ ["user"]: new mongoose.Types.ObjectId(user._id) },
(err, img) => {
upload(req, res, err => {
if (err) {
res.status(500).json(null);
} else {
// Create a new image model and fill the properties
let newImage = new Image();
newImage.filename = req.file.filename;
newImage.originalName = req.file.originalname;
newImage.desc = req.body.desc;
newImage.url =
req.protocol + "://" + req.get("host") + "/images/" + newImage._id;
newImage.user = user._id;
newImage.save(err => {
if (err) {
res.status(400).json(null);
} else {
res.status(201).json(img);
}
});
}
});
}
);
Retrieve
getImage = (req, res) => {
const user = this.getUser(req, res);
this._model.findOne({ ['user']: new mongoose.Types.ObjectId(user._id) }, (err, image) => {
if (err) {
res.status(500).json(null);
}
else if (image == null) {
res.status(200).json(image);
} else {
// stream the image back by loading the file
res.setHeader('Content-Type', 'image/jpeg');
fs.createReadStream(path.join(UPLOAD_PATH, image.filename)).pipe(res);
}
})
};
Is it possible to use the same code, with some modifications, to store and retrieve audio files using Node and Express with MongoDB?
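It should work with small adjustments: keep the uploaded file's own extension instead of hard-coding .jpg, and stream it back with an audio Content-Type. A hedged sketch of the changed pieces (the "audio" field name and the audio/mpeg type are assumptions for MP3 files):
var multer = require("multer");
var path = require("path");
export let UPLOAD_PATH = "uploads";

const storage = multer.diskStorage({
  destination: function(req, file, cb) {
    cb(null, UPLOAD_PATH);
  },
  filename: function(req, file, cb) {
    // Keep the original extension (.mp3, .wav, ...) instead of ".jpg"
    cb(null, file.fieldname + "-" + Date.now() + path.extname(file.originalname));
  }
});
export const upload = multer({ storage: storage }).single("audio");
On retrieval the only change is the header: res.setHeader('Content-Type', 'audio/mpeg'); the createReadStream(...).pipe(res) part stays exactly as it is for images.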
I'm using pdfkit in a Lambda function which creates a PDF and is then supposed to upload it to an S3 bucket. But when I test the function I get Error: Cannot determine length of [object PDFDocument]
Here is my function:
var PDFDocument = require('pdfkit');
var AWS = require('aws-sdk');
process.env['PATH'] = process.env['PATH'] + ':' +
process.env['LAMBDA_TASK_ROOT'];
exports.handler = function(event, context) {
// create a document and pipe to a blob
var doc = new PDFDocument();
// draw some text
doc.fontSize(25)
.text('Hello World', 100, 80);
var params = {
Bucket : "test-bucket",
Key : event.pdf_name + ".pdf",
Body : doc
}
var s3 = new AWS.S3();
s3.putObject(params, function(err, data) {
if (err) {
console.log(err)
} else {
context.done(null, { status: 'pdf created' });
doc.end();
}
});
};
What am I doing wrong? How do I provide the file size if that is needed? Is this a good way to do this, or is there a better way to upload a PDF stream to an S3 bucket?
Here is my solution:
const PDFDocument = require('pdfkit');
const fs = require("fs");
const AWS = require('aws-sdk');
const s3 = new AWS.S3();
exports.handler = function (event, context, callback) {
let doc = new PDFDocument();
let fileName = "yourfile.pdf";
// We use Lambda's temp folder to store the file temporarily.
// When Lambda execution ends, /tmp is flushed.
let file = fs.createWriteStream("/tmp/" + fileName);
doc.pipe(file);
doc.text("hello");
// Finalize the PDF file
doc.end();
// Send pdf file to s3
file.on("finish", function () {
//get the file size
const stats = fs.statSync("/tmp/" + fileName);
console.log("filesize: " + stats.size);
console.log("starting s3 putObject");
s3.putObject({
Bucket: "[your-bucket]",
Key: fileName,
Body: fs.createReadStream("/tmp/" + fileName),
ContentType: "application/pdf",
ContentLength: stats.size,
}, function (err) {
if (err) {
console.log(err, err.stack);
callback(err);
} else {
console.log("Done");
callback(null, "done");
}
});
});
}
Key elements of this solution were the use of file streams and Lambda's temp folder. file.on("finish") is used to check that writing the file has actually finished.
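An alternative that skips /tmp entirely: PDFDocument is a readable stream, so its chunks can be collected into a Buffer, and putObject accepts a Buffer directly (the SDK derives the length from it, so ContentLength is not needed). A minimal sketch along those lines:
const PDFDocument = require('pdfkit');
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = function (event, context, callback) {
  const doc = new PDFDocument();
  const chunks = [];
  // PDFDocument is a readable stream; collect its output in memory
  doc.on('data', (chunk) => chunks.push(chunk));
  doc.on('end', () => {
    s3.putObject({
      Bucket: "[your-bucket]",
      Key: "yourfile.pdf",
      Body: Buffer.concat(chunks), // a Buffer, so the SDK knows the length
      ContentType: "application/pdf",
    }, (err) => (err ? callback(err) : callback(null, "done")));
  });
  doc.text("hello");
  doc.end(); // flushes the remaining chunks and fires 'end'
};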
If you want the PDF to be accessible to users, remember to add the attribute ACL: 'public-read'. Also, when using an S3 client with DigitalOcean Spaces, the following worked for me:
s3Client.putObject({
Bucket: bucketName,
Key: fileName,
Body: fs.createReadStream("/tmp/" + fileName),
ContentType: "application/pdf",
ContentLength: stats.size,
ACL: 'public-read',
}, function (err) {
if (err) {
console.log(err, err.stack);
callback(err);
} else {
console.log("Done");
callback(null, "done");
}
});
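For reference, since Spaces is S3-compatible, a DigitalOcean client can be built with the standard aws-sdk by pointing it at the Spaces endpoint (the region below is an assumption; substitute your own):
const AWS = require('aws-sdk');

const s3Client = new AWS.S3({
  endpoint: new AWS.Endpoint('nyc3.digitaloceanspaces.com'), // assumed region
  accessKeyId: process.env.SPACES_KEY,
  secretAccessKey: process.env.SPACES_SECRET,
});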
I am trying to directly upload files from the browser to Amazon S3 using connect-busboy with the following code (https://github.com/thaume/s3-streaming)
route - index.js:
var awsUpload = require('../services/aws-streaming');
// Index route
// ===========
exports.index = function(req, res){
if (req.method === 'POST') {
return awsUpload(req, function(err, url) {
res.send(JSON.stringify(url));
});
}
res.writeHead(200, { Connection: 'close' });
res.end('<html><head></head><body>\
<form method="POST" enctype="multipart/form-data">\
<input type="file" name="filefield"><br />\
<input type="submit">\
</form>\
</body></html>');
};
with my modified version of aws-streaming.js
// Initialize aws client
// =====================
var config = require('../config/' + 'development');
var Knox = require('knox');
var moment = require('moment');
var crypto = require('crypto');
// Create the knox client with your aws settings
Knox.aws = Knox.createClient({
key: config.aws.AWS_ACCESS_KEY_ID,
secret: config.aws.AWS_SECRET_ACCESS_KEY,
bucket: config.aws.S3_BUCKET_NAME,
region: 'eu-west-1'
});
// S3 upload service - stream buffers to S3
// ========================================
var s3UploadService = function(req, next) {
req.files = {};
req.busboy.on('file', function(fieldname, file, filename, encoding, mimetype) {
if (!filename) {
// If filename is not truthy it means there's no file
return;
}
//////////////// CHECK FOR MIMETYPE /////////////////////
// If file is not "text/plain" - return //
if (mimetype != "text/plain") {
console.log('true!')
return; // A JSON array with an error "Wrong file type"!
}
// Create the initial array containing the stream's chunks
file.fileRead = [];
file.on('data', function(chunk) {
// Push chunks into the fileRead array
this.fileRead.push(chunk);
});
file.on('error', function(err) {
console.log('Error while buffering the stream: ', err);
});
file.on('end', function() {
// Concat the chunks into a Buffer
var finalBuffer = Buffer.concat(this.fileRead);
req.files[fieldname] = {
buffer: finalBuffer,
size: finalBuffer.length,
filename: filename,
mimetype: mimetype
};
// Generate date based folder prefix
var datePrefix = moment().format('YYYY[/]MM');
var key = crypto.randomBytes(10).toString('hex');
var hashFilename = key + '-' + filename;
var pathToArtwork = '/artworks/' + datePrefix + '/' + hashFilename;
var headers = {
'Content-Length': req.files[fieldname].size,
'Content-Type': req.files[fieldname].mimetype,
'x-amz-acl': 'public-read'
};
Knox.aws.putBuffer( req.files[fieldname].buffer, pathToArtwork, headers, function(err, response){
if (err) {
console.error('error streaming image: ', new Date(), err);
return next(err);
}
if (response.statusCode !== 200) {
console.error('error streaming image: ', new Date(), err);
return next(err);
}
console.log('Amazon response statusCode: ', response.statusCode);
console.log('Your file was uploaded');
next();
});
});
});
req.busboy.on('error', function(err) {
console.error('Error while parsing the form: ', err);
next(err);
});
req.busboy.on('finish', function() {
console.log('Done parsing the form!');
// When everything's done, render the view
next(null, 'http://www.google.com');
});
// Start the parsing
req.pipe(req.busboy);
};
module.exports = s3UploadService;
What I would like to do is validate the mimetype and return a JSON array with the error message, ending the parsing of the form and not uploading the file. I have added the check to aws-streaming.js, but the request does not return, even when the mimetype test evaluates to true. What have I done wrong?
Also, the code currently runs the callback when it has finished parsing the form, but I would like it to run when the file has actually been uploaded. How can I achieve this? Should I comment out the next() in the 'finish' event and move it into the Knox.aws.putBuffer callback?
I'm using Express 4.
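One likely cause, for anyone hitting the same issue: if the 'file' handler returns without consuming the stream, busboy stalls and never emits 'finish', so no response is ever sent. A sketch of both fixes: drain rejected files with file.resume(), record the error, and call next() only once parsing and uploading are both done (the counter and flag below are my own bookkeeping, not part of the original code):
// Assumes Knox.aws has been created exactly as above
var s3UploadService = function(req, next) {
  var uploadError = null;
  var uploadedPath = null;
  var pending = 0;      // uploads still in flight
  var parsed = false;   // busboy finished parsing the form

  function maybeFinish() {
    // Respond only when the form is parsed AND every upload has completed
    if (parsed && pending === 0) next(uploadError, uploadedPath);
  }

  req.busboy.on('file', function(fieldname, file, filename, encoding, mimetype) {
    if (!filename) return file.resume();
    if (mimetype !== 'text/plain') {
      uploadError = new Error('Wrong file type');
      return file.resume(); // drain the stream, or 'finish' never fires
    }
    pending++;
    var chunks = [];
    file.on('data', function(chunk) { chunks.push(chunk); });
    file.on('end', function() {
      var buffer = Buffer.concat(chunks);
      var pathToArtwork = '/artworks/' + filename;
      Knox.aws.putBuffer(buffer, pathToArtwork, {
        'Content-Length': buffer.length,
        'Content-Type': mimetype,
        'x-amz-acl': 'public-read'
      }, function(err, response) {
        if (err || response.statusCode !== 200) {
          uploadError = err || new Error('S3 returned ' + response.statusCode);
        } else {
          uploadedPath = pathToArtwork;
        }
        pending--;
        maybeFinish();
      });
    });
  });

  req.busboy.on('finish', function() {
    parsed = true;
    maybeFinish(); // the response moves here, after the uploads, not before
  });

  req.pipe(req.busboy);
};

module.exports = s3UploadService;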