I'm trying to upload a file (an image). The upload works: the file is stored in Mongo with the same content type and the same size as the original file. But when I try to download it, the file is corrupted, even though it keeps the same content type (if I upload a PDF it is recognized as a PDF, if it is a PNG it is also recognized, but I can't open either).
I don't understand what is wrong with this; it is a pretty simple and standard setup.
For the upload from the client I use Angular ng-file-upload; the download is a simple GET request to the route defined in the code.
EDIT:
The file is uploaded to the server correctly, so the problem is in how I read it back from GridFS.
The downloaded file is bigger than the one I uploaded! So the file is not identical to the original and is corrupted. Why?
Here is my code.
//BACKEND
//ROUTES
var multer = require('multer');
var upload = multer({ dest: './tmp/' });

router.post('/:id/logo', upload.single('file'), uploadFile);
router.get('/:id/logo', getFile);

//Controller
var Grid = require('gridfs-stream');
var mongoose = require('mongoose');
var fs = require('fs');

uploadFile = function(req, res) {
    var gfs = Grid(mongoose.connection.db, mongoose.mongo);
    // Store the original name and MIME type, since getFile reads
    // file.contentType back out of GridFS.
    var writeStream = gfs.createWriteStream({
        filename: req.file.originalname,
        content_type: req.file.mimetype
    });
    fs.createReadStream(req.file.path).pipe(writeStream);
    writeStream.on('close', function(file) {
        res.status(200).send({ fileId: file._id });
    });
    writeStream.on('error', function(e) {
        res.status(500).send("Could not upload file");
    });
};
getFile = function(req, res) {
    var gfs = Grid(mongoose.connection.db, mongoose.mongo);
    // Check that the file exists
    gfs.findOne({ _id: req.params.id }, function(err, file) {
        if (err) {
            res.status(404).end();
        } else if (!file) {
            res.status(404).end();
        } else {
            var readstream = gfs.createReadStream({
                _id: file._id
            });
            res.set('Content-Type', file.contentType);
            readstream.on('error', function(err) {
                // res.send(500, err) is deprecated in Express 4
                res.status(500).send(err);
            });
            readstream.on('open', function() {
                readstream.pipe(res);
            });
        }
    });
};
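Since the downloaded file is larger than the upload, it helps to know whether the extra bytes were written into GridFS or added on the way out. Here is a minimal diagnostic sketch, using only the gridfs-stream calls already shown above (the route itself is hypothetical): GridFS records a byte count and an MD5 checksum for every stored file, so comparing them against the original tells you which side corrupts the data.

// Hypothetical debug route: report what GridFS actually stored for a file.
router.get('/:id/logo/meta', function(req, res) {
    var gfs = Grid(mongoose.connection.db, mongoose.mongo);
    gfs.findOne({ _id: req.params.id }, function(err, file) {
        if (err || !file) return res.status(404).end();
        // file.length and file.md5 come from the fs.files collection;
        // compare them with the size/checksum of the file you uploaded.
        res.send({ storedBytes: file.length, md5: file.md5 });
    });
});

If storedBytes already differs from the original size, the write side is at fault; if it matches, the GET route (or a middleware in front of it) is altering the response.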
//FRONT END
$scope.newFileUpload = function(file) {
    $scope.upload(file);
};

$scope.upload = function(file) {
    if (file && !angular.isUndefined(file.name)) {
        Upload.upload({
            url: 'api/' + $scope.myId + '/logo',
            fields: {
                'type': 'logo'
            },
            file: file
        }).success(function(data, status, headers, config) {
            $scope.imageId = data.fileId;
        }).error(function(data, status, headers, config) {
            console.log('file upload error status: ' + status);
        });
    }
};
//THE HTML
<div class="drop-box"
ngf-drop
ngf-select
ng-model="imageLogo"
ngf-drag-over-class="dragover"
ngf-allow-dir="true"
accept="image/*"
ngf-pattern="'image/*'"
ngf-change="newFileUpload(imageLogo)"
ngf-multiple="false"
ngf-resize="{width: 200, height: 50}">
Drop logo image here or click to upload</div>
<input type="file" nv-file-select uploader="uploader"/>
Using react-native-vision-camera, I saw that the photo object contains a path for the image, and that path seems readable by a React Native image tag.
I attempted to upload using this path (with a file:// prefix on Android, and the same on iOS), but it failed. Each time the file was detected as "jpeg" or "jpg", but I couldn't open it.
After downloading the file (from the S3 bucket where I uploaded it) and opening the .jpg as text, all it contains is the "file://" path string, not the image data.
import { Storage } from "aws-amplify";
import moment from "moment";

export default async function s3UploadBackup(file, user) {
    let formatted_date = moment().format("DD-MM-YYYY");
    let filePath = file.split("/");
    let fileImageName = filePath[filePath.length - 1];

    try {
        console.log("File contains: " + JSON.stringify(file));
        // example of one of the URLs I used: "file:///storage/emulated/0/Android/data/com.app/files/Pictures/image-c64a66b3-489d-4af6-bf93-7adb507ceda1790666367.jpg"
        const fileName = `${formatted_date}---${user.businessName}---${user.phoneNumber}---${user.location}---${fileImageName}`;
        // NOTE: "file" here is just the path string, so this uploads the
        // string itself, and file.mime is undefined.
        return Storage.put(uploadBackup.path + user.sub + "/" + user.phoneNumber + "/" + fileName, file, {
            // contentType: "image/jpeg"
            contentType: file.mime
        });
    } catch(error) {
        console.log(error);
    }
}
AWS Amplify supports uploading a file as a Blob with a specified content type (JPEG, PNG, ...).
Assume we have a local file URI: file:///storage/emulated/0/Android/data/com.app/files/Pictures/image-c64a66b3-489d-4af6-bf93-7adb507ceda1790666367.jpg
Let's refactor the s3UploadBackup function:
import { Storage } from "aws-amplify";
import moment from "moment";

export default async function s3UploadBackup(file, user) {
    let formatted_date = moment().format("DD-MM-YYYY");
    let filePath = file.split("/");
    let fileImageName = filePath[filePath.length - 1];
    let blob;

    try {
        console.log("File contains: " + JSON.stringify(file));
        // example local URI: "file:///storage/emulated/0/Android/data/com.app/files/Pictures/image-c64a66b3-489d-4af6-bf93-7adb507ceda1790666367.jpg"
        const fileName = `${formatted_date}---${user.businessName}---${user.phoneNumber}---${user.location}---${fileImageName}`;

        // Fetch the local file URI and read its contents into a Blob.
        blob = await new Promise((resolve, reject) => {
            const xhr = new XMLHttpRequest();
            xhr.onload = function () {
                resolve(xhr.response);
            };
            xhr.onerror = function (e) {
                reject(new TypeError("Network request failed"));
            };
            xhr.responseType = "blob";
            xhr.open("GET", file, true); // "file" is the local URI
            xhr.send(null);
        });

        await Storage.put("yourKeyHere", blob, {
            contentType: "image/jpeg", // contentType is optional
        });

        // We're done with the blob, close and release it
        blob.close();
    } catch(error) {
        console.log(error);
        // Release the blob if it was created before the error
        if (blob) blob.close();
    }
}
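For completeness, a hypothetical call site (photo.path is what react-native-vision-camera's takePhoto() returns; user is whatever account object the app already carries):

// Hypothetical usage, inside an async function with a camera ref in scope:
const photo = await camera.current.takePhoto();
await s3UploadBackup("file://" + photo.path, user);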
I tried to solve this, but I don't understand why the file is uploaded with a size of 0 KB.
I found this code in a tutorial, and it works there, but it doesn't work for me:
const { ApolloServer, gql } = require('apollo-server');
const path = require('path');
const fs = require('fs');

const typeDefs = gql`
  type File {
    url: String!
  }

  type Query {
    hello: String!
  }

  type Mutation {
    fileUpload(file: Upload!): File!
  }
`;

const resolvers = {
  Query: {
    hello: () => 'Hello world!',
  },
  Mutation: {
    fileUpload: async (_, { file }) => {
      const { createReadStream, filename, mimetype, encoding } = await file;
      const stream = createReadStream();
      const pathName = path.join(__dirname, `/public/images/${filename}`);
      await stream.pipe(fs.createWriteStream(pathName));

      return {
        url: `http://localhost:4000/images/${filename}`,
      };
    },
  },
};

const server = new ApolloServer({
  typeDefs,
  resolvers,
});

server.listen().then(({ url }) => {
  console.log(`🚀 Server ready at ${url}`);
});
Then when I upload a file, it is uploaded, but its size is 0 KB.
What is happening is that the resolver returns before the file has finished writing, so the server responds before the upload is complete. You need to promisify the file stream events and await them in the resolver.
Here is an example:
https://github.com/jaydenseric/apollo-upload-examples/blob/c456f86b58ead10ea45137628f0a98951f63e239/api/server.js#L40-L41
In your case:
const resolvers = {
  Query: {
    hello: () => "Hello world!",
  },
  Mutation: {
    fileUpload: async (_, { file }) => {
      const { createReadStream, filename } = await file;
      const stream = createReadStream();
      // Don't shadow the "path" module here; use a different variable name.
      const filePath = path.join(__dirname, `/public/images/${filename}`);

      // Store the file in the filesystem.
      await new Promise((resolve, reject) => {
        // Create a stream to which the upload will be written.
        const writeStream = fs.createWriteStream(filePath);

        // When the upload is fully written, resolve the promise.
        writeStream.on("finish", resolve);

        // If there's an error writing the file, remove the partially written
        // file and reject the promise.
        writeStream.on("error", (error) => {
          fs.unlink(filePath, () => {
            reject(error);
          });
        });

        // In Node.js <= v13, errors are not automatically propagated between
        // piped streams. If there is an error receiving the upload, destroy
        // the write stream with the corresponding error.
        stream.on("error", (error) => writeStream.destroy(error));

        // Pipe the upload into the write stream.
        stream.pipe(writeStream);
      });

      return {
        url: `http://localhost:4000/images/${filename}`,
      };
    },
  },
};
Note that it's probably not a good idea to store uploads under the raw client filename like that: a future upload with the same filename will overwrite the earlier one, and it's unclear what happens if two clients upload files with the same name at the same time.
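A minimal sketch of one way around that, using only Node's built-in crypto module plus the path module already required above (the naming scheme itself is just an example):

// Prefix each stored file with random hex so two uploads of "logo.png"
// never collide; keep the original name and extension for serving.
const crypto = require("crypto");

function uniqueName(filename) {
  const id = crypto.randomBytes(8).toString("hex");
  return `${id}-${path.basename(filename)}`;
}

// In the resolver:
// const filePath = path.join(__dirname, "/public/images/", uniqueName(filename));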
I am using multer's array method as middleware on my POST route, and I'm trying to figure out how to surface the error callback from the fileFilter function (in the multer options) as a flash message when the group of files a user uploads contains at least one file with the wrong format. My current fileFilter setup catches the bad file, but instead of sending the user to a blank page with the "File selected is not supported" message, I want to communicate that error to the route that uses multer as middleware.
Here is my multer setup:
var upload = multer({
    storage: multerS3({
        s3: s3,
        bucket: options.Bucket,
        contentType: multerS3.AUTO_CONTENT_TYPE,
        acl: options.ACL,
        key: function(req, file, cb) {
            var fileNameFormatted = file.originalname.replace(/\s+/g, '-').toLowerCase();
            cb(null, req.user.organizationId + '/' + uploadDate + '/' + fileNameFormatted);
        }
    }),
    fileFilter: function(req, file, cb) {
        if (!file.originalname.match(/\.(jpg|jpeg|png|gif|csv|xls|xlsb|xlsm|xlsx)$/)) {
            // Pass an Error so downstream handlers receive a proper error object
            return cb(new Error('File selected is not supported'));
        }
        cb(null, true);
    }
});
Here is my route, using upload.array('fileUpload', 5): fileUpload is the name of my file input field and 5 is multer's file-count limit.
.post(upload.array('fileUpload', 5), function(req, res) {
    // Configure uploaded S3 file path strings based on environment for use in DB
    var uploadedFiles = req.files;
    var s3FilePath = [];
    for (var prop in uploadedFiles) {
        console.log(uploadedFiles[prop].key);
        if (app.get('env') === 'production' || app.get('env') === 'staging') {
            // Push onto the array (assigning here would replace the array with a string)
            s3FilePath.push('https://files.test-site.com/' + uploadedFiles[prop].key);
        } else {
            s3FilePath.push(uploadedFiles[prop].location);
        }
    }
    models.Blog.update({
        blogId: req.body.blogId,
        blogDate: req.body.blogDate,
    }, {
        where: {
            userId: req.user.userId,
            blogId: req.body.blogId
        }
    }).then(function(blog) {
        console.log('This is the blog ' + blog);
        var files = _.map(s3FilePath, function(file) {
            console.log(file);
            return {
                fileName: file,
                blogId: req.body.blogId
            };
        });
        return models.BlogFile.bulkCreate(files);
    }).then(function() {
        res.redirect('/app');
    }).catch(function(err) {
        res.send(err);
        console.log('File Post Error ' + err);
    });
});
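One hedged sketch of how the fileFilter error could be intercepted, following the pattern in multer's README of invoking the middleware manually inside the handler (req.flash here assumes something like connect-flash is configured; adapt to your own flash setup):

.post(function(req, res) {
    upload.array('fileUpload', 5)(req, res, function(err) {
        if (err) {
            // The message from fileFilter's cb(new Error(...)) lands here,
            // so it can be flashed and redirected instead of rendering
            // a blank error page.
            req.flash('error', err.message);
            return res.redirect('back');
        }
        // No error: req.files is populated; proceed as in the route above.
    });
});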
For the past six months I have been downloading the NASA APOD and saving it to an S3 bucket using a Lambda function. Up until 12/23/2016 everything worked as expected. Now when I check my bucket, the images are there but they are 0 bytes. I have included my code below. Does anyone know if something has changed? Thanks!
var AWS = require("aws-sdk");
var https = require('https');
var http = require('http');
var fs = require('fs');

// Incoming Handler
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
exports.handler = (event, context, callback) => {
    GetAPOD();
};

// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
// Functions
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
function GetAPOD() {
    var nasa_api_key = 'MY KEY GOES HERE'
      , nasa_api_path = '/planetary/apod?api_key=' + nasa_api_key;

    var options = {
        host: 'api.nasa.gov',
        port: 443,
        path: nasa_api_path,
        method: 'GET'
    };

    // Connect to the NASA API and get the APOD.
    var req = https.request(options, function (res) {
        console.log('Open connection to NASA.');
        res.setEncoding('utf-8');

        var responseString = '';
        res.on('data', function (data) {
            // Accumulate chunks (the response may arrive in more than one)
            responseString += data;
        });

        res.on('end', function () {
            console.log('API Response: ' + responseString);
            var responseObject = JSON.parse(responseString)
              , image_date = responseObject['date']
              , image_url = responseObject['url']
              , image_hdurl = responseObject['hdurl']
              , media_type = responseObject['media_type'];

            if (media_type == 'image') {
                var image_name = image_date + '.jpg';
                var s3 = new AWS.S3();
                var s3Bucket = new AWS.S3( { params: {Bucket: 'nasa-apod'} } );

                // Check to see if the image already exists in the S3 bucket.
                // If not, we will upload the image to S3.
                var head_data = {Key: image_name};
                s3Bucket.headObject(head_data, function(err, output_head_data) {
                    if (output_head_data) {
                        console.log("Image exists on S3.");
                    }
                    else {
                        console.log("Image does not exist on S3.");
                        // Image has not been uploaded to S3; open a stream and download the image to the /tmp folder.
                        var file = fs.createWriteStream("/tmp/" + image_name);
                        var request = http.get(image_url, function(response) {
                            console.log("Opening file stream.");
                            // Pipe the data into the file stream and save to disk.
                            response.pipe(file);
                            response.on('end', function () {
                                // File is written to disk; check that it exists.
                                var fileName = "/tmp/" + image_name;
                                fs.exists(fileName, function(exists) {
                                    if (exists) {
                                        console.log("File exists in /tmp folder.");
                                        // Get the stats for the image; needed for the ContentLength.
                                        fs.stat(fileName, function(error, stats) {
                                            if (error) {
                                                console.log("Stat Error: " + error);
                                            }
                                            else {
                                                console.log("Opening file stream.");
                                                var image_stream = fs.createReadStream(fileName);
                                                // Begin the upload process to S3.
                                                var param_data = {Key: image_name, Body: image_stream, ContentType: "image/jpeg", ContentLength: stats.size, ACL: "public-read"};
                                                s3Bucket.putObject(param_data, function(err, output_data) {
                                                    if (err) {
                                                        console.log('Error uploading data to S3: ' + err);
                                                    }
                                                    else {
                                                        console.log('Image successfully uploaded.');
                                                    }
                                                });
                                            }
                                        });
                                    }
                                    else {
                                        console.log('File does not exist in the /tmp folder.');
                                    }
                                });
                            });
                        });
                    }
                });
            }
            else {
                console.log("Media Type: " + media_type);
            }
        });
    });

    req.on('error', function (e) {
        console.error('HTTP error: ' + e.message);
    });

    req.end();
}
// <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>
Found out that the NASA APOD API now serves images over https rather than http. I had to adjust my code to use https for the image path.
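A minimal sketch of that adjustment, using nothing beyond the http/https modules already required above: pick the client module that matches the image URL's protocol instead of hardcoding http.get.

// Choose http or https based on the image URL's protocol.
function getClient(url) {
    return url.indexOf('https:') === 0 ? https : http;
}

// In the download step, replace http.get with:
var request = getClient(image_url).get(image_url, function(response) {
    console.log("Opening file stream.");
    response.pipe(file);
    // ... rest unchanged
});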
I am trying to upload files directly from the browser to Amazon S3 using connect-busboy, with the following code (https://github.com/thaume/s3-streaming):
route - index.js:
var awsUpload = require('../services/aws-streaming');

// Index route
// ===========
exports.index = function(req, res){
    if (req.method === 'POST') {
        return awsUpload(req, function(err, url) {
            res.send(JSON.stringify(url));
        });
    }

    res.writeHead(200, { Connection: 'close' });
    res.end('<html><head></head><body>\
        <form method="POST" enctype="multipart/form-data">\
            <input type="file" name="filefield"><br />\
            <input type="submit">\
        </form>\
    </body></html>');
};
and my modified version of aws-streaming.js:
// Initialize aws client
// =====================
var config = require('../config/' + 'development');
var Knox = require('knox');
var moment = require('moment');
var crypto = require('crypto');

// Create the knox client with your aws settings
Knox.aws = Knox.createClient({
    key: config.aws.AWS_ACCESS_KEY_ID,
    secret: config.aws.AWS_SECRET_ACCESS_KEY,
    bucket: config.aws.S3_BUCKET_NAME,
    region: 'eu-west-1'
});

// S3 upload service - stream buffers to S3
// ========================================
var s3UploadService = function(req, next) {
    req.files = {};

    req.busboy.on('file', function(fieldname, file, filename, encoding, mimetype) {
        if (!filename) {
            // If filename is not truthy it means there's no file
            return;
        }

        //////////////// CHECK FOR MIMETYPE /////////////////////
        // If file is not "text/plain" - return //
        if (mimetype != "text/plain") {
            console.log('true!');
            return; // A JSON array with an error "Wrong file type"!
        }

        // Create the initial array containing the stream's chunks
        file.fileRead = [];

        file.on('data', function(chunk) {
            // Push chunks into the fileRead array
            this.fileRead.push(chunk);
        });

        file.on('error', function(err) {
            console.log('Error while buffering the stream: ', err);
        });

        file.on('end', function() {
            // Concat the chunks into a Buffer
            var finalBuffer = Buffer.concat(this.fileRead);
            req.files[fieldname] = {
                buffer: finalBuffer,
                size: finalBuffer.length,
                filename: filename,
                mimetype: mimetype
            };

            // Generate date-based folder prefix
            var datePrefix = moment().format('YYYY[/]MM');
            var key = crypto.randomBytes(10).toString('hex');
            var hashFilename = key + '-' + filename;
            var pathToArtwork = '/artworks/' + datePrefix + '/' + hashFilename;

            var headers = {
                'Content-Length': req.files[fieldname].size,
                'Content-Type': req.files[fieldname].mimetype,
                'x-amz-acl': 'public-read'
            };

            Knox.aws.putBuffer(req.files[fieldname].buffer, pathToArtwork, headers, function(err, response) {
                if (err) {
                    console.error('error streaming image: ', new Date(), err);
                    return next(err);
                }
                if (response.statusCode !== 200) {
                    console.error('error streaming image: ', new Date(), err);
                    return next(err);
                }
                console.log('Amazon response statusCode: ', response.statusCode);
                console.log('Your file was uploaded');
                next();
            });
        });
    });

    req.busboy.on('error', function(err) {
        console.error('Error while parsing the form: ', err);
        next(err);
    });

    req.busboy.on('finish', function() {
        console.log('Done parsing the form!');
        // When everything's done, render the view
        next(null, 'http://www.google.com');
    });

    // Start the parsing
    req.pipe(req.busboy);
};

module.exports = s3UploadService;
What I would like to do is validate the mimetype, return a JSON array with the error message, and end the parsing of the form without uploading the file. I added the mimetype check to aws-streaming, but the middleware never returns, even when the check evaluates to true. What have I done wrong?
Also, the code currently runs the callback when busboy finishes parsing the form, but I would like it to run when the file is actually uploaded. How can I achieve this? Should I comment out the next() in the 'finish' event and move it into the Knox.aws.putBuffer callback?
I'm using Express 4.
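For reference, a hedged sketch of the usual busboy pattern for rejecting a file (it assumes only the connect-busboy events already used above): a skipped file stream must still be drained with file.resume(), otherwise busboy never emits 'finish' and the middleware appears to hang.

req.busboy.on('file', function(fieldname, file, filename, encoding, mimetype) {
    if (mimetype !== 'text/plain') {
        // Record the validation failure, then drain the stream so
        // busboy can finish parsing the rest of the form.
        req.fileValidationError = 'Wrong file type';
        file.resume();
        return;
    }
    // ... buffer and upload as in the code above
});

req.busboy.on('finish', function() {
    if (req.fileValidationError) {
        // Surface a JSON-friendly error to the route's callback.
        return next({ error: req.fileValidationError });
    }
    // If completion should wait for the actual S3 upload, omit next() here
    // and keep the next(null, url) call inside the putBuffer callback instead.
});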