I have an Express.js back end and I'm trying to download/stream PDFs from S3 into a zip file. This is the error in the Heroku log:
TypeError: Cannot read property 'on' of null
    at Object.s3Zip.archiveStream (/app/node_modules/s3-zip/s3-zip.js:49:6)
    at Object.s3Zip.archive (/app/node_modules/s3-zip/s3-zip.js:33:24)
It was working on localhost with "s3-zip": "^2.0.4"; however, I never deployed that version to Heroku. Now I'm using "s3-zip": "^3.1.2".
Also note: in my IDE the .archive call is crossed out with the hint "Deprecated symbol used".
public getZipFiles(req, res) {
  const get_file = req.headers['pdfs'].split(',');
  console.log("get file: ", get_file);
  // first param is the zip file name
  const zip_file_name = get_file.shift() + '.zip';
  const user_downloads = downloadsFolder();
  console.log(downloadsFolder()); // value is /tmp/
  const output = fs.createWriteStream(join(user_downloads, zip_file_name));
  // note: get_file is an array - ['file1.pdf', 'file2.pdf']
  s3Zip
    .archive({ region: aws_region, bucket: aws_bucket, debug: true }, '', get_file)
    .pipe(output)
};
I expected to see the downloaded zip file, but got the error instead.
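For comparison, here is a minimal sketch of what an s3-zip v3 call can look like when an aws-sdk S3 client is created explicitly and handed to s3-zip via its s3 option. The region, bucket and file names below are placeholders, not values from the question, so treat this as an illustration rather than a confirmed fix:

// Hedged sketch with placeholder names, not the original handler.
const fs = require('fs');
const { join } = require('path');
const AWS = require('aws-sdk');
const s3Zip = require('s3-zip');

const s3 = new AWS.S3({ region: 'us-east-1' }); // placeholder region

// Write the zip to /tmp, which is writable on Heroku dynos.
const output = fs.createWriteStream(join('/tmp', 'files.zip'));

s3Zip
  .archive({ s3: s3, bucket: 'my-bucket', debug: true }, '', ['file1.pdf', 'file2.pdf'])
  .pipe(output);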
I'm using the AWS SDK with React Native to upload an image to an S3 bucket.
First of all, I want to say that my access and connectivity work fine: I tried uploading plain text and it works, and I tried listing the objects and the buckets and that works too.
Here is my code:
async function handleImage(capturedImage) {
  setImage(capturedImage);
  setScreenState(ScreenStates.LOADING);
  try {
    const result = await classifyImage(capturedImage);
    console.log(result.tensor_)
    // {dtype:"float32",shape:[…]}
    // dtype:"float32"
    // shape:[1,3,273,224]
    const blob_jpeg = new Blob([result.tensor_], { type: "image/jpeg" });
    console.log(typeof blob_jpeg._data)
    // object
    console.log(blob_jpeg._data)
    // {blobId:"e7a667ad-4363-4a2e-9850-8695f103e9e0",offset:0,size:1489546,type:"image/jpeg",__collector:{}}
    try {
      const keyName = 'image.jpeg';
      const putCommand = new PutObjectCommand({
        Bucket: "mybucket",
        ContentType: "image/jpeg",
        Key: "myimage",
        Body: blob_jpeg._data,
      });
      await s3.send(putCommand);
      console.log(
        'Successfully uploaded data to ' + bucketName + '/' + keyName);
    } catch (e) {
      console.log(e, e);
    }
My error:
Error: "The request signature we calculated does not match the signature you provided. Check your key and signing method." in SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. Check your key and signing method. << at construct (native) << at apply (native) << at i (#aws-sdk/client-s3.js:3:461197)
Any ideas about how I can solve this problem and successfully upload my image?
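One thing worth checking, as a hedged suggestion rather than a confirmed fix: the Body here is blob_jpeg._data, which is React Native's internal blob descriptor object rather than the binary content, and a request whose sent body differs from the signed body produces exactly a SignatureDoesNotMatch error. Below is a minimal sketch that uploads raw bytes instead; capturedImage.uri, the bucket name and the key are assumptions, and it also assumes fetch().arrayBuffer() is available in this React Native environment:

import { PutObjectCommand } from '@aws-sdk/client-s3';
// `s3` is the already-configured S3Client instance from the question.

// Hedged sketch: read the captured image into raw bytes before uploading.
async function uploadJpeg(capturedImage) {
  const response = await fetch(capturedImage.uri); // assumes a local file URI
  const bytes = new Uint8Array(await response.arrayBuffer());

  const putCommand = new PutObjectCommand({
    Bucket: 'mybucket',          // placeholder
    Key: 'myimage.jpeg',         // placeholder
    ContentType: 'image/jpeg',
    ContentLength: bytes.length, // explicit length helps the signed body match what is sent
    Body: bytes,                 // raw bytes rather than blob_jpeg._data
  });
  await s3.send(putCommand);
}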
I am trying to list objects and, if that works, later download/upload files to AWS S3. The code below throws an error. What am I doing incorrectly that this doesn't work? I've passed the accessKeyId and accessSecretKey in all the ways shown below. I have a config and a credentials file on Mac; on Windows I have just one AWS credentials file, and I have also set this on my Windows machine:
setx AWS_SDK_LOAD_CONFIG=1
CODE
const AWS = require('aws-sdk');

function listS3Objects(file, name, type) {
  const s3bucket = new AWS.S3({
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    accessSecretKey: process.env.AWS_SECRET_ACCESS_KEY,
    // accessKeyId: 'my actual key in credentials file', //aws_access_key_id
    // accessSecretKey: 'my actual secret key in credentials file', //aws_secret_access_key
    region: "ap-southeast-1"
  });
  const params = {
    Bucket: 'testbucketName',
  };
  s3bucket.listObjects(params, (err, data) => {
    if (err) { throw err; }
    /* eslint-disable no-console */
    console.log('Success!');
    console.log(data);
    return data;
    /* eslint-enable no-console */
  });
}

const objs = listS3Objects()

//Test AWS Credentials
it('Tests', () => {
  cy.log(objs)
})
ERROR
The following error originated from your test code, not from Cypress.
Missing credentials in config, if using AWS_CONFIG_FILE, set AWS_SDK_LOAD_CONFIG=1
When Cypress detects uncaught errors originating from your test code it will automatically fail the current test.
Cypress could not associate this error to any specific test.
We dynamically generated a new test to display this failure.
node_modules/aws-sdk/lib/config.js:400:1
398 |
399 | function credError(msg, err) {
400 | return new AWS.util.error(err || new Error(), {
| ^
401 | code: 'CredentialsError',
402 | message: msg,
403 | name: 'CredentialsError'
I'd troubleshoot it this way:
Can you run the script to upload or download on its own, outside Cypress? If not, the problem is with the credentials.
If it isn't the credentials and the script works perfectly on its own, can it be imported into your spec and run? Let the script resolve and return a promise, then use the resolved value in your spec (see the sketch below).
Refer to this blog post.
Another option you could consider is cy.exec("aws command goes here").
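For the second point, here is a minimal sketch of a promise-returning helper used from a spec. The bucket name and region are taken from the question; note also that the aws-sdk option is secretAccessKey, not accessSecretKey, which is likely why the SDK reports missing credentials. This is an illustration, not a verified fix:

const AWS = require('aws-sdk');

// Helper that resolves with the listing instead of logging inside a callback.
function listS3Objects() {
  const s3 = new AWS.S3({
    accessKeyId: process.env.AWS_ACCESS_KEY_ID,
    secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY, // correct option name
    region: 'ap-southeast-1',
  });
  return s3.listObjects({ Bucket: 'testbucketName' }).promise();
}

it('lists objects from the bucket', () => {
  // cy.wrap() waits for the promise to resolve before continuing the chain.
  cy.wrap(listS3Objects()).then((data) => {
    cy.log(`Found ${data.Contents.length} objects`);
  });
});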
I am using Terraform to create a Lambda function and I already have the zip package in S3, but I get the error below:
RespMetadata: {
  StatusCode: 400,
  RequestID: "fa9b0e8b-02a6-4eaf-81ae-bf30fc6a1153"
},
Message_: "Could not unzip uploaded file. Please check your file, then try to upload again.",
Type: "User"
}
My code looks like this:
resource "aws_lambda_function" "test_lambda" {
s3_bucket = "bucket_name"
s3_key = "lambda.zip"
function_name = "Function_Test"
role = aws_iam_role.test_lambda_role.arn
handler = "config.lambda_handler"
runtime = "python3.8"
timeout = 180
vpc_config {
subnet_ids = ["subnet-123"]
security_group_ids = ["sg-123"]
}
environment {
variables = {
LOG_LEVEL = "DEBUG"
host = "https://abc:9098"
}
}
}
I recently had the same issue with Terraform. Terraform expects the extension of the filename to be .zip, not a Python source extension. In my case the extension of "filename" was .py instead of .zip, and it worked fine once I used the correct extension.
I'm trying to figure out how to send a dynamically created PDF to the client from the Next.js 9 API. I'm not sure if this is a Next.js error, a file-path error, or a webpack error. Guidance would be great.
filepath = /pages/api/pdf.js
var fs = require('fs')

export default function handle(req, res) {
  console.log(req.body)    // The request body
  console.log(req.query)   // The url querystring
  console.log(req.cookies) // The passed cookies
  var stream = fs.createReadStream(destinationPDF);
  var filename = "filled.pdf";
  // Be careful of special characters
  filename = encodeURIComponent(filename);
  // Ideally this should strip them
  res.setHeader('Content-disposition', 'inline; filename="' + filename + '"');
  res.setHeader('Content-type', 'application/pdf');
  stream.pipe(res);
}
The error
Error: Failed to open PDF file:
../../public/static/85.pdf
resolves to localhost:3000/static/85.pdf
Error of concern:
let data = _babel_runtime_corejs2_core_js_object_assign__WEBPACK_IMPORTED_MODULE_0___default()(fdfData, fieldNames);
It's currently not possible to read files from API routes or pages.
https://github.com/zeit/next.js/pull/8334
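If the Next.js version in use does allow filesystem reads from API routes (the linked PR tracks that work), a commonly suggested pattern is to resolve the file against process.cwd() instead of a relative ../../ path, since the route is bundled by webpack and relative paths no longer point at the source tree. A hedged sketch; the public/static/85.pdf location comes from the error message, everything else is assumed:

import fs from 'fs';
import path from 'path';

export default function handle(req, res) {
  // Resolve against the project root at runtime rather than relative to this file.
  const destinationPDF = path.join(process.cwd(), 'public', 'static', '85.pdf');
  const filename = encodeURIComponent('filled.pdf');

  res.setHeader('Content-Disposition', `inline; filename="${filename}"`);
  res.setHeader('Content-Type', 'application/pdf');
  fs.createReadStream(destinationPDF).pipe(res);
}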
I'm simply trying to follow this tutorial on how to upload files to GCS with Node and Express. But the following error keeps causing my app to crash. Usually, I am able to upload one file without a problem on the first run, but I get this error after running a few requests, even with different files. When I try to upload, say, 5 files at a time, this error crashes my app even on the first run. I see the process is trying to rename a file in the .config folder. Is that normal behavior? If so, is there a workaround?
Windows: v10.0.10586
Node: v4.3.1
Express: v4.13.1
Error: EPERM: operation not permitted, rename 'C:\Users\James Wang\.config\configstore\gcs-resumable-upload.json.2873606827' -> 'C:\Users\James Wang\.config\configstore\gcs-resumable-upload.json'
    at Error (native)
    at Object.fs.renameSync (fs.js:681:18)
    at Function.writeFileSync [as sync]
    at Object.create.all.set (C:\Users\James Wang\gi-cms-backend\node_modules\configstore\index.js:62:21)
    at Object.Configstore.set (C:\Users\James Wang\gi-cms-backend\node_modules\configstore\index.js:93:11)
    at Upload.set (C:\Users\James Wang\gi-cms-backend\node_modules\gcs-resumable-upload\index.js:264:20)
    at C:\Users\James Wang\gi-cms-backend\node_modules\gcs-resumable-upload\index.js:60:14
    at C:\Users\James Wang\gi-cms-backend\node_modules\gcs-resumable-upload\index.js:103:5
    at Request._callback (C:\Users\James Wang\gi-cms-backend\node_modules\gcs-resumable-upload\index.js:230:7)
    at Request.self.callback (C:\Users\James Wang\gi-cms-backend\node_modules\request\request.js:199:22)
    at emitTwo (events.js:87:13)
    at Request.emit (events.js:172:7)
    at Request.<anonymous> (C:\Users\James Wang\gi-cms-backend\node_modules\request\request.js:1036:10)
    at emitOne (events.js:82:20)
    at Request.emit (events.js:169:7)
    at IncomingMessage.<anonymous> (C:\Users\James Wang\gi-cms-backend\node_modules\request\request.js:963:12)
[nodemon] app crashed - waiting for file changes before starting...
UPDATE:
After setting {resumable: false} as suggested by @stephenplusplus in this post, I am no longer getting the "EPERM: operation not permitted" error. But I now run into an { [Error: ETIMEDOUT] code: 'ETIMEDOUT', connection: false } error when trying to upload multiple files at a time with the largest file greater than 1.5 MB; the other files get uploaded successfully.
For more information: I am able to upload files one by one when the files are no greater than ~2.5 MB. If I try to upload 3 files at a time, I can only do so with files no greater than ~1.5 MB.
Is the "operation not permitted" issue described in the question a Windows-specific thing, and does the timeout issue happen only after I set resumable = false?
I'm using Express and multer with Node.
This is the code I'm using now:
// Express middleware that will handle an array of files. req.files is an object of files keyed by
// field name, received from multer's .fields([{ name, maxCount }]) function. This function should
// handle the upload process of the files asynchronously.
function sendFilesToGCS(req, res, next) {
  if (!req.files) { return next(); }

  function stream(file, key, folder) {
    var gcsName = Date.now() + file.originalname;
    var gcsFile = bucket.file(gcsName);
    var writeStream = gcsFile.createWriteStream({ resumable: false });
    console.log(key);
    console.log('Start uploading: ' + file.originalname);
    writeStream.on('error', function(err) {
      console.log(err);
      res.status(501).send(err);
    });
    writeStream.on('finish', function() {
      folder.incrementFinishCounter();
      req.files[key][0].cloudStorageObject = gcsName;
      req.files[key][0].cloudStoragePublicUrl = getPublicUrl(gcsName);
      console.log('Finish uploading: ' + req.files[key][0].cloudStoragePublicUrl);
      folder.beginUploadNext();
    });
    writeStream.end(file.buffer);
  };

  var Folder = function(files) {
    var self = this;
    self.files = files;
    self.reqFilesKeys = Object.keys(files); // reqFilesKeys is an array of keys parsed from req.files
    self.nextInQuene = 0;   // Keep track of the next file to be uploaded, must be less than reqFilesKeys.length
    self.finishCounter = 0; // Keep track of how many files have been uploaded, must be less than reqFilesKeys.length
    console.log(this.reqFilesKeys.length + ' files to upload');
  };

  // This function is used to initiate the upload process.
  // It's also called in the on-finish listener of a file's write stream,
  // which will start uploading the next file in the queue.
  Folder.prototype.beginUploadNext = function() {
    // If there's still a file left to upload,
    if (this.finishCounter < this.reqFilesKeys.length) {
      // and if there's still a file left in the queue
      if (this.nextInQuene < this.reqFilesKeys.length) {
        // upload the file
        var fileToUpload = this.files[this.reqFilesKeys[this.nextInQuene]][0];
        stream(fileToUpload, this.reqFilesKeys[this.nextInQuene], this);
        // Increment the nextInQuene counter, and get the next one ready
        this.nextInQuene++;
      }
    } else {
      console.log('Finish all upload!!!!!!!!!!!!!!!!!!!!!!');
      next();
    }
  };

  Folder.prototype.incrementFinishCounter = function() {
    this.finishCounter++;
    console.log('Finished ' + this.finishCounter + ' files');
  };

  var folder = new Folder(req.files);

  // Begin upload with 3 streams
  /*for(var i = 0; i < 3; i++) {
    folder.beginUploadNext();
  }*/

  // Upload files one by one
  folder.beginUploadNext();
}
I had the same issue with bower. Run the following command: bower cache clean --allow-root.
If this does not solve the problem, try again after disabling your antivirus.