Using temporary credentials gives SignatureDoesNotMatch issue - file upload

We are working on browser-based S3 multipart file upload using EvaporateJS, with pre-signed URLs and temporary credentials.
The following is my configuration:
var amz_headers_common = {};
var amz_headers_at_initiate = {};
var amz_headers_at_upload = {};
var amz_headers_at_complete = {};
amz_headers_common['x-amz-acl'] = 'private';
amz_headers_common['x-amz-security-token'] = '<?=AWS_TOKEN;?>';
amz_headers_at_initiate['x-amz-acl'] = 'private';
amz_headers_at_initiate['x-amz-security-token'] = '<?=AWS_TOKEN;?>';
var customAuth = $("#signingMethod")[0].checked;

Evaporate.create({
    signerUrl: customAuth ? undefined : '<?=AWS_SIGNER_URL;?>',
    aws_key: '<?=AWS_KEY;?>',
    bucket: '<?=AWS_S3_BUCKET;?>',
    cloudfront: false,
    computeContentMd5: true,
    cryptoMd5Method: function (data) { return AWS.util.crypto.md5(data, 'base64'); },
    cryptoHexEncodedHash256: function (data) { return AWS.util.crypto.sha256(data, 'hex'); },
    logging: true,
    s3Acceleration: true,
    signTimeout: 10,
    s3FileCacheHoursAgo: 1,
    maxConcurrentParts: 5,
    allowS3ExistenceOptimization: true,
    sendCanonicalRequestToSignerUrl: true,
    customAuthMethod: customAuth ? doNotUseUnsafeJavaScriptV4Signer : undefined,
    evaporateChanged: function (file, evaporatingCount) {
        $('#totalParts').text(evaporatingCount);
        if (evaporatingCount > 0) {
            $("#pause-all, #pause-all-force, #cancel-all").show();
        } else if (evaporatingCount === 0) {
            $("#pause-all, #pause-all-force, #resume, #cancel-all").hide();
        }
    }
})
var promise = _e_.add({
    name: name,
    file: files[i],
    started: callback_methods.started,
    complete: callback_methods.complete,
    cancelled: callback_methods.cancelled,
    progress: callback_methods.progress,
    error: callback_methods.error,
    warn: callback_methods.warn,
    paused: callback_methods.paused,
    pausing: callback_methods.pausing,
    resumed: callback_methods.resumed,
    nameChanged: callback_methods.nameChanged,
    xAmzHeadersCommon: amz_headers_common,
    xAmzHeadersAtInitiate: amz_headers_at_initiate,
    xAmzHeadersAtUpload: amz_headers_at_upload,
    xAmzHeadersAtComplete: amz_headers_at_complete
},
{
    bucket: '<?=AWS_S3_BUCKET;?>', // Shows that the bucket can be changed per
    aws_key: '<?=AWS_KEY;?>'       // Shows that aws_key can be changed per
});
But I'm getting the following signature mismatch error:
AWS Code: SignatureDoesNotMatch, Message:The request signature we calculated does not match the signature you provided. Check your key and signing method.status:403
The log is below.
Without temporary credentials, the canonical request and V4 string to sign are:
POST
/test-video.mp474.6796611212833
uploads=
host:<bucket-name>.s3-accelerate.amazonaws.com
x-amz-date:20170428T055938Z
host;x-amz-date
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
V4 stringToSign: AWS4-HMAC-SHA256
20170428T055938Z
20170428/ap-southeast-1/s3/aws4_request
ce2c7c5fbbf58483efbd4bd244551d138353ebb7b7233d3fdce73e85d96fad8d
--------------------------------------------------------------------------------------
Using temporary credentials, the canonical request and V4 string to sign are:
initiate V4 CanonicalRequest: POST
/test-video.mp461.80892198840156
uploads=
host:<bucket-name>.s3-accelerate.amazonaws.com
x-amz-acl:private
x-amz-date:20170427T160400Z
x-amz-security-token:FQoDYXdzEDEaDIkS6zY1Oj8PQLLDVSK5A5pPusfWw81Yq3v0c4VqlyyQsBDW+PHosDuDnG8EYc9jlXD1tQwiTKU1E2Nf3aKcYmv/BHYwGwOen9GPStPeVBGbWNBzi1lT+B6xOnDvIXzelnuC6Eddt+jYIrjy9RVIKBN/s80NtVwfjmFK+93iOWJzdl2ruRSzQINZ+UuSmuthudkYLZzKy0pDmCrgIz8YCjXsjhN7FyeSZzXk9qmBDCASygVEFDNbkb/xidH/Yj7P9gYdsxY6YokV/CM8ZpAKmE8Lp+en+xs9rDclexFzCId8QyJaGj0xb205WoeRIHr8RSStvyounCxrmhWP6M/eijWTP/uHIfWVDqBadEPSgVWqcEzrW2iJ+0SGROb+In6BMmkEMaw+9L5M+lkgCfMDm5Fw9Ip8bujcb4okoNjEn6L+L0b1lm3yuqvLkT3oOzL3Sn48n3y0dXsYtt3yAq+C02bnfmgtYVQgv1C9TaMHrvipFADYNJ9U81HxQWlgvuSG5BEgqV59PIzGhwPFHais/GyA+a1bmxkyhzKEw1yq6F6+wQ+VBRskmPlahQd9ZK3wrnqvpQm+H7tD2YLkVVQb+AGKtRVU3mOL3yjgnYjIBQ==
host;x-amz-acl;x-amz-date;x-amz-security-token
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
V4 stringToSign: AWS4-HMAC-SHA256
20170427T160400Z
20170427/ap-southeast-1/s3/aws4_request
e0b18a695b23bc16b6727fd2dc417e445266111ebb4995794287a46304d2cc92
Please help me solve the signature mismatch issue.

Wherever you instantiate your S3 client for the bucket, you'll want to set your signature version to v4, since that's what your temporary credentials are configured for. Something like this:
var s3 = new AWS.S3({
    signatureVersion: 'v4'
});
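Note that when you sign with temporary (STS) credentials, the client doing the signing also needs the session token; otherwise the signature is derived from a different key than S3 expects. A minimal sketch, assuming your temporary access key, secret key, and session token are available as AWS_KEY, AWS_SECRET, and AWS_TOKEN (placeholder names):

// Sketch only: construct the client with the full temporary-credential triple.
var s3 = new AWS.S3({
    signatureVersion: 'v4',
    accessKeyId: AWS_KEY,
    secretAccessKey: AWS_SECRET,
    sessionToken: AWS_TOKEN // required when the key/secret are temporary credentials
});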

Related

React-Native AWS-SDK : Can't upload image to S3 buckets Error: "SignatureDoesNotMatch"

I'm using the AWS-SDK with react-native to upload an image to an S3 bucket.
First of all, my access and connectivity work fine: uploading plain text works, and listing objects and buckets works too.
Here is my code:
async function handleImage(capturedImage) {
    setImage(capturedImage);
    setScreenState(ScreenStates.LOADING);
    try {
        const result = await classifyImage(capturedImage);
        console.log(result.tensor_)
        // {dtype:"float32",shape:[…]}
        // dtype:"float32"
        // shape:[1,3,273,224]
        const blob_jpeg = new Blob([result.tensor_], {type: "image/jpeg"});
        console.log(typeof blob_jpeg._data)
        // object
        console.log(blob_jpeg._data)
        // {blobId:"e7a667ad-4363-4a2e-9850-8695f103e9e0",offset:0,size:1489546,type:"image/jpeg",__collector:{}}
        try {
            const keyName = 'image.jpeg';
            const putCommand = new PutObjectCommand({
                Bucket: "mybucket",
                ContentType: "image/jpeg",
                Key: "myimage",
                Body: blob_jpeg._data,
            });
            await s3.send(putCommand);
            console.log(
                'Successfully uploaded data to ' + bucketName + '/' + keyName);
        } catch (e) {
            console.log(e, e);
        }
My error:
Error: "The request signature we calculated does not match the signature you provided. Check your key and signing method." in SignatureDoesNotMatch: The request signature we calculated does not match the signature you provided. Check your key and signing method. << at construct (native) << at apply (native) << at i (#aws-sdk/client-s3.js:3:461197)
Any ideas on how I can solve this problem and successfully upload my image?

Cypress AWS S3 List/Upload/Download Objects

I am trying to list objects, and if that works, later download/upload files to AWS S3. The code below throws an error. What am I doing incorrectly that this doesn't work? I've passed the accessKeyId and accessSecretKey in all possible ways below. On Mac I have a config file and a credentials file; on Windows I have just one AWS credentials file, and I have also set this on my Windows machine:
setx AWS_SDK_LOAD_CONFIG=1
CODE
const AWS = require('aws-sdk');

function listS3Objects(file, name, type) {
    const s3bucket = new AWS.S3({
        accessKeyId: process.env.AWS_ACCESS_KEY_ID,
        accessSecretKey: process.env.AWS_SECRET_ACCESS_KEY,
        // accessKeyId: 'my actual key in credentials file', //aws_access_key_id
        // accessSecretKey: 'my actual secret key in credentials file', //aws_secret_access_key
        region: "ap-southeast-1"
    });
    const params = {
        Bucket: 'testbucketName',
    };
    s3bucket.listObjects(params, (err, data) => {
        if (err) { throw err; }
        /* eslint-disable no-console */
        console.log('Success!');
        console.log(data);
        return data;
        /* eslint-enable no-console */
    });
}
const objs = listS3Objects()

// Test AWS Credentials
it('Tests', () => {
    cy.log(objs)
})
ERROR
The following error originated from your test code, not from Cypress.
Missing credentials in config, if using AWS_CONFIG_FILE, set AWS_SDK_LOAD_CONFIG=1
When Cypress detects uncaught errors originating from your test code it will automatically fail the current test.
Cypress could not associate this error to any specific test.
We dynamically generated a new test to display this failure.
node_modules/aws-sdk/lib/config.js:400:1
398 |
399 | function credError(msg, err) {
400 | return new AWS.util.error(err || new Error(), {
| ^
401 | code: 'CredentialsError',
402 | message: msg,
403 | name: 'CredentialsError'
I'd troubleshoot it this way:
Can you run the script to upload or download on its own, outside Cypress? If not, then the problem is with the credentials.
If the credentials are fine and the script works on its own, can it be imported into your spec and run there? Let the script resolve and return a promise, then use the resolved value in your spec (see the sketch below).
Refer to this blog post.
Another option you could consider is cy.exec("aws command goes here").
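A minimal sketch of that promise-based approach, assuming the aws-sdk v2 client from the question and a hypothetical helper file (s3Helpers.js is a placeholder name):

// s3Helpers.js - hypothetical support file that wraps listObjects in a promise
const AWS = require('aws-sdk');

function listS3Objects(bucketName) {
    const s3 = new AWS.S3({
        accessKeyId: process.env.AWS_ACCESS_KEY_ID,
        secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY, // note: the option is secretAccessKey, not accessSecretKey
        region: 'ap-southeast-1'
    });
    // Return the promise so the spec can wait on it
    return s3.listObjects({ Bucket: bucketName }).promise();
}
module.exports = { listS3Objects };

// In the spec: cy.wrap() waits for the promise before the assertion runs
const { listS3Objects } = require('./s3Helpers');

it('lists objects in the bucket', () => {
    cy.wrap(listS3Objects('testbucketName')).then((data) => {
        cy.log(`Found ${data.Contents.length} objects`);
    });
});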

"Access key does not exist" when generating pre-signed S3 URL from Lambda function

I'm trying to generate a presigned URL from within a Lambda function, to get an existing S3 object.
(The Lambda function runs an ExpressJS app, and the code to generate the URL is called on one of its routes.)
I'm getting an error "The AWS Access Key Id you provided does not exist in our records." when I visit the generated URL, though, and Google isn't helping me:
<Error>
<Code>InvalidAccessKeyId</Code>
<Message>The AWS Access Key Id you provided does not exist in our records.</Message>
<AWSAccessKeyId>AKIAJ4LNLEBHJ5LTJZ5A</AWSAccessKeyId>
<RequestId>DKQ55DK3XJBYGKQ6</RequestId>
<HostId>IempRjLRk8iK66ncWcNdiTV0FW1WpGuNv1Eg4Fcq0mqqWUATujYxmXqEMAFHAPyNyQQ5tRxto2U=</HostId>
</Error>
The Lambda function is defined via AWS SAM and given bucket access via the predefined S3CrudPolicy template:
ExpressLambdaFunction:
  Type: AWS::Serverless::Function
  Properties:
    FunctionName: ExpressJSApp
    Description: Main website request handler
    CodeUri: ../lambda.zip
    Handler: lambda.handler
    [SNIP]
    Policies:
      - S3CrudPolicy:
          BucketName: my-bucket-name
The URL is generated via the AWS SDK:
const router = require('express').Router();
const AWS = require('aws-sdk');

router.get('/', (req, res) => {
    const s3 = new AWS.S3({
        region: 'eu-west-1',
        signatureVersion: 'v4'
    });
    const params = {
        'Bucket': 'my-bucket-name',
        'Key': 'my-file-name'
    };
    s3.getSignedUrl('getObject', params, (error, url) => {
        res.send(`<p>${url}</p>`)
    });
});
What's going wrong? Do I need to pass credentials explicitly when calling getSignedUrl() from within a Lambda function? Doesn't the function's execution role supply those? Am I barking up the wrong tree?
tl;dr: Make sure you have the correct order of signature v4 headers/form-data fields in your request.
I had the exact same issue.
I am not sure if this is the solution for everyone encountering the problem, but I learned the following:
The error message, and other misleading error messages, can occur if you don't use the correct order of the security headers. In my case I was using the endpoint to create a presigned URL for POSTing a file to upload it. In that case, you need to make sure that the security-relevant fields in your form-data are in the correct order. For signature version 4 it is:
key
x-amz-algorithm
x-amz-credential
x-amz-date
policy
x-amz-security-token
x-amz-signature
In the special case of a POST request to a presigned URL for uploading a file, it's important that the file comes AFTER the security fields (see the sketch below).
After that, the request works as expected.
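A minimal sketch of that ordering, assuming a hypothetical presigned object whose url and fields properties hold the values returned by your presigning call (names are placeholders):

// Build the multipart form body in the order listed above; the file goes last.
const form = new FormData();
form.append('key', presigned.fields['key']);
form.append('x-amz-algorithm', presigned.fields['x-amz-algorithm']);
form.append('x-amz-credential', presigned.fields['x-amz-credential']);
form.append('x-amz-date', presigned.fields['x-amz-date']);
form.append('policy', presigned.fields['policy']);
form.append('x-amz-security-token', presigned.fields['x-amz-security-token']);
form.append('x-amz-signature', presigned.fields['x-amz-signature']);
form.append('file', fileBlob); // the file must come after the security fields

fetch(presigned.url, { method: 'POST', body: form });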
I can't say for certain but I'm guessing this may have something to do with you using the old SDK. Here it is w/ v3 of the SDK. You may need to massage it a little more.
const { getSignedUrl } = require("@aws-sdk/s3-request-presigner");
const { S3Client, GetObjectCommand } = require("@aws-sdk/client-s3");
// ...
const client = new S3Client({ region: 'eu-west-1' });
const params = {
    'Bucket': 'my-bucket-name',
    'Key': 'my-file-name'
};
const command = new GetObjectCommand(params);
getSignedUrl(client, command)
    .then((url) => res.send(`<p>${url}</p>`))
    .catch((error) => res.status(500).send(error.message));
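In v3, getSignedUrl returns a promise rather than taking a callback, so inside an async route handler you could also write it like this sketch (the route path and expiresIn value are assumptions):

router.get('/', async (req, res) => {
    const command = new GetObjectCommand({ 'Bucket': 'my-bucket-name', 'Key': 'my-file-name' });
    const url = await getSignedUrl(client, command, { expiresIn: 3600 }); // expiresIn is optional, in seconds
    res.send(`<p>${url}</p>`);
});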

SignatureDoesNotMatch. The request signature we calculated does not match the signature you provided. Check your key and signing method

I am trying to implement auto-update functionality in an Electron app using a private S3 bucket, but it is giving me a signature mismatch error. Can anyone please help me with this?
I have used the following code structure:
let opts = {
    service: 's3',
    region: 'us-east-1',
    host: 'bucket-name.s3.amazonaws.com',
    path: '/latest.yml'
};
console.log(opts);

aws4.sign(opts, {
    accessKeyId: 'XXXXXXXXXXXXXXXXXXXX',
    secretAccessKey: 'XXXXXXXXXXXXXXXX'
})
console.log(opts.headers);

autoUpdater.requestHeaders = opts.headers;
autoUpdater.checkForUpdates();

autoUpdater.on('checking-for-update', () => {
})
The problem should be the / before latest.yml in your opts. Removing it should work.
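That is, the adjusted opts would look like this sketch (only the path changes, per the suggestion above):

let opts = {
    service: 's3',
    region: 'us-east-1',
    host: 'bucket-name.s3.amazonaws.com',
    path: 'latest.yml' // no leading slash
};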

Unable to verify the first certificate using Amazon SDK and Minio

Trying to connect to a minio server using the following code:
var AWS = require('aws-sdk');

var s3 = new AWS.S3({
    accessKeyId: 'minio',
    secretAccessKey: 'minio123',
    endpoint: 'https://minio.dev',
    s3ForcePathStyle: true, // needed with minio?
    signatureVersion: 'v4',
    sslEnabled: false,
    rejectUnauthorized: false
});

// putObject operation.
var params = {Bucket: 'documents', Key: 'testobject', Body: 'Hello from MinIO!!'};
s3.putObject(params, function(err, data) {
    if (err)
        console.log(err)
    else
        console.log("Successfully uploaded data to documents/testobject");
});

// getObject operation.
var params = {Bucket: 'documents', Key: 'testobject'};
var file = require('fs').createWriteStream('/tmp/mykey');
s3.getObject(params).
    on('httpData', function(chunk) { file.write(chunk); }).
    on('httpDone', function() { file.end(); }).
    send();
I get the following error:
{ Error: unable to verify the first certificate
at TLSSocket.onConnectSecure (_tls_wrap.js:1051:34)
at TLSSocket.emit (events.js:189:13)
at TLSSocket.EventEmitter.emit (domain.js:441:20)
at TLSSocket._finishInit (_tls_wrap.js:633:8)
message: 'unable to verify the first certificate',
code: 'NetworkingError',
region: 'us-east-1',
hostname: 'minio.dev',
retryable: true,
time: 2019-07-11T23:38:45.382Z }
I have passed the option "sslEnabled: false", but this doesn't change anything. I've also tried to disable SSL on the Node side, and that also fails to change the behavior.
Does anybody have any ideas on how to ignore the self-signed cert error? (if that is the issue, which I believe it is)
const AWS = require('aws-sdk');
const https = require('https');
// Allow use with Minio
AWS.NodeHttpClient.sslAgent = new https.Agent({ rejectUnauthorized: process.env.NODE_TLS_REJECT_UNAUTHORIZED !== '0' });
// the rest of the code snippet remains unchanged
rejectUnauthorized: false is the key. In this example, I've tied it to the presence of a commonly used environment variable that toggles the same behavior in the request module. The AWS SDK doesn't use that variable in its API, but reusing it seemed appropriate since it performs the same function.
Now if NODE_TLS_REJECT_UNAUTHORIZED=0 is set, the whole Node process, including the AWS SDK, will work with mocked HTTPS endpoints.
WARNING: Only use this in a development environment, such as when mocking public services on your local workstation. It can leave you open to man-in-the-middle attacks!
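If you'd rather not change a global, an alternative sketch is to scope the relaxed TLS check to a single client via the SDK's httpOptions agent (same warning applies; development use only):

const AWS = require('aws-sdk');
const https = require('https');

// Development only: this agent disables certificate verification for this one client.
const s3 = new AWS.S3({
    accessKeyId: 'minio',
    secretAccessKey: 'minio123',
    endpoint: 'https://minio.dev',
    s3ForcePathStyle: true,
    signatureVersion: 'v4',
    httpOptions: {
        agent: new https.Agent({ rejectUnauthorized: false })
    }
});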