Pardon me if this has been asked before. I am new to React and I'm developing a feature to upload files to an S3 bucket using https://yuvaleros.github.io/material-ui-dropzone/.
Can someone help me out with how this should be done?
I am calling my getPresignedUrl method from the onDrop callback provided by the library, but I can't figure out how to upload the actual file to S3.
export default function UploadFiles(props) {
  const { formData, handleChange } = props;
  const classes = useStyles();

  const uploadFiles = (fileName) => {
    api.uploadFiles(fileName).then((res) => {
      const { statusCode } = res.data;
      if (statusCode === 200) {
        // do something
        // setSnackbar({
        //   ...snackbar,
        //   ...{
        //     show: true,
        //     message: `Success`,
        //     type: "success",
        //   },
        // });
      } else {
        console.log("this errored out");
        // do something
      }
    });
  };

  return (
    <React.Fragment>
      <div className={classes.dropzonePreviewHeader}>
        <DropzoneArea
          showPreviews={true}
          showPreviewsInDropzone={false}
          useChipsForPreview
          previewGridProps={{ container: { spacing: 1, direction: 'row' } }}
          previewChipProps={{ classes: { root: classes.previewChip } }}
          previewText="Selected files"
          onDrop={files => {
            files.forEach(item => uploadFiles(item.name));
          }}
        />
      </div>
    </React.Fragment>
  );
}
There's an existing library which does this: the aws-s3 package on npm.
Sample code from its README:
import S3 from 'aws-s3';

const config = {
  bucketName: 'myBucket',
  dirName: 'photos', /* optional */
  region: 'eu-west-1',
  accessKeyId: 'ANEIFNENI4324N2NIEXAMPLE',
  secretAccessKey: 'cms21uMxçduyUxYjeg20+DEkgDxe6veFosBT7eUgEXAMPLE',
  s3Url: 'https://my-s3-url.com/', /* optional */
}

const S3Client = new S3(config);

/* Notice that if you don't provide a dirName, the file will be automatically uploaded to the root of your bucket */

/* This is optional */
const newFileName = 'my-awesome-file';

S3Client
  .uploadFile(file, newFileName)
  .then(data => console.log(data))
  .catch(err => console.error(err))

/**
 * {
 *   Response: {
 *     bucket: "your-bucket-name",
 *     key: "photos/image.jpg",
 *     location: "https://your-bucket.s3.amazonaws.com/photos/image.jpg"
 *   }
 * }
 */
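To answer the original question more directly: once the backend hands back a presigned URL, the file itself is uploaded with a plain HTTP PUT. A minimal sketch, assuming api.uploadFiles resolves with the presigned URL in a url field on res.data (an assumption — adjust to your actual response shape):

// Hypothetical flow: exchange the file name for a presigned URL, then PUT the raw file.
const uploadToS3 = async (file) => {
  const res = await api.uploadFiles(file.name); // assumed to return the presigned URL
  const { url } = res.data; // assumption: adapt to your API's response shape
  const s3Response = await fetch(url, {
    method: "PUT",
    // Content-Type must match whatever was used when the URL was signed
    headers: { "Content-Type": file.type },
    body: file, // the File object itself, not just its name
  });
  if (!s3Response.ok) {
    throw new Error(`S3 upload failed: ${s3Response.status}`);
  }
};

// In DropzoneArea, pass the whole File object instead of item.name:
// onDrop={files => files.forEach(uploadToS3)}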
So I'm trying to send an image to our server with React Native using a GraphQL query, and I don't know why, but it always returns an error: [CombinedError: [Network] Network request failed].
The query:
import { graphql } from '../../gql';
import { gql, useMutation } from 'urql';

const AddProfilePicture_Mutation = graphql(`
  mutation AddPicture_Mutation ($userId: ID!, $picture: Upload!) {
    uploadProfilePicture(input: {
      userId: $userId
      picture: $picture
    }) {
      id
    }
  }`);

export const useAddProfilePicture = () => {
  const [{ fetching, error }, execute] = useMutation(AddProfilePicture_Mutation);
  return {
    error: !!error,
    fetching,
    addProfilePicture: execute,
  };
};
And the calling code:
const pictureHandler = async () => {
  const options = {
    mediaType: 'photo' as MediaType,
    includeBase64: true,
    selectionLimit: 1,
  };
  const profilePicture = await launchImageLibrary(options);
  if (profilePicture.assets?.[0].fileSize && profilePicture.assets?.[0].fileSize > MAXFILESIZE) {
    showError(t('profileScreen.PictureSize'));
    return; // bail out instead of uploading an oversized file
  }
  if (profilePicture.assets?.[0].uri && profilePicture.assets[0].fileName && profilePicture.assets[0].type) {
    // const myBlob = await fetch(profilePicture.assets[0].uri).then(r => r.blob());
    const blob = new Blob([profilePicture.assets[0].base64 as BlobPart], {
      type: profilePicture.assets[0].type,
    });
    const file = new File([blob], profilePicture.assets[0].fileName, { type: `${profilePicture.assets[0].type}` });
    const { error } = await addProfilePicture(
      { userId: userId!, picture: file },
      { fetchOptions: { headers: { 'graphql-require-preflight': '' } } }
    );
    if (!error) {
      showSuccess(t('profileScreen.PictureSuccessAdded'));
      navigation.navigate('UserProfile');
    } else {
      console.log(error);
      showError(t('profileScreen.PictureErrorAdded'));
    }
  }
};
I've been trying everything I found on the web: FormData, react-native-blob-util and rn-fetch-blob. If I try sending anything other than a File, the server rejects it and says, for example:
Variable 'picture' has an invalid value: Expected type org.springframework.web.multipart.MultipartFile but was java.util.LinkedHashMap]
Update:
After long research and help from other programmers, we never found the answer. We opened a new endpoint in the backend specifically for the uploaded picture and used a regular fetch POST.
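For anyone landing here, the workaround described above (a plain multipart POST to a dedicated endpoint) looks roughly like this. A sketch only; the endpoint URL and the 'picture' field name are hypothetical:

// Hypothetical endpoint and field name; adjust to your backend.
const uploadViaRest = async (
  userId: string,
  asset: { uri: string; fileName: string; type: string }
) => {
  const formData = new FormData();
  // React Native's FormData accepts a { uri, name, type } descriptor for files.
  formData.append('picture', {
    uri: asset.uri,
    name: asset.fileName,
    type: asset.type,
  } as any);
  const response = await fetch(`https://api.example.com/users/${userId}/picture`, {
    method: 'POST',
    body: formData, // let fetch set the multipart boundary itself
  });
  if (!response.ok) throw new Error(`Upload failed: ${response.status}`);
  return response.json();
};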
I am trying to set up the front end for GraphQL file upload with apollo-boost-upload. The backend code is based on this link:
https://dev.to/dnature/handling-file-uploads-with-apollo-server-2-0-14n7.
It now reaches the resolver breakpoint after adding the following lines to the server.js file
const { apolloUploadExpress } = require("apollo-upload-server");
app.use(apolloUploadExpress({ maxFileSize: 1000000000, maxFiles: 10 }));
And after modifying the schema for the upload type
scalar Upload
Here is the Vue component
<input
  type="file"
  style="display:none"
  ref="fileInput"
  accept="image/*"
  @change="upload"
>
// Upload method
upload({ target: { files = [] } }) {
  if (!files.length) {
    return;
  }
  this.logoImage = files[0];
},

// Dispatching the action from the Vue component
this.$store.dispatch("uploadLogo", { image: this.logoImage });
// Vuex action
const uploadLogo = async (context, payload) => {
  context.commit("setLoading", true);
  try {
    const { data } = await apolloClient.mutate({
      mutation: UPLOAD_LOGO,
      variables: { file: payload.image },
      context: {
        hasUpload: true,
      },
    });
    context.commit("setLoading", false);
    console.log("Logo:", data.uploadLogo);
  } catch (error) {
    context.commit("setLoading", false);
    console.log(error);
  }
};
// Mutation
export const UPLOAD_LOGO = gql`
  mutation uploadLogo($file: Upload!) {
    uploadLogo(file: $file) {
      _id
      path
      filename
      mimetype
      user {
        _id
      }
    }
  }
`;
// Apollo client config in main.js
import ApolloClient from "apollo-boost-upload";
import { InMemoryCache } from "apollo-boost";
import VueApollo from "vue-apollo";

// Set up Apollo Client
export const defaultClient = new ApolloClient({
  uri: "http://localhost:4000/graphql",
  cache: new InMemoryCache({
    addTypename: false,
  }),
  fetchOptions: {
    credentials: "include",
  },
  request: (operation) => {
    // if no token in local storage, add it
    if (!localStorage.someToken) {
      localStorage.setItem("someToken", "");
    }
    // the operation adds the token to the authorization header, which is sent to the backend
    operation.setContext({
      headers: {
        authorization: "Bearer " + localStorage.getItem("someToken"),
      },
    });
  },
  onError: ({ graphQLErrors, networkError }) => {
    if (networkError) {
      console.log("[networkError]", networkError);
    }
    if (graphQLErrors) {
      for (const error of graphQLErrors) {
        console.dir(error);
        if (error.name === "AuthenticationError") {
          // set auth error in state
          store.commit("setError", error);
          // sign the user out to clear the error
          store.dispatch("signUserOut");
        }
      }
    }
  },
});
Here is the updated typedef (old code commented out) from the backend, if that helps identify the issue:
const logoUploadTypeDefs = gql`
  type File {
    _id: ID!
    path: String!
    filename: String!
    mimetype: String!
    encoding: String!
    user: User
  }

  # input Upload {
  #   name: String!
  #   type: String!
  #   size: Int!
  #   path: String!
  # }

  scalar Upload

  type Mutation {
    uploadLogo(file: Upload!): File
  }

  type Query {
    info: String
    logo: File!
  }
`;
Now, the Node app crashes with "Maximum call stack size exceeded at _openReadFs..." in the log.
I had to change "apollo-upload-server" to "graphql-upload"
change 1:
Commented out "apollo-upload-server" and used "graphql-upload"
// const { apolloUploadExpress } = require("apollo-upload-server");
const {
graphqlUploadExpress, // A Koa implementation is also exported.
} = require("graphql-upload");
change 2:
In the middleware, used this
app.use(graphqlUploadExpress());
await apolloServer.start();
instead of old code
app.use(apolloUploadExpress());// Not to be used
await apolloServer.start();
change 3:
Also, in the resolver, I added this:
Import Upload from graphql-upload in the resolver file
const { GraphQLUpload } = require("graphql-upload");
// ...

const resolvers = {
  // This maps the `Upload` scalar to the implementation provided
  // by the `graphql-upload` package.
  Upload: GraphQLUpload,
  Query: {
    // ...
  },
  Mutation: {
    // ...
  },
};
Refer to Apollo Docs for more details. This fixed the issue of Node crashing with error "Maximum call stack size exceeded at _openReadFs..."
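For completeness, consuming the upload inside the uploadLogo resolver with graphql-upload looks roughly like this — a sketch; the uploads directory and the persistence step are assumptions:

const fs = require("fs");
const path = require("path");
const { GraphQLUpload } = require("graphql-upload");

const resolvers = {
  Upload: GraphQLUpload,
  Mutation: {
    uploadLogo: async (_, { file }) => {
      // The Upload scalar resolves to { createReadStream, filename, mimetype, encoding }
      const { createReadStream, filename, mimetype, encoding } = await file;
      const filePath = path.join(__dirname, "uploads", filename); // assumed destination
      await new Promise((resolve, reject) =>
        createReadStream()
          .pipe(fs.createWriteStream(filePath))
          .on("finish", resolve)
          .on("error", reject)
      );
      // Persisting and returning the File document is up to your data layer.
      return { path: filePath, filename, mimetype, encoding };
    },
  },
};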
How do I automatically import JSON data from an S3 bucket into DynamoDB using Node.js and AWS Lambda?
import type { AWS } from '@serverless/typescript';

const serverlessConfiguration: AWS = {
  service: 'raj',
  frameworkVersion: '2',
  custom: {
    webpack: {
      webpackConfig: './webpack.config.js',
      includeModules: true,
    },
  },
  plugins: ['serverless-webpack'],
  provider: {
    name: 'aws',
    runtime: 'nodejs14.x',
    profile: 'server',
    apiGateway: {
      minimumCompressionSize: 1024,
      shouldStartNameWithService: true,
    },
    environment: {
      AWS_NODEJS_CONNECTION_REUSE_ENABLED: '1',
    },
    lambdaHashingVersion: '20201221',
  },
  // import the function via paths
  functions: {
    messageAdd: {
      handler: "src/now.handler",
      events: [
        {
          http: {
            path: 'addData',
            method: 'POST',
            cors: true,
          },
        },
      ],
    },
  },
};

module.exports = serverlessConfiguration;
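One thing to note: the messageAdd function above only has an http event, so it will never receive S3 records. For the Lambda to fire when a JSON file lands in the bucket, the Serverless Framework supports an s3 event. A sketch of what could sit alongside (or replace) the http entry, with a hypothetical bucket name:

// Inside `functions`, a hypothetical S3-triggered entry:
s3Import: {
  handler: 'src/now.handler',
  events: [
    {
      s3: {
        bucket: 'my-json-bucket',     // hypothetical bucket name
        event: 's3:ObjectCreated:*',  // fire on every new object
        rules: [{ suffix: '.json' }], // only react to JSON files
      },
    },
  ],
},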
const AWS = require('aws-sdk');

const docClient = new AWS.DynamoDB.DocumentClient();
const ddbTable = "s3todyb"; // module scope, so ddbLoader below can see it

// The Lambda handler
exports.handler = async (event) => {
  AWS.config.update({
    region: 'us-east-1', // use the appropriate region
    accessKeyId: '', // use your access key (in Lambda, the execution role normally provides credentials)
    secretAccessKey: '' // use your secret key
  });
  const s3 = new AWS.S3();

  console.log(JSON.stringify(event, null, 2));
  console.log('Using DDB table: ', ddbTable);

  await Promise.all(
    event.Records.map(async (record) => {
      try {
        console.log('Incoming record: ', record);

        // Get the original text from the object in the incoming event;
        // use the current `record`, not event.Records[0], so every record is read
        const originalText = await s3.getObject({
          Bucket: record.s3.bucket.name,
          Key: record.s3.object.key
        }).promise();

        // Upload JSON to DynamoDB
        const jsonData = JSON.parse(originalText.Body.toString('utf-8'));
        await ddbLoader(jsonData);
      } catch (err) {
        console.error(err);
      }
    })
  );
};
// Load JSON data to the DynamoDB table
const ddbLoader = async (data) => {
  // Separate into batches for upload
  let batches = [];
  const BATCH_SIZE = 25;

  while (data.length > 0) {
    batches.push(data.splice(0, BATCH_SIZE));
  }

  console.log(`Total batches: ${batches.length}`);
  let batchCount = 0;

  // Save each batch
  await Promise.all(
    batches.map(async (item_data) => {
      // Set up the params object for the DDB call
      const params = {
        RequestItems: {}
      };
      params.RequestItems[ddbTable] = [];

      item_data.forEach(item => {
        for (let key of Object.keys(item)) {
          // An AttributeValue may not contain an empty string
          if (item[key] === '')
            delete item[key];
        }

        // Build params
        params.RequestItems[ddbTable].push({
          PutRequest: {
            Item: {
              ...item
            }
          }
        });
      });

      // Push to DynamoDB in batches
      try {
        batchCount++;
        console.log('Trying batch: ', batchCount);
        const result = await docClient.batchWrite(params).promise();
        console.log('Success: ', result);
      } catch (err) {
        console.error('Error: ', err);
      }
    })
  );
};
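One caveat on the loader above: batchWrite can succeed while still returning UnprocessedItems, which this code silently drops. A minimal retry wrapper, as a sketch — swap it in for the docClient.batchWrite(params).promise() call:

// Retries any items DynamoDB reports back as unprocessed (e.g. due to throttling).
const batchWriteWithRetry = async (params, maxAttempts = 3) => {
  let result = await docClient.batchWrite(params).promise();
  let attempts = 1;
  while (
    result.UnprocessedItems &&
    Object.keys(result.UnprocessedItems).length > 0 &&
    attempts < maxAttempts
  ) {
    attempts++;
    result = await docClient
      .batchWrite({ RequestItems: result.UnprocessedItems })
      .promise();
  }
  return result;
};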
What I'm trying to do is get real-time transcription for video recorded in the browser with WebRTC. The use case is basically subtitles in real time, like Google Hangouts has.
So I have a WebRTC program running in the browser. It sends webm objects back to the server. They are linear32 audio encodings. Google Speech-to-Text only accepts LINEAR16 or FLAC.
Is there a way to convert linear32 to linear16 in real time?
Otherwise, has anyone been able to hook up WebRTC with Google Speech to get real-time transcriptions working?
Any advice on where to look to solve this problem would be great.
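On the conversion question itself: "linear32" here means 32-bit float PCM, and LINEAR16 is 16-bit signed integer PCM, so the core of the conversion is clamping each sample to [-1, 1] and scaling by the 16-bit range. A minimal sketch:

// Convert a buffer of 32-bit float samples to 16-bit signed PCM.
function floatTo16BitPCM(float32Array) {
  const int16 = new Int16Array(float32Array.length);
  for (let i = 0; i < float32Array.length; i++) {
    const s = Math.max(-1, Math.min(1, float32Array[i])); // clamp
    int16[i] = s < 0 ? s * 0x8000 : s * 0x7fff;           // scale to the int16 range
  }
  return int16;
}

The answer below takes essentially this approach (see downsampleBuffer), combined with resampling down to 16 kHz.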
Check out this repository, it might help you: https://github.com/muaz-khan/Translator
Translator.js is a JavaScript library built on top of the Google Speech-Recognition & Translation API to transcribe and translate voice and text. It supports many locales and brings globalization to WebRTC!
I had the same problem and failed with WebRTC. I recommend you use the Web Audio API instead if you are just interested in transcribing the audio from the video.
Here is how I did it with a Node.js server and a React client app. It is uploaded to GitHub here.
You need an audio worklet script. (Put it in the public folder, because that is where the API expects to find it.)
recorderWorkletProcessor.js (saved in public/src/worklets/recorderWorkletProcessor.js)
/**
 * An in-place replacement for ScriptProcessorNode using AudioWorklet
 */
class RecorderProcessor extends AudioWorkletProcessor {
  // 0. Determine the buffer size (this is the same as the 1st argument of ScriptProcessor)
  bufferSize = 2048;
  // 1. Track the current buffer fill level
  _bytesWritten = 0;
  // 2. Create a buffer of fixed size
  _buffer = new Float32Array(this.bufferSize);

  constructor() {
    super();
    this.initBuffer();
  }

  initBuffer() {
    this._bytesWritten = 0;
  }

  isBufferEmpty() {
    return this._bytesWritten === 0;
  }

  isBufferFull() {
    return this._bytesWritten === this.bufferSize;
  }

  /**
   * @param {Float32Array[][]} inputs
   * @returns {boolean}
   */
  process(inputs) {
    // Grabbing the 1st channel similar to ScriptProcessorNode
    this.append(inputs[0][0]);
    return true;
  }
  /**
   * @param {Float32Array} channelData
   */
  append(channelData) {
    if (this.isBufferFull()) {
      this.flush();
    }

    if (!channelData) return;

    for (let i = 0; i < channelData.length; i++) {
      this._buffer[this._bytesWritten++] = channelData[i];
    }
  }

  flush() {
    // trim the buffer if ended prematurely
    const buffer = this._bytesWritten < this.bufferSize ? this._buffer.slice(0, this._bytesWritten) : this._buffer;
    // Assumes a 44.1 kHz context; the AudioWorkletGlobalScope also exposes a
    // `sampleRate` global if you need the actual rate.
    const result = this.downsampleBuffer(buffer, 44100, 16000);
    this.port.postMessage(result);
    this.initBuffer();
  }
  downsampleBuffer(buffer, sampleRate, outSampleRate) {
    if (outSampleRate == sampleRate) {
      return buffer;
    }
    if (outSampleRate > sampleRate) {
      throw new Error("downsampling rate should be smaller than original sample rate");
    }
    var sampleRateRatio = sampleRate / outSampleRate;
    var newLength = Math.round(buffer.length / sampleRateRatio);
    var result = new Int16Array(newLength);
    var offsetResult = 0;
    var offsetBuffer = 0;
    while (offsetResult < result.length) {
      var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
      var accum = 0,
        count = 0;
      for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
        accum += buffer[i];
        count++;
      }
      // Clamp the averaged sample to [-1, 1] before scaling to 16-bit
      result[offsetResult] = Math.max(-1, Math.min(1, accum / count)) * 0x7fff;
      offsetResult++;
      offsetBuffer = nextOffsetBuffer;
    }
    return result.buffer;
  }
}

registerProcessor("recorder.worklet", RecorderProcessor);
Install socket.io-client on the front end:
npm i socket.io-client
React component code
/* eslint-disable react-hooks/exhaustive-deps */
import { default as React, useEffect, useState, useRef } from "react";
import { Button } from "react-bootstrap";
import Container from "react-bootstrap/Container";
import * as io from "socket.io-client";

const sampleRate = 16000;

const getMediaStream = () =>
  navigator.mediaDevices.getUserMedia({
    audio: {
      deviceId: "default",
      sampleRate: sampleRate,
      sampleSize: 16,
      channelCount: 1,
    },
    video: false,
  });

interface WordRecognized {
  final: boolean;
  text: string;
}
const AudioToText: React.FC = () => {
  const [connection, setConnection] = useState<io.Socket>();
  const [currentRecognition, setCurrentRecognition] = useState<string>();
  const [recognitionHistory, setRecognitionHistory] = useState<string[]>([]);
  const [isRecording, setIsRecording] = useState<boolean>(false);
  const [recorder, setRecorder] = useState<any>();
  const processorRef = useRef<any>();
  const audioContextRef = useRef<any>();
  const audioInputRef = useRef<any>();

  const speechRecognized = (data: WordRecognized) => {
    if (data.final) {
      setCurrentRecognition("...");
      setRecognitionHistory((old) => [data.text, ...old]);
    } else setCurrentRecognition(data.text + "...");
  };

  const connect = () => {
    connection?.disconnect();
    const socket = io.connect("http://localhost:8081");
    socket.on("connect", () => {
      console.log("connected", socket.id);
      setConnection(socket);
    });

    socket.emit("send_message", "hello world");
    socket.emit("startGoogleCloudStream");

    socket.on("receive_message", (data) => {
      console.log("received message", data);
    });

    socket.on("receive_audio_text", (data) => {
      speechRecognized(data);
      console.log("received audio text", data);
    });

    socket.on("disconnect", () => {
      console.log("disconnected", socket.id);
    });
  };

  const disconnect = () => {
    if (!connection) return;
    connection?.emit("endGoogleCloudStream");
    connection?.disconnect();
    processorRef.current?.disconnect();
    audioInputRef.current?.disconnect();
    audioContextRef.current?.close();
    setConnection(undefined);
    setRecorder(undefined);
    setIsRecording(false);
  };
  useEffect(() => {
    (async () => {
      if (connection) {
        if (isRecording) {
          return;
        }
        const stream = await getMediaStream();
        audioContextRef.current = new window.AudioContext();

        await audioContextRef.current.audioWorklet.addModule(
          "/src/worklets/recorderWorkletProcessor.js"
        );

        audioContextRef.current.resume();

        audioInputRef.current =
          audioContextRef.current.createMediaStreamSource(stream);

        processorRef.current = new AudioWorkletNode(
          audioContextRef.current,
          "recorder.worklet"
        );

        processorRef.current.connect(audioContextRef.current.destination);
        audioContextRef.current.resume();

        audioInputRef.current.connect(processorRef.current);

        processorRef.current.port.onmessage = (event: any) => {
          const audioData = event.data;
          connection.emit("send_audio_data", { audio: audioData });
        };
        setIsRecording(true);
      } else {
        console.error("No connection");
      }
    })();
    return () => {
      if (isRecording) {
        processorRef.current?.disconnect();
        audioInputRef.current?.disconnect();
        if (audioContextRef.current?.state !== "closed") {
          audioContextRef.current?.close();
        }
      }
    };
  }, [connection, isRecording, recorder]);
  return (
    <React.Fragment>
      <Container className="py-5 text-center">
        <Container fluid className="py-5 bg-primary text-light text-center">
          <Container>
            <Button
              className={isRecording ? "btn-danger" : "btn-outline-light"}
              onClick={connect}
              disabled={isRecording}
            >
              Start
            </Button>
            <Button
              className="btn-outline-light"
              onClick={disconnect}
              disabled={!isRecording}
            >
              Stop
            </Button>
          </Container>
        </Container>
        <Container className="py-5 text-center">
          {recognitionHistory.map((tx, idx) => (
            <p key={idx}>{tx}</p>
          ))}
          <p>{currentRecognition}</p>
        </Container>
      </Container>
    </React.Fragment>
  );
};

export default AudioToText;
server.js
const express = require("express");
const speech = require("@google-cloud/speech");

// use logger
const logger = require("morgan");
// use body parser
const bodyParser = require("body-parser");
// use CORS
const cors = require("cors");

const http = require("http");
const { Server } = require("socket.io");

const app = express();

app.use(cors());
app.use(logger("dev"));
app.use(bodyParser.json());

const server = http.createServer(app);

const io = new Server(server, {
  cors: {
    origin: "http://localhost:3000",
    methods: ["GET", "POST"],
  },
});

// TODO: run in terminal first to set up credentials:
// export GOOGLE_APPLICATION_CREDENTIALS="./speech-to-text-key.json"
const speechClient = new speech.SpeechClient();
io.on("connection", (socket) => {
let recognizeStream = null;
console.log("** a user connected - " + socket.id + " **\n");
socket.on("disconnect", () => {
console.log("** user disconnected ** \n");
});
socket.on("send_message", (message) => {
console.log("message: " + message);
setTimeout(() => {
io.emit("receive_message", "got this message" + message);
}, 1000);
});
socket.on("startGoogleCloudStream", function (data) {
startRecognitionStream(this, data);
});
socket.on("endGoogleCloudStream", function () {
console.log("** ending google cloud stream **\n");
stopRecognitionStream();
});
socket.on("send_audio_data", async (audioData) => {
io.emit("receive_message", "Got audio data");
if (recognizeStream !== null) {
try {
recognizeStream.write(audioData.audio);
} catch (err) {
console.log("Error calling google api " + err);
}
} else {
console.log("RecognizeStream is null");
}
});
  function startRecognitionStream(client) {
    console.log("* StartRecognitionStream\n");
    try {
      recognizeStream = speechClient
        .streamingRecognize(request)
        .on("error", console.error)
        .on("data", (data) => {
          const result = data.results[0];
          const isFinal = result.isFinal;

          const transcription = data.results
            .map((result) => result.alternatives[0].transcript)
            .join("\n");

          console.log(`Transcription: `, transcription);

          client.emit("receive_audio_text", {
            text: transcription,
            final: isFinal,
          });
        });
    } catch (err) {
      console.error("Error streaming google api " + err);
    }
  }

  function stopRecognitionStream() {
    if (recognizeStream) {
      console.log("* StopRecognitionStream \n");
      recognizeStream.end();
    }
    recognizeStream = null;
  }
});

server.listen(8081, () => {
  console.log("WebSocket server listening on port 8081.");
});
// =========================== GOOGLE CLOUD SETTINGS ================================ //

// The encoding of the audio file, e.g. 'LINEAR16'
// The sample rate of the audio file in hertz, e.g. 16000
// The BCP-47 language code to use, e.g. 'en-US'
const encoding = "LINEAR16";
const sampleRateHertz = 16000;
const languageCode = "en-US"; // en-US
const alternativeLanguageCodes = ["en-US", "ko-KR"];

const request = {
  config: {
    encoding: encoding,
    sampleRateHertz: sampleRateHertz,
    languageCode: languageCode,
    // alternativeLanguageCodes: alternativeLanguageCodes,
    enableWordTimeOffsets: true,
    enableAutomaticPunctuation: true,
    enableWordConfidence: true,
    enableSpeakerDiarization: true,
    diarizationSpeakerCount: 2,
    model: "video",
    // model: "command_and_search",
    useEnhanced: true,
    speechContexts: [
      {
        phrases: ["hello", "안녕하세요"],
      },
    ],
  },
  interimResults: true,
};
I am trying to upload an image to Amazon S3. If possible, can anyone provide links/docs for how to upload to Amazon S3? Any help much appreciated.
S3 options:

// this.state.s3options in YourComponent
{
  "url": "https://yourapp.s3.eu-central-1.amazonaws.com",
  "fields": {
    "key": "cache/22d65141b48c5c44eaf93a0f6b0abc30.jpeg",
    "policy": "eyJleHBpcm...1VDE0Mzc1OVoifV19",
    "x-amz-credential": "AK...25/eu-central-1/s3/aws4_request",
    "x-amz-algorithm": "AWS4-HMAC-SHA256",
    "x-amz-date": "20161125T143759Z",
    "x-amz-signature": "87863c360...b9b304bfe650"
  }
}
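Those fields are the output of a presigned POST, generated server-side so the secret key never ships with the app. A sketch of generating them with the v2 JavaScript SDK — the bucket name, key, and size limit here are hypothetical:

// Server-side (Node.js): generate the policy the client will POST against.
const AWS = require('aws-sdk');
const s3 = new AWS.S3({ region: 'eu-central-1' });

s3.createPresignedPost({
  Bucket: 'yourapp',                    // hypothetical bucket
  Fields: { key: 'cache/avatar.jpeg' }, // key the upload will be stored under
  Expires: 300,                         // seconds the policy stays valid
  Conditions: [['content-length-range', 0, 10 * 1024 * 1024]], // cap at 10 MB
}, (err, data) => {
  if (err) throw err;
  // data.url and data.fields have the same shape as this.state.s3options
  console.log(data);
});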
Component:

class YourComponent extends Component {
  // ...

  // fileSource looks like: {uri: "content://media/external/images/media/13", isStatic: true}
  async uploadFileToS3(fileSource) {
    try {
      var formData = new FormData();

      // Prepare the formData from the S3 options
      Object.keys(this.state.s3options.fields).forEach((key) => {
        formData.append(key, this.state.s3options.fields[key]);
      });
      formData.append('file', {
        uri: fileSource.uri,
        type: 'image/jpeg',
      });
      formData.append('Content-Type', 'image/jpeg');

      var request = new XMLHttpRequest();
      request.onload = function(e) {
        if (e.target.status === 204) {
          // Result in e.target.responseHeaders.Location
          this.setState({avatarSourceRemote: {uri: e.target.responseHeaders.Location}});
        }
      }.bind(this);
      request.open('POST', this.state.s3options.url, true);
      request.setRequestHeader('Content-type', 'multipart/form-data');
      request.send(formData);
    } catch (error) {
      console.error(error);
    }
  }

  // Example: display the uploaded image
  render() {
    if (this.state.avatarSourceRemote) {
      return (
        <Image source={this.state.avatarSourceRemote} style={{width: 100, height: 100}} />
      );
    } else {
      return (
        <Text>No Image</Text>
      );
    }
  }
}
This works for me:

import fs from 'react-native-fs';
import {decode} from 'base64-arraybuffer';
import AWS from 'aws-sdk';

export const uploadFileToS3 = async (file) => {
  const BUCKET_NAME = 'XXXXXXXXXX';
  const IAM_USER_KEY = 'XXXXXXXXXX';
  const IAM_USER_SECRET = 'XXXXXXXXXXXXXXX';

  const s3bucket = new AWS.S3({
    accessKeyId: IAM_USER_KEY,
    secretAccessKey: IAM_USER_SECRET,
    Bucket: BUCKET_NAME,
    signatureVersion: 'v4',
  });

  const contentType = file.type;
  const contentDisposition = `inline;filename="${file.name}"`;
  const fPath = file.uri;
  const base64 = await fs.readFile(fPath, 'base64');
  const arrayBuffer = decode(base64);

  return new Promise((resolve, reject) => {
    s3bucket.createBucket(() => {
      const params = {
        Bucket: BUCKET_NAME,
        Key: file.name,
        Body: arrayBuffer,
        ContentDisposition: contentDisposition,
        ContentType: contentType,
      };
      s3bucket.upload(params, (error, data) => {
        utils.stopLoader(); // poster's own loading helper
        if (error) {
          reject(getApiError(error)); // poster's own error helper
        } else {
          console.log(JSON.stringify(data));
          resolve(data);
        }
      });
    });
  });
};
This worked for me after a significant amount of trial and error.
I am also using a Lambda function to serve me the link to POST with.
The Lambda function just uses getSignedUrl.
// Lambda Function
const AWS = require('aws-sdk')

AWS.config.update({
  accessKeyId: {bucket_access},
  secretAccessKey: {bucket_secret},
  signatureVersion: 'v4',
  region: {bucket_region}
})

const s3 = new AWS.S3()

exports.handler = async (event) => {
  const URL = s3.getSignedUrl('putObject', {
    Bucket: {bucket_name},
    // name of the file being placed in the S3 bucket
    // event === metadata object
    Key: `${event.{key}}/photo00`
  })
  return URL
};
// React Native
const imagePreview = '{image_uri}'

const handleURL = async () => {
  // metadata object
  const obj = {
    key: "meta_data"
  }
  const response = await fetch({lambda_func_endpoint}, {
    method: 'POST',
    body: JSON.stringify(obj)
  })
  const json = await response.json();
  return json
}

const handleUpload = async () => {
  const URL = await handleURL()
  const imageExt = imagePreview.split('.').pop()

  // fetch() on the local image URI reads the file into memory;
  // the presigned PUT then needs the raw bytes, which is why the
  // response is converted to a Blob before uploading.
  let image = await fetch(imagePreview)
  image = await image.blob()

  await fetch(URL, {
    method: 'PUT',
    body: image,
    headers: {
      Accept: `image/${imageExt}`,
      'Content-Type': `image/${imageExt}`
    }
  })
    .then((res) => console.log(JSON.parse(JSON.stringify(res)).status))
    .catch((err) => console.error(err))
}
Let me know what you guys think!