Stop request during file upload in Node.js

I'm writing an image uploader, and I want to constrain the size of the image to under 3 MB. On the server side, I can check the size of the image from the Content-Length header, something like this (using Express):
app.post('/upload', function(req, res) {
  if (+req.headers['content-length'] > 3001000) { // About 3 MB
    // Do something to stop the request
    return res.send({'error': 'some kind of error'});
  }
  // Stream in data here...
});
I tried to stop the request with (and permutations of):
req.shouldKeepAlive = false;
req.client.destroy();
res.writeHead(200, {'Connection': 'close'});
res.end()
None of them really "destroys" the request to prevent more data from being uploaded.
req.client.destroy() seems to freeze the transfer, but the res.send({error: ...}) response is never sent back.
Help!

Throw an error and catch it. It will stop the file upload, allowing you to send a response.
try { throw new Error("Stopping file upload..."); }
catch (e) { res.end(e.toString()); }
It's a bit hackish, but it works...
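If you'd rather send an explicit error and then cut the connection off, a minimal sketch along these lines may also work (Express 4 style; the 413 status code and the exact byte limit are just my choices here):
app.post('/upload', function(req, res) {
  if (+req.headers['content-length'] > 3001000) { // about 3 MB
    // Send the error response first...
    res.status(413).json({ error: 'File too large' });
    // ...then destroy the request stream once the response has been flushed,
    // so no further body data is read from the socket.
    res.on('finish', function() {
      req.destroy();
    });
    return;
  }
  // Stream in data here...
});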

Here is my solution:
var maxSize = 30 * 1024 * 1024; // 30 MB

app.post('/upload', function(req, res) {
  var size = +req.headers['content-length'];
  if (size <= maxSize) {
    // form is assumed to be a multipart parser, e.g. multiparty's new multiparty.Form()
    form.parse(req, function(err, fields, files) {
      console.log("File uploading");
      if (files && files.upload) {
        res.status(200).json({fields: fields, files: files});
        fs.renameSync(files.upload[0].path, uploadDir + files.upload[0].originalFilename);
      } else {
        res.send("Not uploading");
      }
    });
  } else {
    res.status(413).send("File too large");
  }
});
To avoid wasting the client's upload time before the server responds, also check the size up front in the client-side JavaScript:
if (fileElement.files[0].size > maxSize) {
....
}
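As a rough sketch of that client-side check (fileElement and maxSize are the same names used above; alerting and clearing the input are just one way to handle it):
fileElement.addEventListener('change', function () {
  var file = fileElement.files[0];
  if (file && file.size > maxSize) {
    alert('File is larger than ' + maxSize + ' bytes, not uploading.');
    fileElement.value = ''; // clear the selection
    return;
  }
  // ...otherwise go ahead, build the FormData and start the upload
});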

Related

HCL Domino AppDevPack - Problem with writing Rich Text

I use the code proposed as an example in the documentation for Domino AppDev Pack 1.0.4; the only difference is that I read a text file (body.txt) into a buffer, this file containing only simple long text (40 KB).
When it is executed, the document is created in the database and the rest of the code does not return an error.
But in the end, the rich text field is not added to the document.
Here is the response returned:
response: {"fields":[{"fieldName":"Body","unid":"8EA69129BEECA6DEC1258554002F5DCD","error":{"name":"ProtonError","code":65577,"id":"RICH_TEXT_STREAM_CORRUPT"}}]}
My goal is to write very long text (more than 64 KB) into a rich text field. In the example I use a text file for the buffer, but later it could be something like const buffer = Buffer.from('very long text ...').
Is this the right way, or does it have to be done differently?
I'm using a Windows system with IBM Domino (r) Server (64 Bit), Release 10.0.1FP4 and AppDevPack 1.0.4.
Thank you in advance for your help
Here's the code:
const write = async (database) => {
  let writable;
  let result;
  try {
    // Create a document with subject write-example-1 to hold rich text
    const unid = await database.createDocument({
      document: {
        Form: 'RichDiscussion',
        Title: 'write-example-1',
      },
    });
    writable = await database.bulkCreateRichTextStream({});
    result = await new Promise((resolve, reject) => {
      // Set up event handlers.
      // Reject the Promise if there is a connection-level error.
      writable.on('error', (e) => {
        reject(e);
      });
      // Return the response from writing when resolving the Promise.
      writable.on('response', (response) => {
        console.log("response: " + JSON.stringify(response));
        resolve(response);
      });
      // Indicates which document and item name to use.
      writable.field({ unid, fieldName: 'Body' });
      let offset = 0;
      // Assume for purposes of this example that we buffer the entire file.
      const buffer = fs.readFileSync('/driver/body.txt');
      // When writing large amounts of data, it is necessary to
      // wait for the client-side to complete the previous write
      // before writing more data.
      const writeData = () => {
        let draining = true;
        while (offset < buffer.length && draining) {
          const remainingBytes = buffer.length - offset;
          let chunkSize = 16 * 1024;
          if (remainingBytes < chunkSize) {
            chunkSize = remainingBytes;
          }
          draining = writable.write(buffer.slice(offset, offset + chunkSize));
          offset += chunkSize;
        }
        if (offset < buffer.length) {
          // Buffer is not draining. Whenever the drain event is emitted
          // call this function again to write more data.
          writable.once('drain', writeData);
        }
      };
      writeData();
      writable = undefined;
    });
  } catch (e) {
    console.log(`Unexpected exception ${e.message}`);
  } finally {
    if (writable) {
      writable.end();
    }
  }
  return result;
};
As of AppDev Pack 1.0.4, the rich text stream only accepts data in valid rich text CD format, in the LMBCS character set. We are currently working on a library to help you write valid rich text data to the stream.
I'd love to hear more about your use cases, and we're excited you're already poking around the feature! If you can, join the OpenNTF Slack channel; I usually hang out there.

Modify Kurento group call example to support only audio

I need to modify the Kurento group call example from Link to send only audio if one participant has no camera. Right now, audio is only received when a camera is used. When only a microphone is available I receive a DeviceMediaError.
I managed to detect whether a camera device is connected or not and then send only audio, but this doesn't work. Maybe the participant should have an audio tag instead of a video tag?
EDIT: It only works in Firefox, not in Chrome. Any ideas?
In the file https://github.com/Kurento/kurento-tutorial-java/blob/master/kurento-group-call/src/main/java/org/kurento/tutorial/groupcall/UserSession.java, change the following line:
sender.getOutgoingWebRtcPeer().connect(incoming, MediaType.AUDIO);
and set the offer media constraints to video: false in the browser JS file.
Updated code:
let constraints = {
  audio: true,
  video: false
};
let localParticipant = new Participant(sessionId);
participants[sessionId] = localParticipant;
localVideo = document.getElementById('local_video');
let video = localVideo;
let options = {
  localVideo: video,
  mediaConstraints: constraints,
  onicecandidate: localParticipant.onIceCandidate.bind(localParticipant),
  configuration : { iceServers : [
    {"url":"stun:74.125.200.127:19302"},
  ] }
};
localParticipant.rtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerSendonly(options, function(error) {
  if (error) {
    return console.error(error);
  }
  localVideoCurrentId = sessionId;
  localVideo = document.getElementById('local_video');
  localVideo.src = localParticipant.rtcPeer.localVideo.src;
  localVideo.muted = true;
  this.generateOffer(localParticipant.offerToReceiveVideo.bind(localParticipant));
});
server.js code: add the media profile parameter on the server side while joining the room.
function join(socket, room, callback) {
  let userSession = userRegister.getById(socket.id);
  userSession.setRoomName(room.name);
  room.pipeline.create('WebRtcEndpoint', {mediaProfile : 'WEBM_AUDIO_ONLY'}, (error, outgoingMedia) => {
    if (error) {
      console.error('no participant in room');
      if (Object.keys(room.participants).length === 0) {
        room.pipeline.release();
      }
      return callback(error);
    }
    // else
    outgoingMedia.setMaxAudioRecvBandwidth(100);
Also add the media profile parameter when accepting the call:
function getEndpointForUser(userSession, sender, callback) {
  if (userSession.id === sender.id) {
    return callback(null, userSession.outgoingMedia);
  }
  let incoming = userSession.incomingMedia[sender.id];
  if (incoming == null) {
    console.log(`user : ${userSession.id} create endpoint to receive video from : ${sender.id}`);
    getRoom(userSession.roomName, (error, room) => {
      if (error) {
        return callback(error);
      }
      room.pipeline.create('WebRtcEndpoint', {mediaProfile : 'WEBM_AUDIO_ONLY'}, (error, incomingMedia) => {
        if (error) {
          if (Object.keys(room.participants).length === 0) {
            room.pipeline.release();
          }
          return callback(error);
        }
        console.log(`user: ${userSession.id} successfully create pipeline`);
        incomingMedia.setMaxAudioRecvBandwidth(0);
        incomingMedia.getMaxAudioRecvBandwidth(0);
Hope this helps.

Sending audio file created with RecordRTC to my server

I am new to working with JavaScript, PHP, and servers generally. I am working on a web page that will record audio from the user and save it to my server, using RecordRTC. I'm a bit confused about the XMLHttpRequest portion: how do I alter the following code to send to my server instead of the webrtcweb.com server?
function uploadToServer(recordRTC, callback) {
  var blob = recordRTC instanceof Blob ? recordRTC : recordRTC.blob;
  var fileType = blob.type.split('/')[0] || 'audio';
  var fileName = (Math.random() * 1000).toString().replace('.', '');
  if (fileType === 'audio') {
    fileName += '.' + (!!navigator.mozGetUserMedia ? 'ogg' : 'wav');
  } else {
    fileName += '.webm';
  }
  // create FormData
  var formData = new FormData();
  formData.append(fileType + '-filename', fileName);
  formData.append(fileType + '-blob', blob);
  callback('Uploading ' + fileType + ' recording to server.');
  makeXMLHttpRequest('https://webrtcweb.com/RecordRTC/', formData, function(progress) {
    if (progress !== 'upload-ended') {
      callback(progress);
      return;
    }
    var initialURL = 'https://webrtcweb.com/RecordRTC/uploads/';
    callback('ended', initialURL + fileName);
    listOfFilesUploaded.push(initialURL + fileName);
  });
}
Via my web hosting provider, I'm using an Apache server, phpMyAdmin, and a MySQL database. Do I just replace
makeXMLHttpRequest('https://webrtcweb.com/RecordRTC/'
with "https://mywebsite.com", and replace
var initialURL = 'https://webrtcweb.com/RecordRTC/uploads/';
with the path to the folder I created to hold these audio files (https://mywebsite.com/uploads)? Then set permissions for that folder to allow public write capabilities (this seems unsafe; is there a better method)?
This is the makeXMLHttpRequest function:
function makeXMLHttpRequest(url, data, callback) {
  var request = new XMLHttpRequest();
  request.onreadystatechange = function() {
    if (request.readyState == 4 && request.status == 200) {
      callback('upload-ended');
    }
  };
  request.upload.onloadstart = function() {
    callback('Upload started...');
  };
  request.upload.onprogress = function(event) {
    callback('Upload Progress ' + Math.round(event.loaded / event.total * 100) + "%");
  };
  request.upload.onload = function() {
    callback('progress-about-to-end');
  };
  request.upload.onload = function() {
    callback('progress-ended');
  };
  request.upload.onerror = function(error) {
    callback('Failed to upload to server');
    console.error('XMLHttpRequest failed', error);
  };
  request.upload.onabort = function(error) {
    callback('Upload aborted.');
    console.error('XMLHttpRequest aborted', error);
  };
  request.open('POST', url);
  request.send(data);
}
Please make sure that your PHP server is running over SSL (HTTPS).
Create a directory and name it uploadFiles
Create a sub-directory and name it uploads
Structure of the directories:
https://server.com/uploadFiles -> to upload files
https://server.com/uploadFiles/uploads -> to store files
index.php
Now create or upload the following index.php file at this path: https://server.com/uploadFiles
<?php
// File Name: "index.php"
// via https://github.com/muaz-khan/RecordRTC/tree/master/RecordRTC-to-PHP
foreach (array('video', 'audio') as $type) {
    if (isset($_FILES["${type}-blob"])) {
        echo 'uploads/';
        $fileName = $_POST["${type}-filename"];
        $uploadDirectory = 'uploads/'.$fileName;
        if (!move_uploaded_file($_FILES["${type}-blob"]["tmp_name"], $uploadDirectory)) {
            echo(" problem moving uploaded file");
        }
        echo($fileName);
    }
}
?>
Why a sub-directory?
The nested uploads directory will be used to store your uploaded files. You will get URLs similar to this:
https://server.com/uploadFiles/uploads/filename.webm
Longer file upload issues:
https://github.com/muaz-khan/RecordRTC/wiki/PHP-Upload-Issues
upload_max_filesize MUST be 500MB or greater.
max_execution_time MUST be at least 10800 (or greater).
It is recommended to modify php.ini; otherwise, create an .htaccess file.
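For example, a sketch of those settings, either in php.ini or (if you cannot edit php.ini and are running mod_php) in an .htaccess file; the values simply mirror the limits listed above:
; php.ini
upload_max_filesize = 500M
max_execution_time = 10800

# .htaccess equivalent (mod_php only)
php_value upload_max_filesize 500M
php_value max_execution_time 10800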
How to link my own server?
Simply replace https://webrtcweb.com/RecordRTC/ with your own URL, e.g. https://server.com/uploadFiles/.
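In terms of the question's uploadToServer() function, that amounts to changing only the two URL constants, roughly like this (server.com stands in for your own domain):
makeXMLHttpRequest('https://server.com/uploadFiles/', formData, function(progress) {
  if (progress !== 'upload-ended') {
    callback(progress);
    return;
  }
  var initialURL = 'https://server.com/uploadFiles/uploads/';
  callback('ended', initialURL + fileName);
  listOfFilesUploaded.push(initialURL + fileName);
});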

Example code to use GridFS using mongoskin to upload file from form

I am using mongoskin to connect to MongoDB in my project. Now I have a requirement to use GridFS to upload images, audio, etc. I have an HTML form to upload these files.
I tried to find example code for uploading a file using mongoskin, however I couldn't find any good one.
Please help.
After spending many hours, I am able to use mongoskin to upload a file to GridFS. Not sure if this is perfect code, however I'm sharing it here because I couldn't find a single working example when searching Google :-)
https://github.com/dilipkumar2k6/gridfs-mongoskin
var DBModule = require('./DBModule.js');
var Grid = require('gridfs-stream');
var mongoskin = require('mongoskin');

//Upload file to server and also update the database
exports.uploadContent = function (req, res) {
  console.log('Calling uploadFile inside FileUploadService');
  req.pipe(req.busboy);
  req.busboy.on('file', function (fieldname, file, filename, encoding, mimetype) {
    console.log('uploadFile after busboy fieldname: ' + fieldname + ", file : " + file + ", filename : " + filename);
    // make sure the db instance is open before passing into `Grid`
    var gfs = Grid(DBModule.db, mongoskin);
    // Get metadata
    var host = req.headers['host'];
    var metadata = {contentType: mimetype};
    var writestream = gfs.createWriteStream({filename: filename, metadata: metadata});
    file.pipe(writestream);
    writestream.on('close', function (file) {
      // return URL to access the uploaded content
      var path = "contents/" + file._id;
      res.json({"path": path});
    });
    writestream.on('error', function (err) {
      log.error({err: err}, 'Failed to upload file to database');
      res.status(constants.HTTP_CODE_INTERNAL_SERVER_ERROR);
      res.json({error: err});
    });
  });
};

//view file from database
exports.previewContent = function (req, res) {
  var contentId = new DBModule.BSON.ObjectID(req.params.contentid);
  console.log('Calling previewFile inside FileUploadService for content id ' + contentId);
  var gs = DBModule.db.gridStore(contentId, 'r');
  gs.read(function (err, data) {
    if (!err) {
      //res.setHeader('Content-Type', metadata.contentType);
      res.end(data);
    } else {
      log.error({err: err}, 'Failed to read the content for id ' + contentId);
      res.status(constants.HTTP_CODE_INTERNAL_SERVER_ERROR);
      res.json({error: err});
    }
  });
};
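For completeness, here is a rough sketch of the wiring these handlers assume: req.busboy comes from the connect-busboy middleware, and the route paths and port are just examples:
var express = require('express');
var busboy = require('connect-busboy');
var fileUploadService = require('./FileUploadService.js');

var app = express();
app.use(busboy()); // populates req.busboy for uploadContent above
app.post('/contents', fileUploadService.uploadContent);
app.get('/contents/:contentid', fileUploadService.previewContent);
app.listen(3000);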
Try this to store the data using GridFS (by default it uses mongoskin). It worked for me.
var ObjectID = require('mongodb').ObjectID,
    GridStore = require('mongodb').GridStore;

exports.saveMedia = function(db, media, next) {
  console.log(media);
  db.open(function (err, db) {
    // Create a file and open it
    var gridStore = new GridStore(db, new ObjectID(), "w");
    gridStore.open(function (err, gridStore) {
      // Write some content to the file
      gridStore.write(new Buffer(media), function (err, gridStore) {
        // Flush the file to db
        gridStore.close(function (err, fileData) {
          // returns file info
          next(null, fileData);
        });
      });
    });
  });
};
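A quick usage sketch (the module path and input file are made up, and db is assumed to be the mongoskin/mongodb Db instance your app already has):
var fs = require('fs');
var mediaStore = require('./mediaStore.js'); // hypothetical name for the module above

var media = fs.readFileSync('./recording.webm');
mediaStore.saveMedia(db, media, function (err, fileData) {
  if (err) return console.error(err);
  console.log('Stored in GridFS, file info:', fileData);
});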

PhantomJS automation of a website leads to me getting IP blocked

I'm using PhantomJS to automate a page. What I do is:
do {
  console.log(i);
  i++;
  page.open(url);
  do { phantom.page.sendEvent('mousemove'); } while (page.loading);
  if (page.injectJs('./Search.js') == false) {
    console.log("Search.js Failed");
  }
  var links = page.evaluate(function(json) {
    return search(json);
  }, json);
  console.log(links);
} while (links == "")
So this leads to me opening the website repeatedly until what I'm looking for appears. But this also leads to my IP getting banned. What can I do to get around this?
Your IP is probably getting banned because the script generates too many requests to the website in very little time. So you need to throttle the requests, i.e. apply a pause between them.
I would rewrite your script like this:
var page = require('webpage').create();
var url = "http://www.website.tld/";
var json = {"some" : "json"};
var i = 0;
var links;

// We abstract the main code into a function so that we can call it
// again and again from itself
function getlinks(url, json) {
  i++;
  console.log(i);
  page.open(url);
  do { phantom.page.sendEvent('mousemove'); } while (page.loading);
  if (page.injectJs('./Search.js') == false) {
    console.log("Search.js Failed");
  }
  var links = page.evaluate(function(json) {
    return search(json);
  }, json);
  if (links == "") {
    // No links scraped yet, so we wait for 3 seconds and try again
    setTimeout(function() {
      getlinks(url, json);
    }, 3000);
  } else {
    console.log(links);
    phantom.exit();
  }
}
getlinks(url, json);