Node (express, fs) streaming multiple large videos - express

I am building a server that serves a video to a user. The server works as expected, but when I switch to a different video (by going to a different URL), the previous video continues to play (more explanation below). I can't simply point the HTML video tag at a static file, because then the user would need to download the whole video before it starts playing.
Here is my server code:
const express = require("express")
const fs = require("fs")

let app = express();
var server = require("http").createServer(app);

server.listen(3000, () =>
  console.log("Server started on port 3000, and listening for requests")
);

app.use(express.static('public'));

var videoPath = "F:/Videos/harh.mkv"
var videoSize = fs.statSync(videoPath).size;
var videoSize
var videoPath, start, end;

app.get('*', function(req, res){
  var url = req.originalUrl;
  url = decodeURIComponent(url)
  if(!(url === "/api/video")){
    console.log("Accessed from " + url);
    videoPath = `F:/Videos${url}`;
    console.log(videoPath + " <--- this is the video path");
    res.sendFile(__dirname + '/public/index.html');
    start = 0; end = 0;
  } else {
    videoSize = fs.statSync(videoPath).size;
    var range = req.headers.range;
    if(!range) res.status(400).send("Requires a valid range")
    var CHUNK_SIZE = 10 ** 6
    start = Number(range.replace(/\D/g, ""))
    end = Math.min(start + CHUNK_SIZE, videoSize - 1);
    var contentLength = end - start + 1
    var headers = {
      "Content-Range": `bytes ${start}-${end}/${videoSize}`,
      "Accept-Ranges": "bytes",
      "Content-Lenght": contentLength,
      "Content-Type": "multipart/byteranges; boundary=MY_BOUNDARY",
    }
    res.writeHead(206, headers)
    var videoStream = fs.createReadStream(videoPath, {start, end});
    videoStream.pipe(res);
  }
});
And here is my HTML:
<body>
  <video src="/api/video" controls class="video">
  </video>
</body>
What is the main problem:
This works as expected, but if I change the video (by going to another URL), the previous video plays.
Example:
http://localhost:3000/test.mkv (a 10-second video) loads the page. If we then go to http://localhost:3000/nyancat.mkv (a 30-second video), the previous video (test.mkv) plays for the first few seconds, but after some time nyancat.mkv starts to play.
Each URL needs to play a different video.
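For illustration only, a minimal sketch of one possible direction (not the poster's code): since videoPath, start and end are module-level globals shared by every request, the /api/video route keeps streaming whichever path the last page load happened to set. One way around that is to derive the path from the request itself, e.g. by putting the file name into the /api/video URL. The F:/Videos directory and the .mkv MIME type below are assumptions taken from the question.
// Sketch: per-request video path instead of a shared global (assumes files live under F:/Videos)
app.get('/api/video/:name', function (req, res) {
  var filePath = `F:/Videos/${req.params.name}`;   // e.g. GET /api/video/nyancat.mkv
  var size = fs.statSync(filePath).size;
  var range = req.headers.range;
  if (!range) return res.status(400).send("Requires a valid range");

  var CHUNK_SIZE = 10 ** 6;
  var start = Number(range.replace(/\D/g, ""));
  var end = Math.min(start + CHUNK_SIZE, size - 1);

  res.writeHead(206, {
    "Content-Range": `bytes ${start}-${end}/${size}`,
    "Accept-Ranges": "bytes",
    "Content-Length": end - start + 1,
    "Content-Type": "video/x-matroska",            // assumed MIME type for .mkv
  });
  fs.createReadStream(filePath, { start, end }).pipe(res);
});
The page's video tag would then point at the specific file, e.g. <video src="/api/video/nyancat.mkv" controls></video>, so each URL streams its own file.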

Related

MSE WebM video with no audio

I've written an MSE video player and it's loading WebMs. These load well, but I have a problem with video files that have no audio track.
I've tried changing the codec depending on whether there is audio:
mediaSource.addSourceBuffer(`video/webm; ${videoHasAudio(asset) ? 'codecs="vp9,vorbis"' : 'codecs="vp9"'}`)
I thought this was working, but now it isn't. How do I play silent WebMs in MSE?
I have added a sample MSE project here:
https://github.com/thowfeeq178/MediaSourceExtention
Check out the example in the GitHub repo.
Overview:
We need to add one SourceBuffer for video and one for audio, like below:
// BBB : https://dash.akamaized.net/akamai/bbb_30fps/bbb_30fps.mpd
var baseUrl = "https://dash.akamaized.net/akamai/bbb_30fps/";
var initUrl = baseUrl + "bbb_30fps_480x270_600k/bbb_30fps_480x270_600k_0.m4v";
var initAudioUrl = baseUrl + "bbb_a64k/bbb_a64k_0.m4a";
var templateUrl =
  baseUrl + "bbb_30fps_480x270_600k/bbb_30fps_480x270_600k_$Number$.m4v";
var templateUrlForAudio = baseUrl + "bbb_a64k/bbb_a64k_$Number$.m4a";

var sourceBuffer;
var audioSourceBuffer;
var index = 0;
var audioIndex = 0;
var numberOfChunks = 159;

var video = document.querySelector("video");
var ms = new MediaSource();

function onPageLoad() {
  console.log("page loaded ..");
  if (!window.MediaSource) {
    console.error("No Media Source API available");
    return;
  }
  // make the source controlled by JS using MediaSource
  video.src = window.URL.createObjectURL(ms);
  ms.addEventListener("sourceopen", onMediaSourceOpen);
}

function onMediaSourceOpen() {
  // create the source buffers
  sourceBuffer = ms.addSourceBuffer('video/mp4; codecs="avc1.4d401f"');
  audioSourceBuffer = ms.addSourceBuffer('audio/mp4; codecs="mp4a.40.5"');
  // whenever one segment is loaded, go for the next
  sourceBuffer.addEventListener("updateend", nextSegment);
  audioSourceBuffer.addEventListener("updateend", nextAudioSegment);
  // fetch the init segments
  GET(initUrl, appendToBuffer);
  GET(initAudioUrl, appendToAudioBuffer);
  // play
  video.play();
}

// get the next video segment based on index and append it; once everything is loaded, stop listening to the event
function nextSegment() {
  var url = templateUrl.replace("$Number$", index);
  GET(url, appendToBuffer);
  index++;
  if (index > numberOfChunks) {
    sourceBuffer.removeEventListener("updateend", nextSegment);
  }
}

// get the next audio segment based on audioIndex and append it; once everything is loaded, stop listening to the event
function nextAudioSegment() {
  var audioUrl = templateUrlForAudio.replace("$Number$", audioIndex);
  GET(audioUrl, appendToAudioBuffer);
  audioIndex++;
  if (audioIndex > numberOfChunks) {
    audioSourceBuffer.removeEventListener("updateend", nextAudioSegment);
  }
}

// append to the existing video source buffer
function appendToBuffer(videoChunk) {
  if (videoChunk) {
    sourceBuffer.appendBuffer(new Uint8Array(videoChunk));
  }
}

// append to the existing audio source buffer
function appendToAudioBuffer(audioChunk) {
  if (audioChunk) {
    audioSourceBuffer.appendBuffer(new Uint8Array(audioChunk));
  }
}

// just the network plumbing
function GET(url, callback) {
  var xhr = new XMLHttpRequest();
  xhr.open("GET", url);
  xhr.responseType = "arraybuffer";
  xhr.onload = function (e) {
    if (xhr.status != 200) {
      console.warn("Unexpected status code " + xhr.status + " for " + url);
      return;
    }
    callback(xhr.response);
  };
  xhr.send();
}
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <title>MSE Demo</title>
  </head>
  <body onload="onPageLoad()">
    <h1>MSE Demo</h1>
    <div>
      <video muted controls width="80%"></video>
    </div>
  </body>
</html>
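As a side note on the original silent-WebM question, a minimal sketch of the video-only case (my assumption: the asset is VP9-only and "silent.webm" is a placeholder URL, not a file from the repo above) would create just one SourceBuffer and skip the audio buffer entirely, reusing the GET helper from above:
// Sketch: a WebM with no audio track only needs a video SourceBuffer
var silentMs = new MediaSource();
video.src = window.URL.createObjectURL(silentMs);
silentMs.addEventListener("sourceopen", function () {
  var mime = 'video/webm; codecs="vp9"';            // no audio codec listed
  if (!MediaSource.isTypeSupported(mime)) {
    console.error("Unsupported MIME/codec: " + mime);
    return;
  }
  var videoOnlyBuffer = silentMs.addSourceBuffer(mime);
  GET("silent.webm", function (chunk) {             // placeholder URL
    videoOnlyBuffer.appendBuffer(new Uint8Array(chunk));
  });
});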

No sound in Safari using Web Audio API webkitAudioContext()

I am trying to use the Web Audio API to play sound in my React application.
It's currently playing sound in all browsers except Safari v12.1.
I am aware Safari has restrictions on autoplay and requires user interaction to play sound, so I have a play button which calls the _play() function:
_play = (url, index) => {
  this._getData(url);
  this.source.start(index)
}
It's calling the _getData() function which looks like this:
_getData(url) {
  this.source = this.audioContext.createBufferSource();
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  request.onload = () => {
    var audioData = request.response;
    console.log(this.audioContext)
    this.audioContext.decodeAudioData(audioData, buffer => {
      this.source.buffer = buffer;
      this.source.connect(this.audioContext.destination);
    },
    function(e){ console.log("Error with decoding audio data" + e.err); });
  }
  request.send();
}
this.audioContext is created in the component constructor using:
this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
The console.log(this.audioContext) inside request.onload outputs the AudioContext before pressing play, and again after pressing play (the screenshots are not included here).
But no sound is playing (in Safari).
What am I doing wrong?
I think the problem you ran into is that Safari does not allow you to modify the buffer anymore once you have called start().
The following page, for example, plays a second of noise in Safari when you press the play button.
<!DOCTYPE html>
<html>
  <body>
    <button id="play-button">play</button>
    <script>
      document
        .getElementById('play-button')
        .addEventListener('click', () => {
          const audioContext = new AudioContext();
          const audioBufferSourceNode = audioContext.createBufferSource();
          const sampleRate = audioContext.sampleRate;
          const audioBuffer = audioContext.createBuffer(1, sampleRate, sampleRate);
          const channelData = audioBuffer.getChannelData(0);

          for (let i = 0; i < sampleRate; i += 1) {
            channelData[i] = (Math.random() * 2) - 1;
          }

          audioBufferSourceNode.buffer = audioBuffer;
          audioBufferSourceNode.connect(audioContext.destination);
          audioBufferSourceNode.start(audioContext.currentTime);
        });
    </script>
  </body>
</html>
But it doesn't work anymore if you modify it slightly. If the audioBufferSourceNode is started before the buffer is assigned, there is no output anymore.
audioBufferSourceNode.connect(audioContext.destination);
audioBufferSourceNode.start(audioContext.currentTime);
audioBufferSourceNode.buffer = audioBuffer;
I guess you can get your code working by waiting for the HTTP response and the audio decoding before you start the source. Make sure to execute this.source.buffer = buffer before you execute this.source.start(index).
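For illustration, a minimal sketch of that reordering (my own rearrangement of the question's code, keeping its this.audioContext and index; the onReady callback is an added helper, not part of the original):
// Sketch: only call start() once the decoded buffer has been assigned
_play = (url, index) => {
  this._getData(url, () => {
    this.source.start(index);               // start after this.source.buffer is set
  });
}

_getData(url, onReady) {
  this.source = this.audioContext.createBufferSource();
  var request = new XMLHttpRequest();
  request.open('GET', url, true);
  request.responseType = 'arraybuffer';
  request.onload = () => {
    this.audioContext.decodeAudioData(request.response, buffer => {
      this.source.buffer = buffer;          // assign the buffer first...
      this.source.connect(this.audioContext.destination);
      onReady();                            // ...then let the caller start the source
    }, function (e) { console.log("Error with decoding audio data" + e.err); });
  };
  request.send();
}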
I hope this helps.

Why does the node-lame module save the mp3 file with a high-pitched/fast playback speed?

I am trying to record audio from the browser and stream it as raw audio (PCM) to my Node server, where I want to save it in .mp3 format. I am using the node-lame module on my server to create an mp3 file from the PCM audio stream. The problem is that the mp3 file is always high-pitched and plays at a fast speed. I have tried sending the data from the browser (client side) both as int16 and as float32, and setting the appropriate lame.Encoder options for each:
stream.pipe(new lame.Encoder({channels: 2, bitDepth: 32, float: true})) // float32
  .pipe(fs.createWriteStream(path.resolve(__dirname, 'demo.mp3')))

stream.pipe(new lame.Encoder({channels: 2, bitDepth: 16, sampleRate: 44100})) // int16
  .pipe(fs.createWriteStream(path.resolve(__dirname, 'demo.mp3')))
Here is the code for the client side
(function(window) {
  var client = new BinaryClient('ws://localhost:9001');

  client.on('open', function() {
    window.Stream = client.createStream();

    if (!navigator.getUserMedia)
      navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia || navigator.msGetUserMedia;

    if (navigator.getUserMedia) {
      navigator.getUserMedia({audio: true}, success, function(e) {
        alert('Error capturing audio.');
      });
    } else alert('getUserMedia not supported in this browser.');

    var recording = false;

    window.startRecording = function() {
      recording = true;
    }

    window.stopRecording = function() {
      recording = false;
      window.Stream.end();
    }

    function success(e) {
      audioContext = window.AudioContext || window.webkitAudioContext;
      context = new audioContext();
      // the sample rate is in context.sampleRate
      audioInput = context.createMediaStreamSource(e);
      var bufferSize = 2048;
      recorder = context.createScriptProcessor(bufferSize, 1, 1);
      recorder.onaudioprocess = function(e){
        if(!recording) return;
        console.log('recording');
        var left = e.inputBuffer.getChannelData(0);
        window.Stream.write(left); // trying it with float32
      }
      audioInput.connect(recorder)
      recorder.connect(context.destination);
    }

    function convertoFloat32ToInt16(buffer) {
      var l = buffer.length;
      var buf = new Int16Array(l)
      while (l--) {
        buf[l] = buffer[l] * 0xFFFF; // convert to 16 bit
      }
      return buf.buffer
    }
  });
})(this);
Here is the node app
var express = require('express');
var BinaryServer = require('binaryjs').BinaryServer;
var fs = require('fs');
var lame = require("lame");
var path = require('path');
var KalmanFilter = require('kalmanjs').default;

var buffer = [];
var port = 3700;
var outFile = 'demo.mp3';

var app = express();
app.set('views', __dirname + '/tpl');
app.set('view engine', 'jade');
app.engine('jade', require('jade').__express);
app.use(express.static(__dirname + '/public'))

app.get('/', function(req, res){
  res.render('index');
});

app.listen(port);
console.log('server open on port ' + port);

binaryServer = BinaryServer({port: 9001});

binaryServer.on('connection', function(client) {
  console.log('new connection');
  client.on('stream', function(stream, meta) {
    console.log('new stream');
    stream.pipe(new lame.Encoder({channels: 2, bitDepth: 32, float: true}))
      .pipe(fs.createWriteStream(path.resolve(__dirname, 'demo.mp3')))
      .on('close', function () {
        console.log('done?');
      })
    stream.on('end', function() {
      console.log('wrote to file ' + outFile);
    });
  });
});
Both of these snippets are taken from https://github.com/gabrielpoca/browser-pcm-stream except for the mp3 part.
The default sampling rate in the browser is typically 48000 Hz, while you are encoding your mp3 file with a sampling rate of 44100 Hz. You either need to resample your PCM data or set the recording sampling rate to 44100 Hz.
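For illustration, here is a sketch of the other direction, i.e. making the encoder follow whatever rate the browser actually records at. This is my own variation, not the answerer's code, and it assumes the BinaryJS stream is created after the AudioContext exists (unlike in the question) so that context.sampleRate is available to send as metadata:
// Client side (sketch): pass the real AudioContext sample rate as stream metadata
window.Stream = client.createStream({ sampleRate: context.sampleRate });

// Server side (sketch): use that rate so the encoder matches the PCM being sent
client.on('stream', function(stream, meta) {
  stream.pipe(new lame.Encoder({
    channels: 1,                  // the client writes a single (left) channel
    bitDepth: 32,
    float: true,
    sampleRate: meta.sampleRate   // e.g. 48000 instead of the 44100 default
  }))
  .pipe(fs.createWriteStream(path.resolve(__dirname, 'demo.mp3')));
});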

Share screen using getScreenId.js in WebRTC for two peers

I am trying to implement a share-screen function in WebRTC video conferencing. Following a suggestion, I am now using muaz-khan's solution with https://www.webrtc-experiment.com/getScreenId/ . I can easily capture the application images of one peer and replace the video stream with the captured stream. But this is a video-conferencing experiment, so two browsers need to video conference with each other. For example, browser 1 has video stream A (local video) and video stream B (remote video); browser 2 has video stream B (local video) and video stream A (remote video). So when I am in browser 1 and share the screen, the screen-share stream should replace the local video in browser 1 and the remote video in browser 2.
But right now, I can only make the screen share replace the local video in browser 1; browser 2 doesn't see any change in its remote video (which is the local video in browser 1). I don't know how to trigger the change in browser 2 as well. Do I need to signal the screen-share stream to the server and change the remote stream accordingly?
Here is my code in JavaScript:
$(function() {
  var brokerController, ws, webRTC, localid;

  // ws = new XSockets.WebSocket("wss://rtcplaygrouund.azurewebsites.net:443", ["connectionbroker"], {
  ws = new XSockets.WebSocket("ws://localhost:4502", ["connectionbroker"], {
    ctx: "152300ed-4d84-4e72-bc99-965052dc1e95"
  });

  var addRemoteVideo = function(peerId, mediaStream) {
    var remoteVideo = document.createElement("video");
    remoteVideo.setAttribute("autoplay", "true");
    remoteVideo.setAttribute("rel", peerId);
    attachMediaStream(remoteVideo, mediaStream);
    remoteVideo.setAttribute("class", "col-md-3");
    remoteVideo.setAttribute("height", $( document ).height() * 0.3);
    remoteVideo.setAttribute("id", 'remoteVideo');
    $("#videoscreen").append(remoteVideo);
  };

  var onConnectionLost = function (remotePeer) {
    console.log("onconnectionlost");
    var peerId = remotePeer.PeerId;
    var videoToRemove = $("video[rel='" + peerId + "']");
    videoToRemove.remove();
  };

  var oncConnectionCreated = function() {
    console.log("oncconnectioncreated", arguments);
  }

  var onGetUerMedia = function(stream) {
    console.log("Successfully got some userMedia , hopefully a goat will appear..");
    webRTC.connectToContext(); // connect to the current context?
  };

  var onRemoteStream = function (remotePeer) {
    addRemoteVideo(remotePeer.PeerId, remotePeer.stream);
    console.log("Opps, we got a remote stream. lets see if its a goat..");
  };

  var onLocalStream = function(mediaStream) {
    console.log("Got a localStream", mediaStream.id);
    localid = mediaStream.id;
    console.log("check this id: meadiastram id ", mediaStream.id);
    var video = document.createElement("video");
    video.setAttribute("height", "100%");
    video.setAttribute("autoplay", "true");
    video.setAttribute("id", "localvideo");
    video.setAttribute("name", mediaStream.id);
    attachMediaStream(video, mediaStream);
    $("#videoscreen").append(video);

    $('#share').click(function() {
      getScreenId(function (error, sourceId, screen_constraints) {
        navigator.getUserMedia = navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
        navigator.getUserMedia(screen_constraints, function (stream) {
          $('#localvideo').attr('src', URL.createObjectURL(stream));
        }, function (error) {
          console.error(error);
        });
      });
    });
  };

  var onContextCreated = function(ctx) {
    console.log("RTC object created, and a context is created - ", ctx);
    webRTC.getUserMedia(webRTC.userMediaConstraints.hd(true), onGetUerMedia, onError);
  };

  var onOpen = function() {
    console.log("Connected to the brokerController - 'connectionBroker'");
    webRTC = new XSockets.WebRTC(this);
    webRTC.onlocalstream = onLocalStream;
    webRTC.oncontextcreated = onContextCreated;
    webRTC.onconnectioncreated = oncConnectionCreated;
    webRTC.onconnectionlost = onConnectionLost;
    webRTC.onremotestream = onRemoteStream;
  };

  var onConnected = function() {
    console.log("connection to the 'broker' server is established");
    console.log("Try get the broker controller form server..");
    brokerController = ws.controller("connectionbroker");
    brokerController.onopen = onOpen;
  };

  ws.onconnected = onConnected;
});
I am using XSockets as the server, and the code that handles the share click and replaces the local stream with the screen-share stream is as simple as this:
$('#share').click(function() {
  getScreenId(function (error, sourceId, screen_constraints) {
    navigator.getUserMedia = navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
    navigator.getUserMedia(screen_constraints, function (stream) {
      $('#localvideo').attr('src', URL.createObjectURL(stream));
    }, function (error) {
      console.error(error);
    });
  });
});
Any help or suggestion would be appreciated.
Thanks for pointing out the other post (How to addTrack in MediaStream in WebRTC), but I don't think it covers the same issue. I am also not sure how to renegotiate the remote connection in this case.
Xsocket.webrtc.js file for webrtc connection:
https://github.com/XSockets/XSockets.WebRTC/blob/master/src/js/XSockets.WebRTC.latest.js
How could I renegotiate the remote connection in this case?
I figured out a workaround for this question myself: do not replace the local stream with the screen-share stream; instead, remove the old local stream from the local div, then add the new screen-share stream to the local div. In the meantime, send the old local stream id over a data channel to the other peer and remove that old remote video as well.
The most important thing is to refresh the streams (renegotiation); then the screen-share stream is displayed on the remote peer.
Code:
$('#share').click(function() {
  getScreenId(function (error, sourceId, screen_constraints) {
    navigator.getUserMedia = navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
    navigator.getUserMedia(screen_constraints, function (stream) {
      webRTC.removeStream(webRTC.getLocalStreams()[0]);
      var id = $('#localvideo').attr('name');
      $('#localvideo').remove();
      brokerController.invoke('updateremotevideo', id);
      webRTC.addLocalStream(stream);
      webRTC.getRemotePeers().forEach(function (p) {
        webRTC.refreshStreams(p);
      });
    }, function (error) {
      console.error(error);
    });
  });
});
After receiving the command from the server to remove that old video stream:
brokerController.on('updateremotevideo', function(streamid){
  $(document.getElementById(streamid)).remove();
});
This solution works for me. However, if you only want to replace the local video stream with the screen-share stream, you would need to recreate the offer with the SDP and send that SDP to the remote peer, which is more complicated.
getScreenId(function (error, sourceId, screen_constraints) {
  navigator.getUserMedia = navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
  navigator.getUserMedia(screen_constraints, function (stream) {
    navigator.getUserMedia({audio: true}, function (audioStream) {
      stream.addTrack(audioStream.getAudioTracks()[0]);
      var mediaRecorder = new MediaStreamRecorder(stream);
      mediaRecorder.mimeType = 'video/mp4'
      mediaRecorder.stream = stream;
      self.setState({recorder: mediaRecorder, startRecord: true, shareVideo: true, pauseRecord: false, resumeRecord: false, stopRecord: false, downloadRecord: false, updateRecord: false});
      document.querySelector('video').src = URL.createObjectURL(stream);
      var video = document.getElementById('screen-video')
      if (video) {
        video.src = URL.createObjectURL(stream);
        video.width = 360;
        video.height = 300;
      }
    }, function (error) {
      alert(error);
    });
  }, function (error) {
    alert(error);
  });
});
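As a side note on the "replace the local stream" variant mentioned above: if you have access to the underlying RTCPeerConnection (the XSockets wrapper shown here does not expose it in the posted code, so this is purely an assumption), the standard pattern is RTCRtpSender.replaceTrack, which swaps the outgoing video track without tearing down the connection. A minimal sketch:
// Sketch only: swap the outgoing camera track for a screen track on an RTCPeerConnection `pc`
function shareScreen(pc, localVideoElement) {
  navigator.mediaDevices.getDisplayMedia({ video: true }).then(function (screenStream) {
    var screenTrack = screenStream.getVideoTracks()[0];
    var sender = pc.getSenders().find(function (s) {
      return s.track && s.track.kind === "video";
    });
    if (sender) {
      sender.replaceTrack(screenTrack);        // the remote peer now receives the screen
    }
    localVideoElement.srcObject = screenStream; // update the local preview too
  });
}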

Titanium - Get image file from filesystem on Android

I have a problem getting an image from the filesystem. On iOS it works fine.
First of all, I save a remote image to the filesystem with this function, where img.imagen is the URL of the remote image (e.g. http://onesite.es/img2.jpeg):
function descargarImagen(img, callback){
  var path = img.imagen;
  var filename = path.split("/").pop();
  var xhr = Titanium.Network.createHTTPClient({
    onload: function() {
      // first, grab a "handle" to the file where you'll store the downloaded data
      var f = Ti.Filesystem.getFile(Ti.Filesystem.applicationDataDirectory, filename);
      f.write(this.responseData); // write to the file
      Ti.API.debug("-- Imagen guardada: " + f.nativePath);
      callback({path: f.nativePath});
    },
    timeout: 10000
  });
  xhr.open('GET', path);
  xhr.send();
}
Now, I want to share this image by creating an Android Intent, where args.image is the f.nativePath from the previous function:
var intent = null;
var intentType = null;

intent = Ti.Android.createIntent({
  action: Ti.Android.ACTION_SEND
});

// add text status
if (args.status){
  intent.putExtra(Ti.Android.EXTRA_TEXT, args.status);
}

// change type according to the content
if (args.image){
  intent.type = "image/*";
  intent.putExtraUri(Ti.Android.EXTRA_STREAM, args.image);
}else{
  intent.type = "text/plain";
  intent.addCategory(Ti.Android.CATEGORY_DEFAULT);
}

// launch intent
Ti.Android.currentActivity.startActivity(Ti.Android.createIntentChooser(intent, args.androidDialogTitle));
What am I doing wrong?