Display a getUserMedia MediaStream as live video with Media Source Extensions (MSE) - webrtc

I am trying to display a MediaStream taken from a webcam using getUserMedia, and to relay it to a remote peer using whatever mechanism possible for it to be played (as an experiment). I am not using WebRTC directly, as I want control over the raw data.
The issue I encounter is that my video element displays nothing, and I don't get any errors back. I am using Chrome Version 51.0.2704.103 (64-bit) on elementary OS (an Ubuntu 14.04-based Linux distribution).
As a side note, if I record all the blobs into an array, then create a new blob from them and set the video element's src to URL.createObjectURL(blob), the video displays correctly.
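For reference, that working record-then-play variant looks roughly like this (a minimal sketch; recorder and video are set up as in the code below):
var chunks = [];
recorder.ondataavailable = function(event) {
    chunks.push(event.data);
};
recorder.onstop = function() {
    // one blob from all recorded chunks, played back as a regular file
    var blob = new Blob(chunks, { type: "video/webm" });
    video.src = URL.createObjectURL(blob);
    video.play();
};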
Here is the code I tried to accomplish this (minus the relaying, I'm just trying to play it locally):
var ms = new MediaSource();
var video = document.querySelector("video");
video.src = window.URL.createObjectURL(ms);

ms.addEventListener("sourceopen", function() {
    var sourceBuffer = ms.addSourceBuffer('video/webm; codecs="vorbis,vp8"');
    navigator.getUserMedia({video: {width: 320, height: 240, framerate: 30}, audio: true}, function(stream) {
        var recorder = new MediaRecorder(stream);
        recorder.ondataavailable = function(event) {
            var reader = new FileReader();
            reader.addEventListener("loadend", function () {
                var uint8Chunk = new Uint8Array(reader.result);
                if (!sourceBuffer.updating) {
                    sourceBuffer.appendBuffer(uint8Chunk);
                }
                if (video.paused) video.play();
            });
            reader.readAsArrayBuffer(event.data);
        };
        recorder.start(10);
    }, function(error) {
        console.error(error);
    });
}, false);
Here is the info I get in chrome://media-internals:
render_id: 147
player_id: 0
pipeline_state: kPlaying
event: WEBMEDIAPLAYER_CREATED
url: blob:http%3A//localhost%3A8080/e5c51dd8-5709-4e6f-9457-49ac8c34756b
found_audio_stream: true
audio_codec_name: opus
found_video_stream: true
video_codec_name: vp8
duration: unknown
audio_dds: false
audio_decoder: OpusAudioDecoder
video_dds: false
video_decoder: FFmpegVideoDecoder
Also the log:
00:00:00 00 pipeline_state kCreated
00:00:00 00 event WEBMEDIAPLAYER_CREATED
00:00:00 00 url blob:http%3A//localhost%3A8080/e5c51dd8-5709-4e6f-9457-49ac8c34756b
00:00:00 00 pipeline_state kInitDemuxer
00:00:01 603 found_audio_stream true
00:00:01 603 audio_codec_name opus
00:00:01 603 found_video_stream true
00:00:01 603 video_codec_name vp8
00:00:01 604 duration unknown
00:00:01 604 pipeline_state kInitRenderer
00:00:01 604 audio_dds false
00:00:01 604 audio_decoder OpusAudioDecoder
00:00:01 604 video_dds false
00:00:01 604 video_decoder FFmpegVideoDecoder
00:00:01 604 pipeline_state kPlaying
Update: I've tried sending the data to Node and saving it to a webm file with ffmpeg (fluent-ffmpeg), and I can view the file correctly in VLC.
Update 2: After streaming it back from Node, I get the following warning: "Media segment did not contain any video coded frames, mismatching initialization segment. Therefore, MSE coded frame processing may not interoperably detect discontinuities in appended media." After doing some research, it appears that webm files must be segmented to work; however, I have not come across a way to do this (either with ffmpeg or other tools) for live streams. Any ideas here?
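One thing worth checking independently of segmentation: the code above silently drops any chunk that arrives while sourceBuffer.updating is true, which by itself creates exactly the kind of gap the warning describes. A minimal append queue (a sketch, reusing the sourceBuffer and uint8Chunk names from above) would be:
var queue = [];
function appendNext() {
    // append at most one pending chunk; appendBuffer throws while updating
    if (queue.length && !sourceBuffer.updating) {
        sourceBuffer.appendBuffer(queue.shift());
    }
}
sourceBuffer.addEventListener("updateend", appendNext);
// in the FileReader callback, instead of appending directly:
queue.push(uint8Chunk);
appendNext();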

A little late, but you can try it like this (in Chrome):
<html>
<body>
    <video class="real1" autoplay controls></video>
    <video class="real2" controls></video>
    <script>
        const constraints = { video: { width: 320, height: 240, framerate: 30 }, audio: true };
        const video1 = document.querySelector('.real1');
        const video2 = document.querySelector('.real2');

        var mediaSource = new MediaSource();
        video2.src = window.URL.createObjectURL(mediaSource);

        var sourceBuffer;
        mediaSource.addEventListener('sourceopen', function () {
            sourceBuffer = mediaSource.addSourceBuffer('video/webm; codecs=opus,vp8');
            console.log(sourceBuffer);
        });

        var mediaRecorder;
        function handleSuccess(stream) {
            video1.srcObject = stream;
            // use the same mime type for the recorder as for the source buffer
            mediaRecorder = new MediaRecorder(stream, { mimeType: 'video/webm; codecs=opus,vp8' });
            console.log(mediaRecorder.mimeType);
            mediaRecorder.ondataavailable = function (e) {
                var reader = new FileReader();
                reader.onload = function (e) {
                    sourceBuffer.appendBuffer(new Uint8Array(e.target.result));
                };
                reader.readAsArrayBuffer(e.data);
                if (video2.paused) {
                    video2.play();
                }
            };
            mediaRecorder.start(20);
        }
        function handleError(error) {
            console.error('Reeeejected!', error);
        }
        navigator.mediaDevices.getUserMedia(constraints)
            .then(handleSuccess)
            .catch(handleError);
    </script>
</body>
</html>
I think you missed setting the same (supported) codec on both the recorder and the source buffer.
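A quick way to avoid such a mismatch is to probe both APIs for a common MIME type before wiring anything up:
var mime = 'video/webm; codecs=opus,vp8';
if (MediaRecorder.isTypeSupported(mime) && MediaSource.isTypeSupported(mime)) {
    // safe to pass `mime` to both the MediaRecorder constructor and addSourceBuffer()
}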

Related

WebRTC AGC (Automatic Gain Control): can it really be disabled?

I've installed AppRTC (https://github.com/webrtc/apprtc) on a separate server to try out more flexible control of the WebRTC parameters.
The main task is to disable Automatic Gain Control (AGC).
The following steps have been performed:
The constraints for the media stream:
{
    video:
    {
        frameRate: 30,
        width: 640,
        height: 480
    },
    audio:
    {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: false
    }
}
A GainNode filter has been added via audioContext.createGain() and given a fixed value via gainNode.gain.value.
To be able to verify the absence of AGC, a graphical audio meter has been added using audioContext.createScriptProcessor(...).onaudioprocess.
The problem is that the AGC is in fact not disabled and the gain still behaves dynamically: during a long, monotone, loud sound the meter drops to a significantly lower value after 5-6 seconds, and after 5-6 seconds of silence it returns to the previous range.
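As a sanity check, the track itself can report which settings the browser actually applied; getSettings() includes autoGainControl in browsers that support the constraint:
var audioTrack = stream.getAudioTracks()[0];
console.log(audioTrack.getSettings());
// e.g. { autoGainControl: false, echoCancellation: true, noiseSuppression: true, ... }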
All of this has been tested on macOS Catalina 10.15.7, in the following browsers:
Mozilla Firefox 82.0.3,
Google Chrome 86.0.4240.198,
Safari 14.0 (15610.1.28.1.9, 15610),
and also on iOS 14.2 Safari.
The question: is there a functioning possibility to turn off AGC and to follow that not only "by hearing" but also by the meter values?
The full code of the gain fixation method:
src/web_app/html/index_template.html
var loadingParams = {
    errorMessages: [],
    isLoopback: false,
    warningMessages: [],
    roomId: '101',
    roomLink: 'https://www.xxxx.com/r/101',
    // mediaConstraints: {"audio": true, "video": true},
    mediaConstraints: {
        video: {frameRate: 30, width: 640, height: 480},
        audio: {echoCancellation: true, noiseSuppression: true, autoGainControl: false}
    },
    offerOptions: {},
    peerConnectionConfig: {
        "bundlePolicy": "max-bundle",
        "iceServers": [
            {"urls": ["turn:www.xxxx.com:3478?transport=udp", "turn:www.xxxx.com:3478?transport=tcp"], "username": "demo", "credential": "demo"},
            {"urls": ["stun:www.xxxx.com:3478"], "username": "demo", "credential": "demo"}
        ],
        "rtcpMuxPolicy": "require"
    },
    peerConnectionConstraints: {"optional": []},
    iceServerRequestUrl: 'https://www.xxxx.com//v1alpha/iceconfig?key=',
    iceServerTransports: '',
    wssUrl: 'wss://www.xxxx.com:8089/ws',
    wssPostUrl: 'https://www.xxxx.com:8089',
    bypassJoinConfirmation: false,
    versionInfo: {"time": "Wed Sep 23 12:49:00 2020 +0200", "gitHash": "78600dbe205774c115cf481a091387d928c99d6a", "branch": "master"},
};
src/web_app/js/appcontroller.js
AppController.prototype.gainStream = function (stream, gainValue) {
    var max_level_L = 0;
    var old_level_L = 0;

    // simple on-screen level meter
    var cnvs = document.createElement('canvas');
    cnvs.style.cssText = 'position:fixed;width:320px;height:30px;z-index:100;background:#000';
    document.body.appendChild(cnvs);
    var cnvs_cntxt = cnvs.getContext("2d");

    var videoTracks = stream.getVideoTracks();
    var context = new AudioContext();
    var mediaStreamSource = context.createMediaStreamSource(stream);
    var mediaStreamDestination = context.createMediaStreamDestination();
    var gainNode = context.createGain();
    var javascriptNode = context.createScriptProcessor(1024, 1, 1);

    mediaStreamSource.connect(gainNode);
    mediaStreamSource.connect(javascriptNode);
    gainNode.connect(mediaStreamDestination);
    javascriptNode.connect(mediaStreamDestination);

    javascriptNode.onaudioprocess = function (event) {
        // RMS level of the left channel, with a slow decay so the meter falls gradually
        var inpt_L = event.inputBuffer.getChannelData(0);
        var instant_L = 0.0;
        var sum_L = 0.0;
        for (var i = 0; i < inpt_L.length; ++i) {
            sum_L += inpt_L[i] * inpt_L[i];
        }
        instant_L = Math.sqrt(sum_L / inpt_L.length);
        max_level_L = Math.max(max_level_L, instant_L);
        instant_L = Math.max(instant_L, old_level_L - 0.008);
        old_level_L = instant_L;

        cnvs_cntxt.clearRect(0, 0, cnvs.width, cnvs.height);
        cnvs_cntxt.fillStyle = '#00ff00';
        cnvs_cntxt.fillRect(10, 10, (cnvs.width - 20) * (instant_L / max_level_L), (cnvs.height - 20)); // x,y,w,h
    };

    gainNode.gain.value = gainValue;

    // return the processed audio plus the original video tracks
    var controlledStream = mediaStreamDestination.stream;
    for (const videoTrack of videoTracks) {
        controlledStream.addTrack(videoTrack);
    }
    return controlledStream;
};
AppController.prototype.onLocalStreamAdded_ = function(stream) {
    trace('User has granted access to local media.');
    this.localStream_ = this.gainStream(stream, 100);
    this.infoBox_.getLocalTrackIds(this.localStream_);
    if (!this.roomSelection_) {
        this.attachLocalStream_();
    }
};
Thank you!
I think the problem you ran into is that echoCancellation and noiseSuppression modify the signal as well. Since you mentioned that you are using a long monotone sound to test your code, it could very well be that the noiseSuppression algorithm tries to reduce that "noise".
Unfortunately there is no way to tell why the signal was modified. You have to trust the browser here that it has actually switched off the gain control and that all remaining modifications come from the other two algorithms.
If you don't want to fully trust the browser, you could also experiment a bit by running your tests with other sounds. They should not be "noisy", but it's difficult to say what gets detected by the browser as noise and what doesn't.
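One way to separate the effects is to switch off all three processing algorithms and compare the meter readings; with everything disabled, the level should follow the raw microphone signal (a sketch):
navigator.mediaDevices.getUserMedia({
    audio: {
        echoCancellation: false,
        noiseSuppression: false,
        autoGainControl: false
    }
}).then(function (stream) {
    // feed this stream into the same meter; any remaining level changes
    // then come from the microphone itself, not from browser processing
});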

Detect if MediaStreamTrack is black/blank

I'm creating a video chat with peerjs.
I'm toggling the camera (on/off) with the following function:
function toggleCamera() {
    localStream.getVideoTracks()[0].enabled = !localStream.getVideoTracks()[0].enabled;
}
After calling this function, the video goes black and the receiver gets just a black screen (which works as intended).
Now I want to detect the black/blank screen so I can show the user a message or an icon that the camera is disabled and there is no stream.
How do I detect that?
The common approach is to send a signaling message (either via the normal path or a datachannel). Polling getStats to detect the black frames is a valid approach but more expensive in terms of computation.
After some time I've managed to come up with a solution:
var previousBytes = 0;
var previousTS = 0;
var currentBytes = 0;
var currentTS = 0;

// peer - new Peer()
// stream - local camera stream (received from navigator.mediaDevices.getUserMedia(constraints))
let connection = peer.call(peerID, stream);

// peerConnection - reference to RTCPeerConnection (https://peerjs.com/docs.html#dataconnection-peerconnection)
connection.peerConnection.getStats(null).then(stats => {
    stats.forEach(report => {
        if (report.type === "inbound-rtp") {
            currentBytes = report.bytesReceived;
            currentTS = report.timestamp;
            if (previousBytes == 0) {
                previousBytes = currentBytes;
                previousTS = currentTS;
                return;
            }
            console.log({ previousBytes });
            console.log({ currentBytes });
            var deltaBytes = currentBytes - previousBytes;
            var deltaTS = currentTS - previousTS;
            console.log("Delta: " + (deltaBytes / deltaTS) + " kB/s");
            previousBytes = currentBytes;
            previousTS = currentTS;
        }
    });
});
This code actually lives in a function which gets called every second. When the camera is turned on and not covered, the delta is between 100 and 250 kB/s; when the camera is turned off (programmatically) or covered (with a napkin or something), so the camera stream is black/blank, it is between 1.5 and 3 kB/s. After you turn the camera back on, there is a spike that reaches around 500 kB/s.
This is a short console log:
124.52747252747253 kB/s
202.213 kB/s
194.64764764764766 kB/s
15.313 kB/s (this is where camera is covered)
11.823823823823824 kB/s
11.862137862137862 kB/s
2.164 kB/s
2.005 kB/s
2.078078078078078 kB/s
1.99 kB/s
2.059 kB/s
1.992992992992993 kB/s
159.89810189810188 kB/s (uncovered camera)
502.669 kB/s
314.7927927927928 kB/s
255.0909090909091 kB/s
220.042 kB/s
213.46353646353646 kB/s
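For completeness, the same idea can be packaged as a reusable helper with a threshold. This is a sketch based on the numbers above; the 10 kB/s cutoff and the onChange callback are my own choices, and older browsers report report.mediaType instead of report.kind:
function startBlankDetection(connection, onChange) {
    var previousBytes = 0;
    var previousTS = 0;
    var wasBlank = false;
    return setInterval(function () {
        connection.peerConnection.getStats(null).then(function (stats) {
            stats.forEach(function (report) {
                // only look at the incoming video stream
                if (report.type !== "inbound-rtp" || report.kind !== "video") return;
                if (previousTS !== 0) {
                    // bytes per millisecond is numerically kB/s
                    var kBps = (report.bytesReceived - previousBytes) / (report.timestamp - previousTS);
                    var isBlank = kBps < 10; // sits between the ~2 kB/s and 100+ kB/s ranges above
                    if (isBlank !== wasBlank) {
                        wasBlank = isBlank;
                        onChange(isBlank);
                    }
                }
                previousBytes = report.bytesReceived;
                previousTS = report.timestamp;
            });
        });
    }, 1000);
}

// usage: toggle a "camera off" icon whenever the state changes
var timerId = startBlankDetection(connection, function (blank) {
    document.querySelector(".camera-off-icon").hidden = !blank;
});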
EDIT:
So in the end I did as @Philipp Hancke said. I created a master connection which is open from when the page loads until the user closes it. Over this connection I send commands for initiating a video call, canceling a video session, turning the camera on/off, and so on. On the other side I parse these commands and execute the corresponding functions.
function sendMutedMicCommand() { masterConnection.send(`${commands.MutedMic}`); }
function sendUnmutedMicCommand() { masterConnection.send(`${commands.UnmutedMic}`); }
function sendPromptVideoCallCommand() { masterConnection.send(`${commands.PromptVideoCall}`); }
function sendAcceptVideoCallCommand() { masterConnection.send(`${commands.AcceptVideoCall}`); }
function sendDeclineVideoCallCommand() { masterConnection.send(`${commands.DeclineVideoCall}`); }
Function which handles data:
function handleData(data) {
    let actionType = data;
    switch (actionType) {
        case commands.MutedMic: ShowMuteIconOnReceivingVideo(true); break;
        case commands.UnmutedMic: ShowMuteIconOnReceivingVideo(false); break;
        case commands.PromptVideoCall: showVideoCallModal(); break;
        case commands.AcceptVideoCall: startVideoConference(); break;
        case commands.DeclineVideoCall: showDeclinedCallAlert(); break;
        default: break;
    }
}

const commands = {
    MutedMic: "mutedMic",
    UnmutedMic: "unmutedMic",
    PromptVideoCall: "promptVideoCall",
    AcceptVideoCall: "acceptVideoCall",
    DeclineVideoCall: "declineVideoCall",
};
And then when I receive a mutedMic command, I show an icon with a crossed-out mic. When I receive an AcceptVideoCall command, I create another peer, videoCallPeer, with a random ID, which is then sent to the other side. The other side then creates another peer with a random ID and initiates the video session with the received ID.

stats.js shows FPS 0~2, render movement too slow

I'm a beginner with three.js, and I'm using it for a BIM project.
When I load a glTF file of ~25 MB, I can barely move the whole object, and the stats.js monitor shows an FPS of 0~2 at most.
gltf file: https://github.com/xeolabs/xeogl/tree/master/examples/models/gltf/schependomlaan
I'm using three.js with Vue.js.
// package.json
"stats.js": "^0.17.0",
"three": "^0.109.0",

import * as THREE from 'three';
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js';
import { DRACOLoader } from 'three/examples/jsm/loaders/DRACOLoader.js';
import { OrbitControls } from 'three/examples/jsm/controls/OrbitControls.js';

this.scene = new THREE.Scene();

this.stats = new Stats();
this.stats.showPanel(0, 1, 2); // 0: fps, 1: ms, 2: mb, 3+: custom
let div = document.createElement('div');
div.appendChild(this.stats.dom);
div.style.position = 'absolute';
div.style.top = 0;
div.style.left = 0;
document.getElementsByClassName('gltfViewer')[0].appendChild(div);

// Camera
this.camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 1, 1500);
this.camera.position.set(this.pos, this.pos, this.pos);

// Renderer
this.raycaster = new THREE.Raycaster();
this.renderer = new THREE.WebGLRenderer({ canvas: document.getElementById('gltfViewerCanvas'), alpha: false });
this.renderer.setClearColor(0xefefef);
this.renderer.setPixelRatio(window.devicePixelRatio);
this.renderer.setSize(window.innerWidth, window.innerHeight);

// Adding controls
this.controls = new OrbitControls(this.camera, this.renderer.domElement);
this.controls.dampingFactor = 0.1;
this.controls.rotateSpeed = 0.12;
this.controls.enableDamping = true;
this.controls.update();
window.addEventListener('resize', _ => this.render());
this.controls.addEventListener('change', _ => this.render());

// Light
var ambientLight = new THREE.AmbientLight(0xcccccc);
this.scene.add(ambientLight);
var directionalLight = new THREE.DirectionalLight(0xffffff);
directionalLight.position.set(0, 1, 1).normalize();
this.scene.add(directionalLight);

// Loading the glTF file
// Instantiate a loader
this.gltfLoader = new GLTFLoader();
// Optional: Provide a DRACOLoader instance to decode compressed mesh data
this.dracoLoader = new DRACOLoader();
this.dracoLoader.setDecoderPath('three/examples/js/libs/draco');
this.gltfLoader.setDRACOLoader(this.dracoLoader);
// Load a glTF resource
this.gltfLoader.load(this.src, this.onGLTFLoaded, this.onGLTFLoading, this.onGLTFLoadingError);

// onGLTFLoaded()
this.scene.add(optimizedGltf.scene);
// gltf.scene.getObjectById(404).visible = false;
this.listGltfObjects(gltf);
this.render();

// render()
this.renderer.render(this.scene, this.camera);
this.stats.update();

// On mounted component:
animate();

// animate()
this.stats.begin();
this.render();
this.stats.end();
Even after applying Draco compression using https://github.com/AnalyticalGraphicsInc/gltf-pipeline, nothing changes.
Thanks
On file size:
Draco compression reduces network transfer size, but not the final amount of uncompressed data that must be sent to your GPU and rendered. If your original mesh was 100 MB and you compress it to 25 MB, you will still get the framerate of the original 100 MB mesh. Aside: using the -b option of glTF-Pipeline will reduce the size by another 50%, to 13 MB, but again doesn't affect FPS.
On framerate:
This model contains 4,280 meshes¹, each requiring a GPU draw call. That is the source of your low FPS, and unfortunately it's a common problem in BIM models. You'll need to merge these meshes (in a program like Blender, or after loading in three.js) into as few as possible. A model like this should require < 100 draw calls, or even as few as 1.
¹ To see this, try opening the model on https://gltf-viewer.donmccurdy.com/ and opening the JavaScript console. You should see a printout of the scene graph, which will contain many different meshes.
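If you would rather merge after loading in three.js, a rough sketch looks like the following. This assumes all meshes can share a single material (per-object materials would need vertex colors or merge groups), and mergeBufferGeometries returns null if the geometries have differing attribute sets; also note geometry.applyMatrix was renamed applyMatrix4 after r109.
import * as THREE from 'three';
import { BufferGeometryUtils } from 'three/examples/jsm/utils/BufferGeometryUtils.js';

function mergeGltfScene(gltfScene) {
    const geometries = [];
    gltfScene.updateMatrixWorld(true);
    gltfScene.traverse(node => {
        if (node.isMesh) {
            // bake the world transform into a clone of each geometry
            const geometry = node.geometry.clone();
            geometry.applyMatrix(node.matrixWorld); // applyMatrix4 in r110+
            geometries.push(geometry);
        }
    });
    // one merged geometry -> one draw call (null on mismatched attributes)
    const merged = BufferGeometryUtils.mergeBufferGeometries(geometries, false);
    return new THREE.Mesh(merged, new THREE.MeshStandardMaterial({ color: 0xcccccc }));
}

// usage in onGLTFLoaded(): this.scene.add(mergeGltfScene(gltf.scene));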

'TypeError: currentSubs[i] is not a function' when using ports in Elm 0.19

I am attempting to send data from Elm 0.19 to JavaScript using ports.
Edit: The problem seems to be related to running/building with elm-app
In Elm, I declare an outgoing port:
port modelToJs : Json.Encode.Value -> Cmd msg
which I use in the update function to produce a Cmd that sends a JSON encoded value to JavaScript.
In JS, I instantiate the Elm app:
const app = Elm.Main.init({
    node: document.getElementById('root')
});
and register the data handler:
app.ports.modelToJs.subscribe(function dataHandler(data) {
    console.log("got from Elm:" + data);
});
When modelToJs is called, the data is not sent and printed to the console. Instead, I get the following JavaScript runtime error (of the kind Elm claims to avoid by design):
TypeError: currentSubs[i] is not a function
var value = _Json_unwrap(converter(cmdList.a));
2160 | for (var i = 0; i < currentSubs.length; i++)
2161 | {
> 2162 | currentSubs[i](value);
2163 | }
2164 | }
2165 | return init;
I have also provided a full proof of concept project on GitHub: https://github.com/mpgirro/elm0.19-ports-issue
The repo also contains an image of the error message (sorry, I lack the reputation to post images).
The error appears to be in dataHandler.js. It currently contains this:
function dataHandler(data) {
    console.log("got from Elm:" + data);
}
If you declare the function as export default the problem goes away:
export default function dataHandler(data) {
    console.log("got from Elm:" + data);
}
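Presumably the app's entry point then imports the handler and passes it to subscribe, something like the following (the file name is an assumption based on the repo layout). Without the default export, the imported binding is not a function, so Elm's internal currentSubs[i](value) call blows up exactly as shown above:
// index.js (hypothetical wiring)
import dataHandler from './dataHandler';

const app = Elm.Main.init({
    node: document.getElementById('root')
});
// dataHandler must be a function here, hence the `export default`
app.ports.modelToJs.subscribe(dataHandler);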

Issue with c8yDeviceControl.create

The code:
this.sendOperations = function () {
    var operation = {
        deviceId: '12161',
        com_cumulocity_model_WebCamDevice: {
            name: 'take picture',
            parameters: {
                duration: '5s',
                quality: 'HD'
            }
        }
    };
    c8yDeviceControl.create(operation);
};
Result:
A new operation is created in the Cumulocity server, but in the meantime the Chrome browser on which the app is running reports some errors, although it looks like the app keeps running after that:
angular.js:9997 TypeError: Cannot read property 'match' of null
at k (deviceControl.js:267)
at wrappedCallback (angular.js:11498)
at wrappedCallback (angular.js:11498)
at angular.js:11584
at Scope.$eval (angular.js:12608)
at Scope.$digest (angular.js:12420)
at Scope.$apply (angular.js:12712)
at done (angular.js:8315)
at completeRequest (angular.js:8527)
at XMLHttpRequest.xhr.onreadystatechange (angular.js:8466)
Any suggestions? Thanks
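As a first debugging step, you could attach handlers to the promise that c8yDeviceControl.create presumably returns (the angular.js frames in the stack trace suggest a $q/$http promise chain); an error thrown inside the library's internal success callback should then surface as a rejection instead of escaping into the digest cycle. A sketch, under that assumption:
c8yDeviceControl.create(operation).then(function (createdOperation) {
    console.log('operation created', createdOperation);
}, function (error) {
    // an error thrown in the library's success callback (deviceControl.js:267)
    // should land here as a rejection, where it can be inspected
    console.error('create failed', error);
});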