When user A, who has no camera, calls user B, who has a camera, user B receives a stream without video tracks. In this case user B generates an SDP containing only the string a=group:BUNDLE audio, when normally it contains mentions of video, like a=group:BUNDLE audio video and m=video 1 RTP/SAVPF 100 116 117 96.
Here is my CoffeeScript code for accepting an offer:
accept_offer: (sdp, success) ->
  sdp = new _RTCSessionDescription sdp
  @connection.setRemoteDescription sdp, =>
    if @candidates.length
      for candidate in @candidates
        @connection.addIceCandidate candidate
      @candidates = []
    @connection.createAnswer (description) =>
      description = new _RTCSessionDescription
        sdp: @set_bandwidth description.sdp
        type: description.type
      @local_description = description
      @connection.setLocalDescription @local_description, ->
        success()
      , (e) ->
        console.log e
    , (e) ->
      console.log e
  , (e) ->
    console.log e
Why does this strange behaviour occur, and how can I avoid it?
You need to pass constraints when creating your offer/answer to tell the browser what media you are willing to send and receive.
Example:
var sdpConstraints = { 'mandatory': { 'OfferToReceiveAudio': true, 'OfferToReceiveVideo': false } };
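With the legacy callback-style API used in the question, those constraints would presumably go in as the third argument to createAnswer; a minimal sketch, not a drop-in fix:

// Sketch: legacy callback API, matching the question's code style.
// pc is the RTCPeerConnection; sdpConstraints is defined above.
pc.createAnswer(function(description) {
    pc.setLocalDescription(description);
}, function(e) {
    console.log(e);
}, sdpConstraints);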
Related
Here is my code. I create the peerConnection and use createOffer to make the SDP object, but I want to set rrtr and a 30% packet loss rate.
pc.createOffer(this.mediaConstraints).then(function(sessionDescription) {
    let sdp = sessionDescription.sdp;
    // fix the sdp: set rrtr
    let a = sdp.split('\r\n');
    sessionDescription.sdp = a.map((v, i, aa) => {
        if (v.startsWith('a=rtpmap') && aa[i + 1] && aa[i + 1].endsWith('goog-remb')) {
            let arr = v.split(/\s+/);
            return v + '\r\n' + arr[0] + ' rrtr';
        }
        return v;
    }).join('\r\n');
    pc.setLocalDescription(sessionDescription);
    // here went error
});
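If the failure happens inside the then callback, chaining a catch will at least surface the real error; a minimal sketch under that assumption:

// Sketch: surface errors from offer creation and SDP munging.
pc.createOffer(this.mediaConstraints)
    .then(function(sessionDescription) {
        // ... munge sessionDescription.sdp as above ...
        return pc.setLocalDescription(sessionDescription);
    })
    .catch(function(err) {
        console.error('createOffer/setLocalDescription failed:', err);
    });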
I'm creating a video chat with PeerJS.
I'm toggling camera (on/off) with the following function:
function toggleCamera() {
    localStream.getVideoTracks()[0].enabled = !(localStream.getVideoTracks()[0].enabled);
}
After calling this function, the video goes black and the receiver gets just a black screen (which works as intended).
Now I want to detect the black/blank screen so I can show the user a message or an icon that the camera is disabled and there is no stream.
How do I detect that?
The common approach is to send a signaling message (either via the normal path or a datachannel). Polling getStats to detect the black frames is a valid approach but more expensive in terms of computation.
After some time I've managed to get a solution:
var previousBytes = 0;
var previousTS = 0;
var currentBytes = 0;
var currentTS = 0;

// peer - new Peer()
// stream - local camera stream (received from navigator.mediaDevices.getUserMedia(constraints))
let connection = peer.call(peerID, stream);

// peerConnection - reference to RTCPeerConnection (https://peerjs.com/docs.html#dataconnection-peerconnection)
connection.peerConnection.getStats(null).then(stats => {
    stats.forEach(report => {
        if (report.type === "inbound-rtp") {
            currentBytes = report.bytesReceived;
            currentTS = report.timestamp;
            if (previousBytes == 0) {
                previousBytes = currentBytes;
                previousTS = currentTS;
                return;
            }
            console.log({ previousBytes });
            console.log({ currentBytes });
            var deltaBytes = currentBytes - previousBytes;
            var deltaTS = currentTS - previousTS;
            console.log("Delta: " + (deltaBytes / deltaTS) + " kB/s");
            previousBytes = currentBytes;
            previousTS = currentTS;
        }
    });
});
This code is actually in a function which gets called every second. When the camera is turned on and not covered, the computed rate is between 100 and 250 kB/s; when the camera is turned off (programmatically) or covered (with a napkin or something), so the camera stream is black/blank, it drops to around 1.5-3 kB/s. After you turn the camera back on, there is a spike which reaches around 500 kB/s. (A threshold sketch follows the log below.)
This is a short console log:
124.52747252747253 kB/s
202.213 kB/s
194.64764764764766 kB/s
15.313 kB/s (this is where camera is covered)
11.823823823823824 kB/s
11.862137862137862 kB/s
2.164 kB/s
2.005 kB/s
2.078078078078078 kB/s
1.99 kB/s
2.059 kB/s
1.992992992992993 kB/s
159.89810189810188 kB/s (uncovered camera)
502.669 kB/s
314.7927927927928 kB/s
255.0909090909091 kB/s
220.042 kB/s
213.46353646353646 kB/s
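Based on those numbers, a simple threshold on the computed rate can drive the UI. A minimal sketch; the 10 kB/s cutoff and the element id are assumptions derived from the log above, not part of the original code:

// Sketch: treat the stream as blank while the rate stays below a threshold.
const BLANK_THRESHOLD_KBPS = 10; // assumed value, tune against your own stats

function updateCameraIndicator(deltaBytes, deltaTS) {
    const rate = deltaBytes / deltaTS; // kB/s, since bytes / milliseconds
    const isBlank = rate < BLANK_THRESHOLD_KBPS;
    // hypothetical element showing the "camera off" state
    document.getElementById("camera-off-icon").style.display = isBlank ? "block" : "none";
}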
EDIT:
So in the end I did as @Philipp Hancke said. I created a master connection which is open from when the page loads until the user closes it. Over this connection I'm sending commands for initiating a video call, canceling a video session, turning the camera on/off, and so on. On the other side I'm parsing these commands and executing functions.
function sendMutedMicCommand() { masterConnection.send(`${commands.MutedMic}`); }
function sendUnmutedMicCommand() { masterConnection.send(`${commands.UnmutedMic}`); }
function sendPromptVideoCallCommand() { masterConnection.send(`${commands.PromptVideoCall}`); }
function sendAcceptVideoCallCommand() { masterConnection.send(`${commands.AcceptVideoCall}`); }
function sendDeclineVideoCallCommand() { masterConnection.send(`${commands.DeclineVideoCall}`); }
Function which handles data:
function handleData(data) {
    let actionType = data;
    switch (actionType) {
        case commands.MutedMic: ShowMuteIconOnReceivingVideo(true); break;
        case commands.UnmutedMic: ShowMuteIconOnReceivingVideo(false); break;
        case commands.PromptVideoCall: showVideoCallModal(); break;
        case commands.AcceptVideoCall: startVideoConference(); break;
        case commands.DeclineVideoCall: showDeclinedCallAlert(); break;
        default: break;
    }
}
const commands = {
    MutedMic: "mutedMic",
    UnmutedMic: "unmutedMic",
    PromptVideoCall: "promptVideoCall",
    AcceptVideoCall: "acceptVideoCall",
    DeclineVideoCall: "declineVideoCall",
}
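For completeness, the receiving side presumably wires handleData to the PeerJS data connection's standard 'data' event, along these lines:

// Sketch: route incoming messages from the master connection to handleData.
masterConnection.on('data', function(data) {
    handleData(data);
});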
And then when I receive the mutedMic command, I show an icon with a crossed mic. When I receive the AcceptVideoCall command, I create another peer, videoCallPeer, with a random ID, which is then sent to the other side. The other side then creates another peer with a random ID and initiates a video session with the received ID.
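A rough sketch of that handshake; the message shape here is hypothetical, while new Peer() with no argument lets PeerJS generate a random ID:

// Sketch: create a dedicated peer for the video session on accept.
function startVideoConference() {
    const videoCallPeer = new Peer(); // PeerJS assigns a random ID
    videoCallPeer.on('open', function(id) {
        // Hypothetical message carrying the ID over the master connection.
        masterConnection.send(JSON.stringify({ type: 'videoCallId', id: id }));
    });
}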
I am attempting to send data from Elm 0.19 to JavaScript using ports.
Edit: The problem seems to be related to running/building with elm-app
In Elm, I declare an outgoing port:
port modelToJs : Json.Encode.Value -> Cmd msg
which I use in the update function to produce a Cmd that sends a JSON encoded value to JavaScript.
In JS, I instantiate the Elm app:
const app = Elm.Main.init({
    node: document.getElementById('root')
});
and register the data handler:
app.ports.modelToJs.subscribe(function dataHandler(data) {
    console.log("got from Elm:" + data);
});
When modelToJs is called, the data is not sent or printed to the console. Instead, I get the following JavaScript runtime error (which Elm claims to avoid by design):
TypeError: currentSubs[i] is not a function
var value = _Json_unwrap(converter(cmdList.a));
2160 | for (var i = 0; i < currentSubs.length; i++)
2161 | {
> 2162 | currentSubs[i](value);
2163 | }
2164 | }
2165 | return init;
I have also provided a full proof of concept project on GitHub: https://github.com/mpgirro/elm0.19-ports-issue
The repo also contains an image of the error message (sorry, I lack the reputation to post images).
The error appears to be in dataHandler.js. It currently contains this:
function dataHandler(data) {
    console.log("got from Elm:" + data);
}
If you declare the function as export default the problem goes away:
export default function dataHandler(data) {
    console.log("got from Elm:" + data);
}
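That is, assuming index.js then imports the handler before subscribing, the presumed setup looks like this:

// Sketch: with a default export, the importing side can pass the handler directly.
import dataHandler from './dataHandler';

app.ports.modelToJs.subscribe(dataHandler);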
I'm trying to build user notifications using Deepstream.io. I'm using deepstream.io-storage-mongodb for storage. My data structure:
User
=================
id - email - others
Notification
=================
userId - notification
I'm trying to implement the 1-n modelling from the deepstream tutorial, but I can't understand how to do this. How can I store a pointer, or how can I point towards a List? Or how can I implement notifications using deepstream?
Thanks in advance.
You can try something like the code below (I'm using JS):
Receive Notification
var client = deepstream( 'localhost:6020' );
client.login();

// Unique identification for the user
let uniqueId = `userId:${userId}`;

// MongoDB collection: Notification
statusRecord = client.record.getList("Notification/" + uniqueId);
statusRecord.subscribe(function(data) {
    data.forEach(function(name) {
        var record = client.record.getRecord(name);
        record.whenReady(function(r) {
            // all notifications
            console.log( "r ==> ", r.get() );
        });
    });
});
Send Notification
const ds = deepstream( 'localhost:6020' );
ds.login();

// userId
const list = ds.record.getList( `Notification/${userId}` );

// id for the notification
let id = `Notification/${ds.getUid()}`;
let record = ds.record.getRecord(id);
record.set( {
    message: 'information' // save notification data
});
list.addEntry(id);
Hope it will solve your problem.
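To dismiss a notification later, the same APIs can presumably be used in reverse (removeEntry and delete are standard deepstream list/record methods); a sketch:

// Sketch: remove a handled notification from the user's list and storage.
list.removeEntry(id);                 // stop listing it
ds.record.getRecord(id).delete();     // delete the underlying record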
I am trying to display a MediaStream taken from a webcam using getUserMedia, and to relay it to a remote peer using whatever mechanism possible for it to be played (as an experiment). I am not using webRTC directly as I want control over the raw data.
The issue I encounter is that my video element displays nothing, and I don't get any errors back. I am using Chrome Version 51.0.2704.103 (64-bit) on Elementary OS (Ubuntu 14.04 based linux OS).
As a sidenote, if I record all the blobs into an array, then create a new blob and set the video's src attribute to URL.createObjectURL(blob), it displays video correctly.
Here is the code I tried in order to accomplish this (minus the relaying; I'm just trying to play it locally):
var ms = new MediaSource();
var video = document.querySelector("video");
video.src = window.URL.createObjectURL(ms);

ms.addEventListener("sourceopen", function() {
    var sourceBuffer = ms.addSourceBuffer('video/webm; codecs="vorbis,vp8"');
    navigator.getUserMedia({video: {width: 320, height: 240, framerate: 30}, audio: true}, function(stream) {
        var recorder = new MediaRecorder(stream);
        recorder.ondataavailable = function(event) {
            var reader = new FileReader();
            reader.addEventListener("loadend", function () {
                var uint8Chunk = new Uint8Array(reader.result);
                if (!sourceBuffer.updating) {
                    sourceBuffer.appendBuffer(uint8Chunk);
                }
                if (video.paused) video.play();
            });
            reader.readAsArrayBuffer(event.data);
        };
        recorder.start(10);
    }, function(error) {
        console.error(error);
    });
}, false);
Here is the info I get in chrome://media-internals:
render_id: 147
player_id: 0
pipeline_state: kPlaying
event: WEBMEDIAPLAYER_CREATED
url: blob:http%3A//localhost%3A8080/e5c51dd8-5709-4e6f-9457-49ac8c34756b
found_audio_stream: true
audio_codec_name: opus
found_video_stream: true
video_codec_name: vp8
duration: unknown
audio_dds: false
audio_decoder: OpusAudioDecoder
video_dds: false
video_decoder: FFmpegVideoDecoder
Also the log:
00:00:00 00 pipeline_state kCreated
00:00:00 00 event WEBMEDIAPLAYER_CREATED
00:00:00 00 url blob:http%3A//localhost%3A8080/e5c51dd8-5709-4e6f-9457-49ac8c34756b
00:00:00 00 pipeline_state kInitDemuxer
00:00:01 603 found_audio_stream true
00:00:01 603 audio_codec_name opus
00:00:01 603 found_video_stream true
00:00:01 603 video_codec_name vp8
00:00:01 604 duration unknown
00:00:01 604 pipeline_state kInitRenderer
00:00:01 604 audio_dds false
00:00:01 604 audio_decoder OpusAudioDecoder
00:00:01 604 video_dds false
00:00:01 604 video_decoder FFmpegVideoDecoder
00:00:01 604 pipeline_state kPlaying
Update: I've tried sending the data to node and saving it to a webm file with ffmpeg (fluent-ffmpeg), and I can view the file in VLC correctly.
Update 2: After streaming it back from node, I get the following error: Media segment did not contain any video coded frames, mismatching initialization segment. Therefore, MSE coded frame processing may not interoperably detect discontinuities in appended media. After doing some research, it appears that webm files must be segmented to work; however, I have not come across a way to do this (either using ffmpeg or other tools) for live streams. Any ideas here?
A little late, but you can try it like this (in Chrome):
<html>
<body>
    <video class="real1" autoplay controls></video>
    <video class="real2" controls></video>
    <script>
        const constraints = { video: { width: 320, height: 240, framerate: 30 }, audio: true };
        const video1 = document.querySelector('.real1');
        const video2 = document.querySelector('.real2');

        var mediaSource = new MediaSource();
        video2.src = window.URL.createObjectURL(mediaSource);

        var sourceBuffer;
        mediaSource.addEventListener('sourceopen', function () {
            sourceBuffer = mediaSource.addSourceBuffer('video/webm; codecs=opus,vp8');
            console.log(sourceBuffer);
        });

        var mediaRecorder;
        function handleSuccess(stream) {
            video1.srcObject = stream;
            // Use the same codec string for the recorder and the source buffer.
            mediaRecorder = new MediaRecorder(stream, { mimeType: 'video/webm; codecs=opus,vp8' });
            console.log(mediaRecorder.mimeType);
            mediaRecorder.ondataavailable = function (e) {
                var reader = new FileReader();
                reader.onload = function (e) {
                    sourceBuffer.appendBuffer(new Uint8Array(e.target.result));
                };
                reader.readAsArrayBuffer(e.data);
                if (video2.paused) {
                    video2.play();
                }
            };
            mediaRecorder.start(20);
        }

        function handleError(error) {
            console.error('Reeeejected!', error);
        }

        navigator.mediaDevices.getUserMedia(constraints)
            .then(handleSuccess)
            .catch(handleError);
    </script>
</body>
</html>
I think you missed setting the same (supported) codec on both the recorder and the sourceBuffer.
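To verify a codec string before wiring things up, both APIs expose a static check; a quick sanity sketch using the exact string from the answer:

// Sketch: check codec support on both the recording and MSE side.
const mimeType = 'video/webm; codecs=opus,vp8';
console.log(MediaRecorder.isTypeSupported(mimeType)); // recorder side
console.log(MediaSource.isTypeSupported(mimeType));   // source buffer side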