I have searched many web pages and even bought the book "Getting Started with WebRTC", whose sample gives errors.
However, I could not find any working example of a simple one-to-one video chat.
Ideally it would use Node.js for the signaling transport.
I think the samples I have found do not respect these rules (a minimal candidate-queuing sketch follows the list):
Peers must have their local video streaming in place before sending the SDP (offer/answer)
For the answerer: do not add ICE candidates until the peer generates the answer SDP
Once remote media starts streaming, stop adding ICE candidates
Never create the peer connection for the answerer until you get the offer SDP
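For the candidate-ordering rules above, a minimal sketch (my own variable names, not from the book sample) that buffers remote candidates until setRemoteDescription has succeeded:
var pending_candidates = [];
var remote_description_set = false;
function on_remote_candidate(candidate) {
if (remote_description_set) {
peer_connection.addIceCandidate(new RTCIceCandidate(candidate));
} else {
pending_candidates.push(candidate); // too early: hold until the remote description is set
}
}
function on_remote_description_set() { // call this from the setRemoteDescription success callback
remote_description_set = true;
pending_candidates.forEach(function(candidate) {
peer_connection.addIceCandidate(new RTCIceCandidate(candidate));
});
pending_candidates = [];
}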
I have found many personal APIs and personal approaches, but could not find the SIMPLEST way to create a single one-to-one video chat.
Best regards
The sample from the WebRTC book (which otherwise does work) gives this error:
Mon Apr 14 2014 23:19:26 GMT+0200 (Paris, Madrid) invalid signal:
{"type":"new_ice_candidate","candidate":{"candidate":"candidate:1 2 UDP 1692467198 90.7.245.247 63704 typ srflx raddr 192.168.1.15 rport 63704","sdpMid":"","sdpMLineIndex":0}}
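(Note: the server code below relays only messages that carry a token field and logs "invalid signal" otherwise, and the candidate message above carries no token; the onicecandidate handler has to include call_token, as in the client code below.)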
Client side:
<!DOCTYPE html>
<html>
<head>
<script>
var webrtc_capable = true;
var rtc_peer_connection = null;
var rtc_session_description = null;
var get_user_media = null;
var connect_stream_to_src = null;
var stun_server = "stun01.sipphone.com";
if (navigator.getUserMedia) { // WebRTC 1.0 standard compliant browser
rtc_peer_connection = RTCPeerConnection;
rtc_session_description = RTCSessionDescription;
get_user_media = navigator.getUserMedia.bind(navigator);
connect_stream_to_src = function(media_stream, media_element) {
// https://www.w3.org/Bugs/Public/show_bug.cgi?id=21606
media_element.srcObject = media_stream;
media_element.play();
};
} else if (navigator.mozGetUserMedia) { // early firefox webrtc implementation
rtc_peer_connection = mozRTCPeerConnection;
rtc_session_description = mozRTCSessionDescription;
get_user_media = navigator.mozGetUserMedia.bind(navigator);
connect_stream_to_src = function(media_stream, media_element) {
media_element.mozSrcObject = media_stream;
media_element.play();
};
stun_server = "74.125.31.127:19302";
} else if (navigator.webkitGetUserMedia) { // early webkit webrtc implementation
rtc_peer_connection = webkitRTCPeerConnection;
rtc_session_description = RTCSessionDescription;
get_user_media = navigator.webkitGetUserMedia.bind(navigator);
connect_stream_to_src = function(media_stream, media_element) {
media_element.src = webkitURL.createObjectURL(media_stream);
};
} else {
alert("This browser does not support WebRTC - visit WebRTC.org for more info");
webrtc_capable = false;
}
</script>
<script>
var call_token; // unique token for this call
var signaling_server; // signaling server for this call
var peer_connection; // peer connection object
function start() {
// create the WebRTC peer connection object
peer_connection = new rtc_peer_connection({ // RTCPeerConnection configuration
"iceServers": [ // information about ice servers
{ "url": "stun:"+stun_server }, // stun server info
]
});
// generic handler that sends any ice candidates to the other peer
peer_connection.onicecandidate = function (ice_event) {
if (ice_event.candidate) {
signaling_server.send(
JSON.stringify({
token: call_token, // include the call token so the server can route the candidate
type: "new_ice_candidate",
candidate: ice_event.candidate,
})
);
}
};
// display remote video streams when they arrive using local <video> MediaElement
peer_connection.onaddstream = function (event) {
connect_stream_to_src(event.stream, document.getElementById("remote_video"));
// hide placeholder and show remote video
document.getElementById("loading_state").style.display = "none";
document.getElementById("open_call_state").style.display = "block";
};
// setup stream from the local camera
setup_video();
// setup generic connection to the signaling server using the WebSocket API
signaling_server = new WebSocket("ws://192.168.1.15:1234");
if (document.location.hash === "" || document.location.hash === undefined) { // you are the Caller
// create the unique token for this call
var token = Date.now()+"-"+Math.round(Math.random()*10000);
call_token = "#"+token;
// set location.hash to the unique token for this call
document.location.hash = token;
signaling_server.onopen = function() {
// setup caller signal handler
signaling_server.onmessage = caller_signal_handler;
// tell the signaling server you have joined the call
signaling_server.send(
JSON.stringify({
token:call_token,
type:"join",
})
);
}
document.title = "You are the Caller";
document.getElementById("loading_state").innerHTML = "Ready for a call...ask your friend to visit:<br/><br/>"+document.location;
} else { // you have a hash fragment so you must be the Callee
// get the unique token for this call from location.hash
call_token = document.location.hash;
signaling_server.onopen = function() {
// setup callee signal handler
signaling_server.onmessage = callee_signal_handler;
// tell the signaling server you have joined the call
signaling_server.send(
JSON.stringify({
token:call_token,
type:"join",
})
);
// let the caller know you have arrived so they can start the call
signaling_server.send(
JSON.stringify({
token:call_token,
type:"callee_arrived",
})
);
}
document.title = "You are the Callee";
document.getElementById("loading_state").innerHTML = "One moment please...connecting your call...";
}
}
/* functions used above are defined below */
// handler to process new descriptions
function new_description_created(description) {
peer_connection.setLocalDescription(
description,
function () {
signaling_server.send(
JSON.stringify({
token:call_token,
type:"new_description",
sdp:description
})
);
},
log_error
);
}
// handle signals as a caller
function caller_signal_handler(event) {
var signal = JSON.parse(event.data);
if (signal.type === "callee_arrived") {
peer_connection.createOffer(
new_description_created,
log_error
);
} else if (signal.type === "new_ice_candidate") {
peer_connection.addIceCandidate(
new RTCIceCandidate(signal.candidate)
);
} else if (signal.type === "new_description") {
peer_connection.setRemoteDescription(
new rtc_session_description(signal.sdp),
function () {
if (peer_connection.remoteDescription.type == "answer") {
// extend with your own custom answer handling here
}
},
log_error
);
} else {
// extend with your own signal types here
}
}
// handle signals as a callee
function callee_signal_handler(event) {
var signal = JSON.parse(event.data);
if (signal.type === "new_ice_candidate") {
peer_connection.addIceCandidate(
new RTCIceCandidate(signal.candidate)
);
} else if (signal.type === "new_description") {
peer_connection.setRemoteDescription(
new rtc_session_description(signal.sdp),
function () {
if (peer_connection.remoteDescription.type == "offer") {
peer_connection.createAnswer(new_description_created, log_error);
}
},
log_error
);
} else {
// extend with your own signal types here
}
}
// setup stream from the local camera
function setup_video() {
get_user_media(
{
"audio": true, // request access to local microphone
"video": true // request access to local camera
},
function (local_stream) { // success callback
// display preview from the local camera & microphone using local <video> MediaElement
connect_stream_to_src(local_stream, document.getElementById("local_video"));
// add local camera stream to peer_connection ready to be sent to the remote peer
peer_connection.addStream(local_stream);
},
log_error
);
}
// generic error handler
function log_error(error) {
console.log(error);
}
</script>
<style>
html, body {
padding: 0px;
margin: 0px;
font-family: "Arial","Helvetica",sans-serif;
}
#loading_state {
position: absolute;
top: 45%;
left: 0px;
width: 100%;
font-size: 20px;
text-align: center;
}
#open_call_state {
display: none;
}
#local_video {
position: absolute;
top: 10px;
left: 10px;
width: 160px;
height: 120px;
background: #333333;
}
#remote_video {
position: absolute;
top: 0px;
left: 0px;
width: 1024px;
height: 768px;
background: #999999;
}
</style>
</head>
<body onload="start()">
<div id="loading_state">
loading...
</div>
<div id="open_call_state">
<video id="remote_video"></video>
<video id="local_video"></video>
</div>
</body>
</html>
Server side (Node.js):
// useful libs
var http = require("http");
var fs = require("fs");
var websocket = require("websocket").server;
// general variables
var port = 1234;
var webrtc_clients = [];
var webrtc_discussions = {};
// web server functions
var http_server = http.createServer(function(request, response) {
var matches = undefined;
if (matches = request.url.match("^/images/(.*)")) {
var path = process.cwd()+"/images/"+matches[1];
fs.readFile(path, function(error, data) {
if (error) {
log_error(error);
} else {
response.end(data);
}
});
} else {
response.end(page);
}
});
http_server.listen(port, function() {
log_comment("server listening (port "+port+")");
});
var page = undefined;
fs.readFile("basic_video_call.html", function(error, data) {
if (error) {
log_error(error);
} else {
page = data;
}
});
// web socket functions
var websocket_server = new websocket({
httpServer: http_server
});
websocket_server.on("request", function(request) {
log_comment("new request ("+request.origin+")");
var connection = request.accept(null, request.origin);
log_comment("new connection ("+connection.remoteAddress+")");
webrtc_clients.push(connection);
connection.id = webrtc_clients.length-1;
connection.on("message", function(message) {
if (message.type === "utf8") {
log_comment("got message "+message.utf8Data);
var signal = undefined;
try { signal = JSON.parse(message.utf8Data); } catch(e) { };
if (signal) {
if (signal.type === "join" && signal.token !== undefined) {
try {
if (webrtc_discussions[signal.token] === undefined) {
webrtc_discussions[signal.token] = {};
}
} catch(e) { };
try {
webrtc_discussions[signal.token][connection.id] = true;
} catch(e) { };
} else if (signal.token !== undefined) {
try {
Object.keys(webrtc_discussions[signal.token]).forEach(function(id) {
if (id != connection.id) {
webrtc_clients[id].send(message.utf8Data, log_error);
}
});
} catch(e) { };
} else {
log_comment("signal.type="+signal.type+" *invalid signal: "+message.utf8Data);
}
} else {
log_comment("**invalid signal: "+message.utf8Data);
}
}
});
connection.on("close", function(connection) {
log_comment("connection closed ("+connection.remoteAddress+")");
Object.keys(webrtc_discussions).forEach(function(token) {
Object.keys(webrtc_discussions[token]).forEach(function(id) {
if (id === connection.id) {
delete webrtc_discussions[token][id];
}
});
});
});
});
// utility functions
function log_error(error) {
if (error !== "Connection closed" && error !== undefined) {
log_comment("ERROR: "+error);
}
}
function log_comment(comment) {
console.log((new Date())+" "+comment);
}
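To run the sample: install the signaling server's one dependency with npm install websocket, save the client page as basic_video_call.html next to the server script (that file name comes from the fs.readFile call above), and start the server with node. Then open the page on port 1234 in one tab (the Caller) and the tokenized URL it displays in a second tab (the Callee). Note the client hard-codes ws://192.168.1.15:1234 as the signaling address, so adjust it to your server's IP.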
You can refer to the samples in the codelab. Try running Step 7: Putting it all together: RTCPeerConnection + RTCDataChannel + signaling; Node.js is needed. You can also read the article How to Implement a Real-time Communication Application with WebRTC, which shares some learning experience based on the codelab.
Related
I am creating a WebRTC video chat that shows a caller all active members. When a call is initiated from Firefox and the receiver uses Chrome, this error is displayed: "Uncaught (in promise) DOMException: Failed to execute 'addIceCandidate' on 'RTCPeerConnection': Error processing ICE candidate". When a call is initiated from Firefox and the receiver also uses Firefox, I get two errors: "InvalidState: cannot add ICE candidate when there is no remote SDP" and "ICE failed, add a STUN server and see about:webrtc for details".
I don't know where I am making a mistake.
// define all data here
var usersOnline, id, currentCaller, room, caller, localUserMedia, memberInfo;
// All subscribed members.
var users = [];
var token = Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15);
// create random user id
var userId = Math.random().toString(36).substring(2, 15);
// create random username
var username = token;
// authenticating user
var currentUser = {
username: token,
userId: userId
}
// stringify user data
currentUser = JSON.stringify(currentUser);
var pusher = new Pusher('KEY', {
authEndpoint: '../auth.php',
auth: {
params: JSON.parse(currentUser)
},
cluster: 'ap2',
forceTLS: true
});
var state = pusher.connection.state;
var channel = pusher.subscribe('presence-conference');
channel.bind("pusher:subscription_succeeded", function (members) {
console.log(members);
id = channel.members.me.id;
document.getElementById('mydetails').innerHTML = 'Online Now: ' + ' ( ' + (members.count - 1) +')';
members.each(member => {
if (member.id != channel.members.me.id) {
users.push(member.id);
}
});
renderOnline();
});
// Add user online
channel.bind("pusher:member_added", member => {
users.push(member.id);
renderOnline();
});
channel.bind("pusher:member_removed", member => {
// for remove member from list:
var index = users.indexOf(member.id);
users.splice(index, 1);
if (member.id == room) {
endCall();
}
renderOnline();
});
function renderOnline(){
var list = "";
users.forEach(function(user) {
list +=
`<li>` +
user +//this will call user
` <input type="button" style="float:right;" value="Call" onclick="callUser('` +
user +
`')" id="makeCall" /></li>`;
});
document.getElementById("userDetails").innerHTML = list;
}
//To iron over browser implementation anomalies like prefixes
GetRTCPeerConnection();
GetRTCSessionDescription();
GetRTCIceCandidate();
prepareCaller();
function prepareCaller() {
//Initializing a peer connection
caller = new window.RTCPeerConnection();
//Listen for ICE Candidates and send them to remote peers
caller.onicecandidate = function(evt) {
if (!evt.candidate) return;
console.log("onicecandidate called");
onIceCandidate(caller, evt);
};
//onaddstream handler to receive remote feed and show in remoteview video element
caller.onaddstream = function(evt) {
console.log("onaddstream called");
// the remote feed belongs in the remoteview element, not selfview
if ("srcObject" in document.getElementById("remoteview")) {
document.getElementById("remoteview").srcObject = evt.stream;
} else if (window.URL) {
document.getElementById("remoteview").src = window.URL.createObjectURL(evt.stream);
} else {
document.getElementById("remoteview").src = evt.stream;
}
};
}
function getCam() {
//Get local audio/video feed and show it in selfview video element
return navigator.mediaDevices.getUserMedia({
audio: {
echoCancellation: true,
sampleSize:8
},
video: {
width: 1080,
height: 720,
aspectRatio: { ideal: 1.777778 }
}
});
}
function GetRTCIceCandidate() {
window.RTCIceCandidate =
window.RTCIceCandidate ||
window.webkitRTCIceCandidate ||
window.mozRTCIceCandidate ||
window.msRTCIceCandidate;
return window.RTCIceCandidate;
}
function GetRTCPeerConnection() {
window.RTCPeerConnection =
window.RTCPeerConnection ||
window.webkitRTCPeerConnection ||
window.mozRTCPeerConnection ||
window.msRTCPeerConnection;
return window.RTCPeerConnection;
}
function GetRTCSessionDescription() {
window.RTCSessionDescription =
window.RTCSessionDescription ||
window.webkitRTCSessionDescription ||
window.mozRTCSessionDescription ||
window.msRTCSessionDescription;
return window.RTCSessionDescription;
}
//Create and send offer to remote peer on button click
function callUser(user) {
getCam()
.then(stream => {
if("srcObject" in document.getElementById("selfview")){
document.getElementById("selfview").srcObject = stream;
}else{
if (window.URL) {
document.getElementById("selfview").src = window.URL.createObjectURL(
stream
);
} else {
document.getElementById("selfview").src = stream;
}
}
toggleEndCallButton();
caller.addStream(stream);
localUserMedia = stream;
caller.createOffer().then(function(desc) {
caller.setLocalDescription(new RTCSessionDescription(desc));
channel.trigger("client-sdp", {
sdp: desc,
room: user,
from: id
});
room = user;
});
})
.catch(error => {
console.log("an error occured", error);
});
}
function endCall() {
room = undefined;
caller.close();
for (let track of localUserMedia.getTracks()) {
track.stop();
}
prepareCaller();
toggleEndCallButton();
}
function endCurrentCall() {
channel.trigger("client-endcall", {
room: room
});
endCall();
}
//Send the ICE Candidate to the remote peer
function onIceCandidate(peer, evt) {
if (evt.candidate) {
channel.trigger("client-candidate", {
candidate: evt.candidate,
room: room
});
}
}
function toggleEndCallButton() {
if (document.getElementById("endCall").style.display == "block") {
document.getElementById("endCall").style.display = "none";
} else {
document.getElementById("endCall").style.display = "block";
}
}
//Listening for the candidate message from a peer sent from onicecandidate handler
channel.bind("client-candidate", function(msg) {
if (msg.room == room) {
console.log("candidate received");
caller.addIceCandidate(new RTCIceCandidate(msg.candidate));
}
});
//Listening for Session Description Protocol message with session details from remote peer
channel.bind("client-sdp", function(msg) {
if (msg.room == id) {
console.log("sdp received");
var answer = confirm(
"You have a call from: " + msg.from + ". Would you like to answer?"
);
if (!answer) {
return channel.trigger("client-reject", { room: msg.room, rejected: id });
}
room = msg.room;
getCam()
.then(stream => {
localUserMedia = stream;
toggleEndCallButton();
if("srcObject" in document.getElementById("selfview")){
document.getElementById("selfview").srcObject = stream;
}else{
if (window.URL) {
document.getElementById("selfview").src = window.URL.createObjectURL(
stream
);
} else {
document.getElementById("selfview").src = stream;
}
}
caller.addStream(stream);
var sessionDesc = new RTCSessionDescription(msg.sdp);
caller.setRemoteDescription(sessionDesc);
caller.createAnswer().then(function(sdp) {
caller.setLocalDescription(new RTCSessionDescription(sdp));
channel.trigger("client-answer", {
sdp: sdp,
room: room
});
});
})
.catch(error => {
console.log("an error occured", error);
});
}
});
//Listening for answer to offer sent to remote peer
channel.bind("client-answer", function(answer) {
if (answer.room == room) {
console.log("answer received");
caller.setRemoteDescription(new RTCSessionDescription(answer.sdp));
}
});
channel.bind("client-reject", function(answer) {
if (answer.room == room) {
console.log("Call declined");
alert("call to " + answer.rejected + "was politely declined");
endCall();
}
});
channel.bind("client-endcall", function(answer) {
if (answer.room == room) {
console.log("Call Ended");
endCall();
}
});
I EXPECTED the video call to work. I don't want to use any API; please help me see where I went wrong.
Call setRemoteDescription(offer) before requesting the camera.
This puts the RTCPeerConnection in the right signaling state ("have-remote-offer") to receive and process remote ICE candidates correctly.
There's no time to request the camera first when an offer comes in: incoming offers are typically followed closely by trickled ICE candidates on your signaling channel, and addIceCandidate won't know what to do with those if the connection hasn't seen an offer.
Move the setRemoteDescription call ahead of the getCam call in the promise chain to fix it. You then have more time before returning an answer.
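A minimal sketch of the reordered chain, reusing the caller, channel, getCam, and room names from the question (the confirm prompt and selfview wiring are omitted for brevity):
channel.bind("client-sdp", function(msg) {
if (msg.room != id) return;
room = msg.room;
// 1. apply the remote offer first, so trickled candidates arriving right behind it can be processed
caller.setRemoteDescription(new RTCSessionDescription(msg.sdp))
.then(getCam) // 2. only now prompt for the camera
.then(function(stream) {
localUserMedia = stream;
caller.addStream(stream);
return caller.createAnswer();
})
.then(function(answer) {
return caller.setLocalDescription(answer).then(function() {
channel.trigger("client-answer", { sdp: answer, room: room });
});
})
.catch(function(error) {
console.log("an error occurred", error);
});
});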
Though that's still not great, since this approach often ends up blocking initial WebRTC negotiation on a user permission prompt for the camera. This is called tight coupling. Sadly, the current state of WebRTC encourages it, since getting the best IP mode is gated on getUserMedia in most browsers.
Lastly, there's a lot of old API usage here. See my other answer for newer APIs to use.
I'm writing a WebRTC Chrome desktop app that accesses 2 cameras simultaneously, using the latest Chrome version on Windows.
Listing the camera devices with navigator.mediaDevices.enumerateDevices() works, but accessing those devices by their specific id with navigator.mediaDevices.getUserMedia does not.
It only fails sometimes, and there is no error in the catch.
So I tested whether navigator.mediaDevices.getUserMedia really exists:
if (navigator && navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
}
Yes, it does.
I'm just not getting any log output from the navigator.mediaDevices.getUserMedia() call.
getVideoSources_ = function() {
return new Promise(function(resolve, reject) {
if (typeof navigator.mediaDevices.enumerateDevices === 'undefined') {
alert('Your browser does not support navigator.mediaDevices.enumerateDevices, aborting.');
reject(Error("Your browser does not support navigator.mediaDevices.enumerateDevices, aborting."));
return;
}
requestVideoSetTimeout = 500;
navigator.mediaDevices.enumerateDevices().then((devices) => {
// get the list first by sorting mibunsho camera in first place
for (var i = 0; i < devices.length; i++) {
log("devices[i]", JSON.stringify(devices[i]));
log("devices[i].label", devices[i].label);
if (devices[i].kind === 'videoinput' && devices[i].deviceId && devices[i].label) {
if (devices[i].label.indexOf("USB_Camera") > -1) {
deviceList[1] = devices[i];
} else {
deviceList[0] = devices[i];
}
}
}
// request video by sorted plan
for (var i = 0; i < deviceList.length; i++) {
requestVideo_(deviceList[i].deviceId, deviceList[i].label, resolve, reject);
requestVideoSetTimeout = 1000; // change requestVideoSetTimeout for next video request
}
}).catch((err) => {
log("getVideoSources_:" + err.name + ": " + err.message);
reject(Error("getVideoSources_ catch error"));
});
});
}
getVideoSources_().then(function(result) {
....
}).catch(function(err) {
....
});
function requestVideo_(id, label, resolve, reject) {
if (navigator && navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
log("navigator.mediaDevices.getUserMedia found!");
navigator.mediaDevices.getUserMedia({
video: {
deviceId: {exact: id},
width: 640,
height: 480,
frameRate: {
ideal: 20,
max: 20
}
},
audio: false}).then(
(stream) => {
log("***requestVideo_", id);
log("***requestVideo_", label);
log("***requestVideo_", stream);
// USB_Camera is face camera
if (label.indexOf("USB_Camera") > -1) {
log("***requestVideo_001");
myStream2 = stream;
log("***requestVideo_myStream2", myStream2);
} else {
log("***requestVideo_002");
myStream = stream;
log("***requestVideo_myStream", myStream);
getUserMediaOkCallback_(myStream, label);
}
resolve("###Video Stream got###");
stream.getVideoTracks()[0].addEventListener('ended', function(){
log("***Camera ended event fired. " + id + " " + label);
endedDevice[id] = label;
});
},
getUserMediaFailedCallback_
).catch((error) => {
log('requestVideo_: ' + error.name);
reject(Error("requestVideo_ catch error" + error.name));
});
}
}
function getUserMediaFailedCallback_(error) {
log("getUserMediaFailedCallback_ error:", error.name);
alert('User media request denied with error: ' + error.name);
}
@Kaiido is right: resolve is called in a for-loop here, which fires all over the place and before all the code has finished. Any error after this point is basically lost.
This is the promise constructor anti-pattern. In short: Don't write app code inside promise constructors. Don't pass resolve and reject functions down. Instead, let functions return promises up to you, and add all app code in then callbacks on them. Then return all promises so they form a single chain. Only then do errors propagate correctly.
See Using promises on MDN for more.
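As a sketch of that advice applied to the code above (the device-sorting logic is simplified away; getUserMediaOkCallback_, myStream, and myStream2 are the question's own names):
function requestVideo_(id, label) {
return navigator.mediaDevices.getUserMedia({
video: { deviceId: { exact: id }, width: 640, height: 480 },
audio: false
}).then(function(stream) {
// app code lives in then-callbacks; no resolve/reject passed around
if (label.indexOf("USB_Camera") > -1) {
myStream2 = stream;
} else {
myStream = stream;
getUserMediaOkCallback_(myStream, label);
}
return stream;
});
}
getVideoSources_ = function() {
return navigator.mediaDevices.enumerateDevices().then(function(devices) {
var cameras = devices.filter(function(d) { return d.kind === 'videoinput'; });
// open the cameras sequentially; any failure propagates to the caller's catch
return cameras.reduce(function(chain, camera) {
return chain.then(function() {
return requestVideo_(camera.deviceId, camera.label);
});
}, Promise.resolve());
});
};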
I need to modify the Kurento group call example from Link to send only audio if a participant has no camera. Right now audio is only received when a camera is used; when only a microphone is available I receive a DeviceMediaError.
I managed to filter whether a camera device is connected or not and then send only audio, but this doesn't work. Maybe the participant should have an audio tag instead of a video tag?
EDIT: It's only working on Firefox and not in Chrome. Any ideas?
In the file https://github.com/Kurento/kurento-tutorial-java/blob/master/kurento-group-call/src/main/java/org/kurento/tutorial/groupcall/UserSession.java, change the following line:
sender.getOutgoingWebRtcPeer().connect(incoming, MediaType.AUDIO);
and set the offer media constraints to video: false in the browser-side JS file.
Updated code:
let constraints = {
audio: true,
video: false
};
let localParticipant = new Participant(sessionId);
participants[sessionId] = localParticipant;
localVideo = document.getElementById('local_video');
let video = localVideo;
let options = {
localVideo: video,
mediaConstraints: constraints,
onicecandidate: localParticipant.onIceCandidate.bind(localParticipant),
configuration : { iceServers : [
{"url":"stun:74.125.200.127:19302"},
] }
};
localParticipant.rtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerSendonly(options, function(error) {
if (error) {
return console.error(error);
}
localVideoCurrentId = sessionId;
localVideo = document.getElementById('local_video');
localVideo.src = localParticipant.rtcPeer.localVideo.src;
localVideo.muted = true;
this.generateOffer(localParticipant.offerToReceiveVideo.bind(localParticipant));
});
server.js code
function join(socket, room, callback) {
let userSession = userRegister.getById(socket.id);
userSession.setRoomName(room.name);
room.pipeline.create('WebRtcEndpoint', {mediaProfile : 'WEBM_AUDIO_ONLY'}, (error, outgoingMedia) => {
if (error) {
console.error('no participant in room');
if (Object.keys(room.participants).length === 0) {
room.pipeline.release();
}
return callback(error);
}
// else
outgoingMedia.setMaxAudioRecvBandwidth(100);
This adds the media profile parameter on the server side while joining the room.
function getEndpointForUser(userSession, sender, callback) {
if (userSession.id === sender.id) {
return callback(null, userSession.outgoingMedia);
}
let incoming = userSession.incomingMedia[sender.id];
if (incoming == null) {
console.log(`user : ${userSession.id} create endpoint to receive video from : ${sender.id}`);
getRoom(userSession.roomName, (error, room) => {
if (error) {
return callback(error);
}
room.pipeline.create('WebRtcEndpoint', {mediaProfile : 'WEBM_AUDIO_ONLY'}, (error, incomingMedia) => {
if (error) {
if (Object.keys(room.participants).length === 0) {
room.pipeline.release();
}
return callback(error);
}
console.log(`user: ${userSession.id} successfully create pipeline`);
incomingMedia.setMaxAudioRecvBandwidth(0);
incomingMedia.getMaxAudioRecvBandwidth(0);
This adds the media profile parameter when accepting the call.
Hope this helps.
The JSFiddle (https://jsfiddle.net/kalyansai99/mm1b74uy/22/) contains code where the user can toggle between the front and back camera of a mobile device.
On some phones it works fine (Moto G5 Plus, Moto E3, and so on, in the Chrome browser). On others (Mi Redmi Note 4, Chrome browser), when I switch to the back camera the stream initially loads with a track whose readyState is "live". But by the time I am about to play the stream in the video player, the readyState has changed to "ended" and a black screen is shown in the video tag.
Not sure what's happening. Any clues?
JSFiddle code:
var player = document.getElementById('player');
var flipBtn = document.getElementById('flipBtn');
var deviceIdMap = {};
var front;
var constraints = {
audio: false,
video: {
frameRate: 1000
}
};
var gotDevices = function (deviceList) {
var length = deviceList.length;
console.log(deviceList);
for (var i = 0; i < length; i++) {
var deviceInfo = deviceList[i];
if (deviceInfo.kind === 'videoinput') {
if (deviceInfo.label.indexOf('front') !== -1) {
deviceIdMap.front = deviceInfo.deviceId;
} else if (deviceInfo.label.indexOf('back') !== -1) {
deviceIdMap.back = deviceInfo.deviceId;
}
}
}
if (deviceIdMap.front) {
constraints.video.deviceId = {exact: deviceIdMap.front};
front = true;
} else if (deviceIdMap.back) {
constraints.video.deviceId = {exact: deviceIdMap.back};
front = false;
}
console.log('deviceIdMap - ', deviceIdMap);
};
var handleError = function (error) {
console.log('navigator.getUserMedia error: ', error);
};
function handleSuccess(stream) {
window.stream = stream;
// this is a video track as there is no audio track
console.log("Track - ", window.stream.getTracks()[0]);
console.log('Ready State - ', window.stream.getTracks()[0].readyState);
if (window.URL) {
player.src = window.URL.createObjectURL(stream);
} else {
player.src = stream;
}
player.onloadedmetadata = function (e) {
console.log('Ready State - 3', window.stream.getTracks()[0].readyState);
player.play();
console.log('Ready State - 4', window.stream.getTracks()[0].readyState);
}
console.log('Ready State - 2', window.stream.getTracks()[0].readyState);
}
navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);
flipBtn.addEventListener('click', function () {
if (window.stream) {
window.stream.getTracks().forEach(function(track) {
track.stop();
});
}
if (front) {
constraints.video.deviceId = {exact: deviceIdMap.back};
} else {
constraints.video.deviceId = {exact: deviceIdMap.front};
}
front = !front;
navigator.getUserMedia(constraints, handleSuccess, handleError);
}, false);
console.log(constraints);
navigator.getUserMedia(constraints, handleSuccess, handleError);
#player {
width: 320px;
}
#flipBtn {
width: 150px;
height: 50px;
}
<video id="player" autoplay></video>
<div>
<button id="flipBtn">
Flip Camera
</button>
</div>
Replace track.stop() with track.enabled = false, and when adding the track back to the stream, re-enable it with track.enabled = true.
MediaStreamTrack.readyState changes to "ended" when we stop the track, and the track can never be used again, so it is not wise to call stop() here. For more reference:
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/readyState
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/stop
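A sketch of that suggestion applied to the fiddle's flip handler (it assumes the phone allows a second camera to open while the first track is merely disabled, which is device-dependent):
flipBtn.addEventListener('click', function () {
if (window.stream) {
window.stream.getTracks().forEach(function (track) {
track.enabled = false; // mute instead of stop, so readyState stays "live"
});
}
constraints.video.deviceId = {exact: front ? deviceIdMap.back : deviceIdMap.front};
front = !front;
navigator.mediaDevices.getUserMedia(constraints).then(handleSuccess).catch(handleError);
}, false);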
I am studying a WebRTC application.
My reference is this software
apprtc
https://code.google.com/p/webrtc/source/browse/trunk/samples/js/apprtc/
demo
https://apprtc.appspot.com/
My computer has a built-in video device and apprtc uses this video device.
However, I want to use a USB video camera instead.
I am searching for a way to change the input video device.
But I couldn't find any clue in the source files.
Does anyone have any information?
On Chrome:
chrome://settings/content/camera
chrome://settings/content/microphone
On Firefox: media.navigator.permission.disabled=false
Try this demo, which captures all audio/video input devices:
https://www.webrtc-experiment.com/demos/MediaStreamTrack.getSources.html
You can capture any "specific" device using the same API.
Edited at March 01, 2014:
MediaStreamTrack.getSources(function (media_sources) {
for (var i = 0; i < media_sources.length; i++) {
var media_source = media_sources[i];
var constraints = {};
// if audio device
if (media_source.kind == 'audio') {
constraints.audio = {
optional: [{
sourceId: media_source.id
}]
};
}
// if video device
if (media_source.kind == 'video') {
constraints.video = {
optional: [{
sourceId: media_source.id
}]
};
}
// invoke getUserMedia to capture this device
navigator.webkitGetUserMedia(constraints, function (stream) {
console.log(stream.id, stream);
}, console.error);
}
});
Updated at Sep 05, 2015:
Microsoft Edge, Chrome 44+, and Firefox 38+ now all support the navigator.mediaDevices.enumerateDevices API.
Here is a reusable script that provides a cross-browser shim for all these media-source APIs. It works even in old Chrome (43 and older), even on Android devices:
if (navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
// Firefox 38+, Microsoft Edge, and Chrome 44+ seem to support enumerateDevices
navigator.enumerateDevices = function(callback) {
navigator.mediaDevices.enumerateDevices().then(callback);
};
}
function getAllAudioVideoDevices(successCallback, failureCallback) {
if (!navigator.enumerateDevices && window.MediaStreamTrack && window.MediaStreamTrack.getSources) {
navigator.enumerateDevices = window.MediaStreamTrack.getSources.bind(window.MediaStreamTrack);
}
if (!navigator.enumerateDevices && navigator.mediaDevices.enumerateDevices) {
navigator.enumerateDevices = navigator.mediaDevices.enumerateDevices.bind(navigator);
}
if (!navigator.enumerateDevices) {
failureCallback(null, 'Neither navigator.mediaDevices.enumerateDevices NOR MediaStreamTrack.getSources are available.');
return;
}
var allMdiaDevices = [];
var allAudioDevices = [];
var allVideoDevices = [];
var audioInputDevices = [];
var audioOutputDevices = [];
var videoInputDevices = [];
var videoOutputDevices = [];
navigator.enumerateDevices(function(devices) {
devices.forEach(function(_device) {
var device = {};
for (var d in _device) {
device[d] = _device[d];
}
// make sure that we are not fetching duplicate devices
var skip;
allMdiaDevices.forEach(function(d) {
if (d.id === device.id) {
skip = true;
}
});
if (skip) {
return;
}
// if it is MediaStreamTrack.getSources
if (device.kind === 'audio') {
device.kind = 'audioinput';
}
if (device.kind === 'video') {
device.kind = 'videoinput';
}
if (!device.deviceId) {
device.deviceId = device.id;
}
if (!device.id) {
device.id = device.deviceId;
}
if (!device.label) {
device.label = 'Please invoke getUserMedia once.';
}
if (device.kind === 'audioinput' || device.kind === 'audio') {
audioInputDevices.push(device);
}
if (device.kind === 'audiooutput') {
audioOutputDevices.push(device);
}
if (device.kind === 'videoinput' || device.kind === 'video') {
videoInputDevices.push(device);
}
if (device.kind.indexOf('audio') !== -1) {
allAudioDevices.push(device);
}
if (device.kind.indexOf('video') !== -1) {
allVideoDevices.push(device);
}
// there is no 'videoouput' in the spec.
// so videoOutputDevices will always be [empty]
allMdiaDevices.push(device);
});
if (successCallback) {
successCallback({
allMdiaDevices: allMdiaDevices,
allVideoDevices: allVideoDevices,
allAudioDevices: allAudioDevices,
videoInputDevices: videoInputDevices,
audioInputDevices: audioInputDevices,
audioOutputDevices: audioOutputDevices
});
}
});
}
Here is how to use the above reusable cross-browser shim:
getAllAudioVideoDevices(function(result) {
if (result.allMdiaDevices.length) {
console.debug('Number of audio/video devices available:', result.allMdiaDevices.length);
}
if (result.allVideoDevices.length) {
console.debug('Number of video devices available:', result.allVideoDevices.length);
}
if (result.allAudioDevices.length) {
console.debug('Number of audio devices available:', result.allAudioDevices.length);
}
if (result.videoInputDevices.length) {
console.debug('Number of video-input devices available:', result.videoInputDevices.length);
}
if (result.audioInputDevices.length) {
console.debug('Number of audio-input devices available:', result.audioInputDevices.length);
}
if (result.audioOutputDevices.length) {
console.debug('Number of audio-output devices available:', result.audioOutputDevices.length);
}
if (result.allMdiaDevices.length && result.allMdiaDevices[0].label === 'Please invoke getUserMedia once.') {
console.warn('It seems you did not invoke getUserMedia before using these APIs.');
}
console.info('All audio input devices:');
result.audioInputDevices.forEach(function(device) {
console.log('Audio input device id:', device.id, 'Device label:', device.label);
});
console.info('All audio output devices:');
result.audioOutputDevices.forEach(function(device) {
console.log('Audio output device id:', device.id, 'Device label:', device.label);
});
console.info('All video input devices:');
result.videoInputDevices.forEach(function(device) {
console.log('Video input device id:', device.id, 'Device label:', device.label);
});
}, function(error) {
alert(error);
});
It turns out that Chrome does support the MediaStreamTrack API, which allows you to do this. In Firefox this API is still experimental. Here is the Chrome implementation:
if (typeof MediaStreamTrack === 'undefined'){
alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
} else {
MediaStreamTrack.getSources( onSourcesAcquired);
}
function onSourcesAcquired(sources) {
for (var i = 0; i != sources.length; ++i) {
var source = sources[i];
// source.id -> DEVICE ID
// source.label -> DEVICE NAME
// source.kind = "audio" OR "video"
// TODO: add this to some datastructure of yours or a selection dialog
}
}
....
And then when calling getUserMedia, specify the id in the constraints:
var constraints = {
audio: {
optional: [{sourceId: selected_audio_source_id}]
},
video: {
optional: [{sourceId: selected_video_source_id}]
}
};
navigator.getUserMedia(constraints, onSuccessCallback, onErrorCallback);
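On browsers that ship navigator.mediaDevices (a newer API than the optional/sourceId constraints above), the equivalent device selection would be this sketch:
navigator.mediaDevices.getUserMedia({
audio: { deviceId: { exact: selected_audio_source_id } },
video: { deviceId: { exact: selected_video_source_id } }
}).then(onSuccessCallback).catch(onErrorCallback);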
It sounds to me like you are looking for facingMode. You can check it out in this document:
http://www.w3.org/TR/2013/WD-mediacapture-streams-20130516/#idl-def-AllVideoCapabilities
Not sure how well it is supported yet, though.
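On a browser that implements it, selecting the rear camera with facingMode would look like this sketch:
navigator.mediaDevices.getUserMedia({
video: { facingMode: { exact: "environment" } } // "user" would select the front camera
}).then(function(stream) {
document.querySelector('video').srcObject = stream;
});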