How do we know when WebRTC has finished collecting ICE candidates - webrtc

I am using Kurento Utils for a WebRTC connection with Kurento Media Server (ver 5.x).
Inside the kurento-utils-js library, the simplified init code looks like this:
if (!this.pc) {
  this.pc = new RTCPeerConnection(server, options);
}

var ended = false;
pc.onicecandidate = function(e) {
  // candidate exists in e.candidate
  if (e.candidate) {
    ended = false;
    return;
  }
  if (ended) {
    return;
  }
  var offerSdp = pc.localDescription.sdp;
  console.log('ICE negotiation completed');
  self.onsdpoffer(offerSdp, self);
  ended = true;
};
My question: it seems like it waits until onicecandidate is passed a "null" value, which signifies that the process has ended, before continuing to create the SDP offer, but I couldn't find this behaviour in the WebRTC specs.
My next question is: how else can we know that the process of finding ICE candidates has ended?
On one of the PCs in my office, the code never reaches console.log('ICE negotiation completed'); because the null value is never passed.

You could check the iceGatheringState property (run in Chrome):
var config = {'iceServers': [{ url: 'stun:stun.l.google.com:19302' }] };
var pc = new webkitRTCPeerConnection(config);

pc.onicecandidate = function(event) {
  if (event && event.target && event.target.iceGatheringState === 'complete') {
    alert('done gathering candidates - got iceGatheringState complete');
  } else if (event && event.candidate == null) {
    alert('done gathering candidates - got null candidate');
  } else {
    console.log(event.target.iceGatheringState, event);
  }
};

pc.createOffer(function(offer) {
  pc.setLocalDescription(offer);
}, function(err) {
  console.log(err);
}, {'mandatory': {'OfferToReceiveAudio': true}});

window.pc = pc;

From http://www.w3.org/TR/webrtc/, section 4.3.1:
"If the intent of the ICE Agent is to notify the script that:
[...]
The gathering process is done.
Set connection's ice gathering state to completed and let newCandidate be null."
So, you can either check for the ice gathering state against "completed" (in real life, this is not very reliable), or wait for a null candidate (super reliable).
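If you want to wait for either signal before reading the final SDP, here is a minimal sketch (waitForIceGathering is just an illustrative helper name, not part of kurento-utils or the WebRTC API):

// Resolves once ICE gathering has finished, using both signals described above.
function waitForIceGathering(pc) {
  return new Promise(function(resolve) {
    if (pc.iceGatheringState === 'complete') {
      return resolve();
    }
    pc.addEventListener('icegatheringstatechange', function() {
      if (pc.iceGatheringState === 'complete') {
        resolve();
      }
    });
    pc.addEventListener('icecandidate', function(e) {
      if (e.candidate === null) {
        resolve(); // a null candidate also signals the end of gathering
      }
    });
  });
}

// Usage sketch: create an offer, then wait before reading the complete SDP.
// pc.createOffer()
//   .then(offer => pc.setLocalDescription(offer))
//   .then(() => waitForIceGathering(pc))
//   .then(() => console.log(pc.localDescription.sdp));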

Related

How to fix InvalidStateError: Cannot add ICE candidate when there is no remote SDP

I am creating a WebRTC video chat that shows a caller all active members. When a call is initiated from Firefox and the receiver is using Chrome, this error is displayed: "Uncaught (in promise) DOMException: Failed to execute 'addIceCandidate' on 'RTCPeerConnection': Error processing ICE candidate". And when a call is initiated from Firefox and the receiver also uses Firefox, I get two errors: "InvalidStateError: Cannot add ICE candidate when there is no remote SDP" and "ICE failed, add a STUN server and see about:webrtc for details".
I don't know where I am making a mistake.
// define all data here
var usersOnline, id, currentCaller, room, caller, localUserMedia, memberInfo;
// All subscribed members.
var users = [];
var token = Math.random().toString(36).substring(2, 15) + Math.random().toString(36).substring(2, 15);
// create random user id
var userId = Math.random().toString(36).substring(2, 15);
// create random username
var username = token;
// authenticating user
var currentUser = {
  username: token,
  userId: userId
};
// stringify user data
currentUser = JSON.stringify(currentUser);

var pusher = new Pusher('KEY', {
  authEndpoint: '../auth.php',
  auth: {
    params: JSON.parse(currentUser)
  },
  cluster: 'ap2',
  forceTLS: true
});

var state = pusher.connection.state;
var channel = pusher.subscribe('presence-conference');

channel.bind("pusher:subscription_succeeded", function (members) {
  console.log(members);
  id = channel.members.me.id;
  document.getElementById('mydetails').innerHTML = 'Online Now: ' + ' ( ' + (members.count - 1) + ')';
  members.each(member => {
    if (member.id != channel.members.me.id) {
      users.push(member.id);
    }
  });
  renderOnline();
});
// Add user online
channel.bind("pusher:member_added", member => {
  users.push(member.id);
  renderOnline();
});

channel.bind("pusher:member_removed", member => {
  // for remove member from list:
  var index = users.indexOf(member.id);
  users.splice(index, 1);
  if (member.id == room) {
    endCall();
  }
  renderOnline();
});

function renderOnline() {
  var list = "";
  users.forEach(function(user) {
    list +=
      `<li>` +
      user + // this will call user
      ` <input type="button" style="float:right;" value="Call" onclick="callUser('` +
      user +
      `')" id="makeCall" /></li>`;
  });
  document.getElementById("userDetails").innerHTML = list;
}
// To iron over browser implementation anomalies like prefixes
GetRTCPeerConnection();
GetRTCSessionDescription();
GetRTCIceCandidate();
prepareCaller();

function prepareCaller() {
  // Initializing a peer connection
  caller = new window.RTCPeerConnection();
  // Listen for ICE Candidates and send them to remote peers
  caller.onicecandidate = function(evt) {
    if (!evt.candidate) return;
    console.log("onicecandidate called");
    onIceCandidate(caller, evt);
  };
  // onaddstream handler to receive remote feed and show in remoteview video element
  caller.onaddstream = function(evt) {
    console.log("onaddstream called");
    if ("srcObject" in document.getElementById("selfview")) {
      document.getElementById("selfview").srcObject = evt.stream;
    } else {
      if (window.URL) {
        document.getElementById("remoteview").src = window.URL.createObjectURL(evt.stream);
      } else {
        document.getElementById("remoteview").src = evt.stream;
      }
    }
  };
}
function getCam() {
  // Get local audio/video feed and show it in selfview video element
  return navigator.mediaDevices.getUserMedia({
    audio: {
      echoCancellation: true,
      sampleSize: 8
    },
    video: {
      width: 1080,
      height: 720,
      aspectRatio: { ideal: 1.777778 }
    }
  });
}

function GetRTCIceCandidate() {
  window.RTCIceCandidate =
    window.RTCIceCandidate ||
    window.webkitRTCIceCandidate ||
    window.mozRTCIceCandidate ||
    window.msRTCIceCandidate;
  return window.RTCIceCandidate;
}

function GetRTCPeerConnection() {
  window.RTCPeerConnection =
    window.RTCPeerConnection ||
    window.webkitRTCPeerConnection ||
    window.mozRTCPeerConnection ||
    window.msRTCPeerConnection;
  return window.RTCPeerConnection;
}

function GetRTCSessionDescription() {
  window.RTCSessionDescription =
    window.RTCSessionDescription ||
    window.webkitRTCSessionDescription ||
    window.mozRTCSessionDescription ||
    window.msRTCSessionDescription;
  return window.RTCSessionDescription;
}
// Create and send offer to remote peer on button click
function callUser(user) {
  getCam()
    .then(stream => {
      if ("srcObject" in document.getElementById("selfview")) {
        document.getElementById("selfview").srcObject = stream;
      } else {
        if (window.URL) {
          document.getElementById("selfview").src = window.URL.createObjectURL(stream);
        } else {
          document.getElementById("selfview").src = stream;
        }
      }
      toggleEndCallButton();
      caller.addStream(stream);
      localUserMedia = stream;
      caller.createOffer().then(function(desc) {
        caller.setLocalDescription(new RTCSessionDescription(desc));
        channel.trigger("client-sdp", {
          sdp: desc,
          room: user,
          from: id
        });
        room = user;
      });
    })
    .catch(error => {
      console.log("an error occured", error);
    });
}

function endCall() {
  room = undefined;
  caller.close();
  for (let track of localUserMedia.getTracks()) {
    track.stop();
  }
  prepareCaller();
  toggleEndCallButton();
}
function endCurrentCall() {
  channel.trigger("client-endcall", {
    room: room
  });
  endCall();
}

// Send the ICE Candidate to the remote peer
function onIceCandidate(peer, evt) {
  if (evt.candidate) {
    channel.trigger("client-candidate", {
      candidate: evt.candidate,
      room: room
    });
  }
}

function toggleEndCallButton() {
  if (document.getElementById("endCall").style.display == "block") {
    document.getElementById("endCall").style.display = "none";
  } else {
    document.getElementById("endCall").style.display = "block";
  }
}

// Listening for the candidate message from a peer sent from onicecandidate handler
channel.bind("client-candidate", function(msg) {
  if (msg.room == room) {
    console.log("candidate received");
    caller.addIceCandidate(new RTCIceCandidate(msg.candidate));
  }
});
// Listening for Session Description Protocol message with session details from remote peer
channel.bind("client-sdp", function(msg) {
  if (msg.room == id) {
    console.log("sdp received");
    var answer = confirm(
      "You have a call from: " + msg.from + "Would you like to answer?"
    );
    if (!answer) {
      return channel.trigger("client-reject", { room: msg.room, rejected: id });
    }
    room = msg.room;
    getCam()
      .then(stream => {
        localUserMedia = stream;
        toggleEndCallButton();
        if ("srcObject" in document.getElementById("selfview")) {
          document.getElementById("selfview").srcObject = stream;
        } else {
          if (window.URL) {
            document.getElementById("selfview").src = window.URL.createObjectURL(stream);
          } else {
            document.getElementById("selfview").src = stream;
          }
        }
        caller.addStream(stream);
        var sessionDesc = new RTCSessionDescription(msg.sdp);
        caller.setRemoteDescription(sessionDesc);
        caller.createAnswer().then(function(sdp) {
          caller.setLocalDescription(new RTCSessionDescription(sdp));
          channel.trigger("client-answer", {
            sdp: sdp,
            room: room
          });
        });
      })
      .catch(error => {
        console.log("an error occured", error);
      });
  }
});

// Listening for answer to offer sent to remote peer
channel.bind("client-answer", function(answer) {
  if (answer.room == room) {
    console.log("answer received");
    caller.setRemoteDescription(new RTCSessionDescription(answer.sdp));
  }
});

channel.bind("client-reject", function(answer) {
  if (answer.room == room) {
    console.log("Call declined");
    alert("call to " + answer.rejected + "was politely declined");
    endCall();
  }
});

channel.bind("client-endcall", function(answer) {
  if (answer.room == room) {
    console.log("Call Ended");
    endCall();
  }
});
I EXPECTED that the video call would work. I don't want to use any API; please help me see where I went wrong.
Call setRemoteDescription(offer) before requesting the camera.
This puts the RTCPeerConnection in the right signaling state ("have-remote-offer") to receive and process remote ICE candidates correctly.
There's no time to request the camera first when an offer comes in. Incoming offers are typically followed closely by trickled ICE candidates on your signaling channel. addIceCandidate won't know what to do with those if it hasn't seen an offer.
Move the setRemoteDescription call ahead of the getCam() call in the promise chain to fix it. You then have more time before returning an answer.
Though that's still not great, since this approach often ends up blocking initial WebRTC negotiation on a user permission prompt for the camera. This is called tight coupling. Sadly, the current state of WebRTC encourages it, since getting the best IP mode is gated on getUserMedia in most browsers.
Lastly, there's a lot of old API usage here. See my other answer for newer APIs to use.
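Here is a minimal sketch of the reordered client-sdp handler. It reuses caller, channel, getCam, localUserMedia, and room from the question, and omits the confirm prompt and the selfview wiring; only the order of operations is the point:

channel.bind("client-sdp", function(msg) {
  if (msg.room != id) return;
  room = msg.room;

  // 1. Apply the remote offer first, so trickled candidates arriving right
  //    behind it can be added without an InvalidStateError.
  caller.setRemoteDescription(new RTCSessionDescription(msg.sdp))
    // 2. Only then prompt for the camera/microphone.
    .then(() => getCam())
    .then(stream => {
      caller.addStream(stream);
      localUserMedia = stream;
      // 3. Answer once the local media is attached.
      return caller.createAnswer();
    })
    .then(answer => caller.setLocalDescription(answer))
    .then(() => {
      channel.trigger("client-answer", {
        sdp: caller.localDescription,
        room: room
      });
    })
    .catch(error => console.log("an error occured", error));
});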

Unable to record again after stopping record in Kurento

I am working on a Kurento application and there is a strange problem I am facing: once I start recording the video and then stop the recording, I can't start another recording again. The event goes to the server but nothing seems to happen! Please find the code below:
room.pipeline.create('WebRtcEndpoint', function (error, outgoingMedia) {
  if (error) {
    console.error('no participant in room');
    // no participants in room yet, release pipeline
    if (Object.keys(room.participants).length == 0) {
      room.pipeline.release();
    }
    return callback(error);
  }
  outgoingMedia.setMaxVideoRecvBandwidth(256);
  userSession.outgoingMedia = outgoingMedia;

  // add ICE candidates that got sent before the endpoint was established
  var iceCandidateQueue = userSession.iceCandidateQueue[socket.id];
  if (iceCandidateQueue) {
    while (iceCandidateQueue.length) {
      var message = iceCandidateQueue.shift();
      console.error('user : ' + userSession.id + ' collect candidate for outgoing media');
      userSession.outgoingMedia.addIceCandidate(message.candidate);
    }
  }

  userSession.outgoingMedia.on('OnIceCandidate', function (event) {
    console.log("generate outgoing candidate : " + userSession.id);
    var candidate = kurento.register.complexTypes.IceCandidate(event.candidate);
    userSession.sendMessage({
      id: 'iceCandidate',
      sessionId: userSession.id,
      candidate: candidate
    });
  });

  // notify other users that a new user is joining
  var usersInRoom = room.participants;
  var data = {
    id: 'newParticipantArrived',
    new_user_id: userSession.id,
    receiveVid: receiveVid
  };
  // notify existing users
  for (var i in usersInRoom) {
    usersInRoom[i].sendMessage(data);
  }

  var existingUserIds = [];
  for (var i in room.participants) {
    existingUserIds.push({ id: usersInRoom[i].id, receiveVid: usersInRoom[i].receiveVid });
  }
  // send list of current users in the room to the current participant
  userSession.sendMessage({
    id: 'existingParticipants',
    data: existingUserIds,
    roomName: room.name,
    receiveVid: receiveVid
  });

  // register user to room
  room.participants[userSession.id] = userSession;

  var recorderParams = {
    mediaProfile: 'WEBM',
    uri: "file:///tmp/Room_" + room.name + "_file" + userSession.id + ".webm"
  };
  // make recorder endpoint
  room.pipeline.create('RecorderEndpoint', recorderParams, function(error, recorderEndpoint) {
    userSession.outgoingMedia.recorderEndpoint = recorderEndpoint;
    outgoingMedia.connect(recorderEndpoint);
  });
When I click the record button on the screen, the function called on the server is:
function startRecord(socket) {
  console.log("in func");
  var userSession = userRegistry.getById(socket.id);
  if (!userSession) {
    return;
  }
  var room = rooms[userSession.roomName];
  if (!room) {
    return;
  }
  var usersInRoom = room.participants;
  var data = {
    id: 'startRecording'
  };
  for (var i in usersInRoom) {
    console.log("in loop");
    var user = usersInRoom[i];
    // release viewer from this
    user.outgoingMedia.recorderEndpoint.record();
    // notify all user in the room
    user.sendMessage(data);
    console.log(user.id);
  }
}
The thing is, the very first time it records properly, i.e. the file is created on the server and video and audio are recorded properly.
When I press stop to stop recording, the intended effect is seen, i.e. the recording stops.
NOW, when I press record again, a video file is not made, even though the event reaches the server properly (the console.log output says so).
Can anyone help me, please?
Thanks.

Modify Kurento group call example to support only audio

I need to modify the Kurento group call example from Link
to send only audio if one participant has no camera. Right now, audio is only received when a camera is used. When only a microphone is available I receive a DeviceMediaError.
I managed to filter whether a camera device is connected or not and then send only audio, but this doesn't work. Maybe the participant should have an audio tag instead of a video tag?
EDIT: It only works in Firefox and not in Chrome. Any ideas?
In the file https://github.com/Kurento/kurento-tutorial-java/blob/master/kurento-group-call/src/main/java/org/kurento/tutorial/groupcall/UserSession.java, change the following line:
sender.getOutgoingWebRtcPeer().connect(incoming, MediaType.AUDIO);
and set the offer media constraints to video: false in the browser JS file.
Updated code:
let constraints = {
  audio: true,
  video: false
};
let localParticipant = new Participant(sessionId);
participants[sessionId] = localParticipant;
localVideo = document.getElementById('local_video');
let video = localVideo;
let options = {
  localVideo: video,
  mediaConstraints: constraints,
  onicecandidate: localParticipant.onIceCandidate.bind(localParticipant),
  configuration: { iceServers: [
    { "url": "stun:74.125.200.127:19302" },
  ] }
};

localParticipant.rtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerSendonly(options, function(error) {
  if (error) {
    return console.error(error);
  }
  localVideoCurrentId = sessionId;
  localVideo = document.getElementById('local_video');
  localVideo.src = localParticipant.rtcPeer.localVideo.src;
  localVideo.muted = true;
  this.generateOffer(localParticipant.offerToReceiveVideo.bind(localParticipant));
});
server.js code
function join(socket, room, callback) {
  let userSession = userRegister.getById(socket.id);
  userSession.setRoomName(room.name);
  room.pipeline.create('WebRtcEndpoint', { mediaProfile: 'WEBM_AUDIO_ONLY' }, (error, outgoingMedia) => {
    if (error) {
      console.error('no participant in room');
      if (Object.keys(room.participants).length === 0) {
        room.pipeline.release();
      }
      return callback(error);
    }
    // else
    outgoingMedia.setMaxAudioRecvBandwidth(100);
Add the media profile parameter on the server side while joining the room.
function getEndpointForUser(userSession, sender, callback) {
  if (userSession.id === sender.id) {
    return callback(null, userSession.outgoingMedia);
  }
  let incoming = userSession.incomingMedia[sender.id];
  if (incoming == null) {
    console.log(`user : ${userSession.id} create endpoint to receive video from : ${sender.id}`);
    getRoom(userSession.roomName, (error, room) => {
      if (error) {
        return callback(error);
      }
      room.pipeline.create('WebRtcEndpoint', { mediaProfile: 'WEBM_AUDIO_ONLY' }, (error, incomingMedia) => {
        if (error) {
          if (Object.keys(room.participants).length === 0) {
            room.pipeline.release();
          }
          return callback(error);
        }
        console.log(`user: ${userSession.id} successfully create pipeline`);
        incomingMedia.setMaxAudioRecvBandwidth(0);
        incomingMedia.getMaxAudioRecvBandwidth(0);
Add the media profile parameter when accepting the call.
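For the browser side of the receiving peer, here is a minimal sketch of an audio-only receive-only peer. It assumes kurento-utils' WebRtcPeerRecvonly, an <audio id="remote_audio"> element, and an existing participant object like the one in the tutorial; the element id is an example, and treat the whole snippet as a sketch rather than tested code:

let remoteAudio = document.getElementById('remote_audio');
let options = {
  remoteVideo: remoteAudio, // an <audio> element may work here too, since only the stream gets attached
  mediaConstraints: { audio: true, video: false },
  onicecandidate: participant.onIceCandidate.bind(participant)
};

participant.rtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerRecvonly(options, function(error) {
  if (error) {
    return console.error(error);
  }
  // same offer flow as the tutorial's receive path
  this.generateOffer(participant.offerToReceiveVideo.bind(participant));
});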
hope this helps.

Logs for Protractor

I am new to Protractor and want to create logs for my test cases. I used if and else to write logs. I wanted to know if there is a better way of writing logs for Protractor test cases.
My Code:
var colors = require('colors/safe');
var mapFeedBackpage = require('./mapFeedBack-page.js');

describe("Map feedback Automation", function() {
  var mapFeedBack = new mapFeedBackpage();

  it("Check if the Url works ", function() { // spec1
    console.log("Checking the url :" + browser.params.url + '\n');
    browser.get(browser.params.url);
    browser.getCurrentUrl().then(function(value) {
      if (/report/.test(value) === false) {
        fail("Result: URL doesnt works-FAIL \n");
      } else {
        console.log(colors.green("PASS :") + browser.params.url + "is reachable \n");
      }
    });
  });

  it("test browser should reach report road option", function() { // spec2s
    console.log("Checking if road report option is available \n");
    mapFeedBack.REPORT_ROAD.click();
    expect(browser.getCurrentUrl()).toContain("report_road");
    browser.getCurrentUrl().then(function(value) {
      if (/report_road/.test(value) === false) {
        fail("Result: URL doesnt works-FAIL");
      } else {
        console.log(colors.green("PASS") + " Road report option is available");
      }
    });
  });
Yes, you can use https://www.npmjs.com/package/log4js, which is basically the log4j module for Node.js apps. Since Protractor is a Node.js program, it certainly supports this. It's very easy to implement:
var log4js = require('log4js');
var logger = log4js.getLogger();
logger.debug("Some debug messages");
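If you also want the output written to a file rather than only to the console, log4js can be configured with a file appender. A minimal sketch, assuming log4js 2.x or later; the appender names and the log file name are arbitrary examples:

var log4js = require('log4js');
// route every logger to both stdout and a file
log4js.configure({
  appenders: {
    out: { type: 'stdout' },
    tests: { type: 'file', filename: 'protractor-tests.log' }
  },
  categories: {
    default: { appenders: ['out', 'tests'], level: 'debug' }
  }
});
var logger = log4js.getLogger();
logger.info('This goes to both the console and protractor-tests.log');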
or you could write a custom logger:
var logger = exports;
logger.debugLevel = 'warn';
logger.log = function(level, message) {
  var levels = ['error', 'warn', 'info'];
  // only log messages at least as severe as the configured debugLevel
  if (levels.indexOf(level) <= levels.indexOf(logger.debugLevel)) {
    if (typeof message !== 'string') {
      message = JSON.stringify(message);
    }
    console.log(level + ': ' + message);
  }
};
and then use it in your scripts like this:
var logger = require('./logger');
logger.debugLevel = 'warn';
logger.log('info', 'Everything started properly.');
logger.log('warn', 'Running out of memory...');
logger.log('error', { error: 'flagrant'});

WEBRTC Object #<RTCPeerConnection> has no method 'processSignalingMessage'

I have problems with WebRTC.
I am using this code from an example about video calls.
if (new_connection) {
  console.log('New Peer Connection');
  var peer_connection = {};
  peer_connection.connection_id = msg.from_connection_id;
  peer_connection.pc = createPeerConnection(peer_connection.connection_id, false);
  peer_connections.push(peer_connection);
  $('#remote').prepend(remoteVideoHtml.replace('remoteVideoId', 'peer' + peer_connection.connection_id));
}

// Now process the SDP JSON Blob received
for (var i in peer_connections) {
  if (peer_connections[i].connection_id == msg.from_connection_id) {
    try {
      peer_connections[i].pc.processSignalingMessage(msg.data);
    } catch (e) {
      console.log("Failed to create processSignalingMessage, exception: " + e.message);
    }
I need help because I have one problem here.
peer_connections[i].pc.processSignalingMessage(msg.data);
The problem is:
Object #<RTCPeerConnection> has no method 'processSignalingMessage'
I don't know how these functions work or how they are invoked:
pc.onconnecting = function (msg) {
  console.log('onSessionConnecting');
};

pc.onopen = function (msg) {
  console.log('onSessionOpened');
};

pc.onaddstream = function (event) {
  console.log('onRemoteStreamAdded add the remote peers video stream.');
  var url = webkitURL.createObjectURL(event.stream);
  $('#peer' + connection_id).attr({
    src: url
  });
};
I will appreciate any help.
The initial version of WebRTC in Chrome was based on ROAP, and it used to have a processSignalingMessage() method. The current version is based on JSEP, and it has methods like setRemoteDescription() and setLocalDescription() to inject the local SDP and the SDP received from other users.
You can still find this implementation in old versions of Chrome or in Bowser.
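For reference, here is a minimal sketch of the JSEP-style exchange that replaced processSignalingMessage. sendToPeer stands in for whatever signaling channel you use, and the offer/answer and candidate message shapes are illustrative:

var pc = new RTCPeerConnection({ iceServers: [{ urls: 'stun:stun.l.google.com:19302' }] });

// Send each local candidate to the remote peer over your own signaling channel.
pc.onicecandidate = function (event) {
  if (event.candidate) {
    sendToPeer({ type: 'candidate', candidate: event.candidate });
  }
};

// Caller side: create an offer and set it as the local description.
pc.createOffer()
  .then(offer => pc.setLocalDescription(offer))
  .then(() => sendToPeer({ type: 'offer', sdp: pc.localDescription }));

// Callee side: apply the remote offer, then answer.
function onOfferReceived(msg) {
  pc.setRemoteDescription(new RTCSessionDescription(msg.sdp))
    .then(() => pc.createAnswer())
    .then(answer => pc.setLocalDescription(answer))
    .then(() => sendToPeer({ type: 'answer', sdp: pc.localDescription }));
}

// Either side: add remote candidates as they arrive.
function onCandidateReceived(msg) {
  pc.addIceCandidate(new RTCIceCandidate(msg.candidate));
}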