Screen sharing issues in Kurento - WebRTC

I am facing issues with screen sharing through Kurento Media Server. I am following the kurento-java-tutorials (one2many) example. Here is my presenter function on the client side:
function presenter() {
  if (!webRtcPeer) {
    showSpinner(video);
    var constraints = {
      audio : false,
      video : {
        width : { max : 320 },
        height : { max : 240 },
        frameRate : { exact : 15 }
      }
    };
    var options = {
      // localVideo : video,
      videoStream : video,
      onicecandidate : onIceCandidate,
      mediaConstraints : constraints,
      sendSource : 'screen'
    };
    console.log(options);
    if (navigator.getDisplayMedia || navigator.mediaDevices.getDisplayMedia) {
      function onGettingStream(stream) {
        video.srcObject = stream;
      }
      if (navigator.mediaDevices.getDisplayMedia) {
        navigator.mediaDevices.getDisplayMedia({ video: true }).then(stream1 => {
          onGettingStream(stream1);
          options.localVideo = stream1;
        }, getDisplayMediaError).catch(getDisplayMediaError);
      } else if (navigator.getDisplayMedia) {
        navigator.getDisplayMedia({ video: true }).then(stream2 => {
          onGettingStream(stream2);
          options.localVideo = stream2;
        }, getDisplayMediaError).catch(getDisplayMediaError);
      }
    } else {
      // no screen-capture API available
    }
    // NOTE: this runs before the getDisplayMedia promise above resolves
    webRtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerSendonly(options,
      function(error) {
        if (error) {
          return console.error(error);
        }
        webRtcPeer.generateOffer(onOfferPresenter);
      });
    enableStopButton();
  }
}
I am not getting the video stream on the viewer side. The viewer's console says:
Call not accepted for the following reason: No active sender now. Become sender or . Try again later ...

We can use the startCapture() helper from https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getDisplayMedia, and this worked for us:
startCapture({ video: true }).then(stream => {
  video.srcObject = stream;
  var cstrx = {
    audio : false,
    video : {
      width : { max : 640 },
      height : { max : 480 },
      frameRate : { exact : 15 }
    }
  };
  var options = {
    videoStream : stream,
    onicecandidate : participant.onIceCandidate.bind(participant),
    mediaConstraints : cstrx,
    sendSource : 'screen'
  };
  participant.rtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerSendonly(options,
  ...
The key point is that we have to wait for the screen-capture stream before creating the WebRtcPeer; otherwise the peer is created with an empty stream.
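For reference, startCapture() here is essentially the small async wrapper from the MDN page linked above. A minimal sketch:
// Adapted from the MDN getDisplayMedia documentation.
async function startCapture(displayMediaOptions) {
  let captureStream = null;
  try {
    captureStream = await navigator.mediaDevices.getDisplayMedia(displayMediaOptions);
  } catch (err) {
    console.error('getDisplayMedia error: ' + err);
  }
  return captureStream;
}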

Related

Error integrating Agora.io with nuxt.js: Error in created hook: "ReferenceError: AgoraRTC is not defined"

I am integrating Agora Web SDK with nuxt.js.
I have included all the methods I need and my page has the following methods and lifecycle hooks:
methods: {
  streamInit(uid, attendeeMode, videoProfile, config) {
    let defaultConfig = {
      streamID: uid,
      audio: true,
      video: true,
      screen: false
    };
    switch (attendeeMode) {
      case "audio-only":
        defaultConfig.video = false;
        break;
      case "audience":
        defaultConfig.video = false;
        defaultConfig.audio = false;
        break;
      default:
      case "video":
        break;
    }
    let stream = AgoraRTC.createStream(merge(defaultConfig, config));
    stream.setVideoProfile(videoProfile);
    return stream;
  },
  subscribeStreamEvents() {
    let rt = this;
    rt.client.on("stream-added", function(evt) {
      let stream = evt.stream;
      console.log("New stream added: " + stream.getId());
      console.log("At " + new Date().toLocaleTimeString());
      console.log("Subscribe ", stream);
      rt.client.subscribe(stream, function(err) {
        console.log("Subscribe stream failed", err);
      });
    });
    rt.client.on("peer-leave", function(evt) {
      console.log("Peer has left: " + evt.uid);
      console.log(new Date().toLocaleTimeString());
      console.log(evt);
      rt.removeStream(evt.uid);
    });
    rt.client.on("stream-subscribed", function(evt) {
      let stream = evt.stream;
      console.log("Got stream-subscribed event");
      console.log(new Date().toLocaleTimeString());
      console.log("Subscribe remote stream successfully: " + stream.getId());
      console.log(evt);
      rt.addStream(stream);
    });
    rt.client.on("stream-removed", function(evt) {
      let stream = evt.stream;
      console.log("Stream removed: " + stream.getId());
      console.log(new Date().toLocaleTimeString());
      console.log(evt);
      rt.removeStream(stream.getId());
    });
  },
  removeStream(uid) {
    this.streamList.map((item, index) => {
      if (item.getId() === uid) {
        item.close();
        let element = document.querySelector("#ag-item-" + uid);
        if (element) {
          element.parentNode.removeChild(element);
        }
        let tempList = [...this.streamList];
        tempList.splice(index, 1);
        this.streamList = tempList;
      }
    });
  },
  addStream(stream, push = false) {
    let repetition = this.streamList.some(item => {
      return item.getId() === stream.getId();
    });
    if (repetition) {
      return;
    }
    if (push) {
      this.streamList = this.streamList.concat([stream]);
    } else {
      this.streamList = [stream].concat(this.streamList);
    }
  },
  handleCamera(e) {
    e.currentTarget.classList.toggle("off");
    this.localStream.isVideoOn()
      ? this.localStream.disableVideo()
      : this.localStream.enableVideo();
  },
  handleMic(e) {
    e.currentTarget.classList.toggle("off");
    this.localStream.isAudioOn()
      ? this.localStream.disableAudio()
      : this.localStream.enableAudio();
  },
  switchDisplay(e) {
    if (
      e.currentTarget.classList.contains("disabled") ||
      this.streamList.length <= 1
    ) {
      return;
    }
    if (this.displayMode === "pip") {
      this.displayMode = "tile";
    } else if (this.displayMode === "tile") {
      this.displayMode = "pip";
    } else if (this.displayMode === "share") {
      // do nothing or alert, tbd
    } else {
      console.error("Display Mode can only be tile/pip/share");
    }
  },
  hideRemote(e) {
    if (
      e.currentTarget.classList.contains("disabled") ||
      this.streamList.length <= 1
    ) {
      return;
    }
    let list;
    let id = this.streamList[this.streamList.length - 1].getId();
    list = Array.from(
      document.querySelectorAll(`.ag-item:not(#ag-item-${id})`)
    );
    list.map(item => {
      if (item.style.display !== "none") {
        item.style.display = "none";
      } else {
        item.style.display = "block";
      }
    });
  },
  handleExit(e) {
    if (e.currentTarget.classList.contains("disabled")) {
      return;
    }
    try {
      this.client && this.client.unpublish(this.localStream);
      this.localStream && this.localStream.close();
      this.client &&
        this.client.leave(
          () => {
            console.log("Client succeed to leave.");
          },
          () => {
            console.log("Client failed to leave.");
          }
        );
    } finally {
      this.readyState = false;
      this.client = null;
      this.localStream = null;
      // redirect to index
      this.$router.push("/");
    }
  }
},
created() {
  let $ = this;
  // init AgoraRTC local client
  $.client = AgoraRTC.createClient({ mode: $.transcode });
  $.client.init($.appId, () => {
    console.log("AgoraRTC client initialized");
    $.subscribeStreamEvents();
    $.client.join($.appId, $.channel, $.uid, uid => {
      console.log("User " + uid + " join channel successfully");
      console.log("At " + new Date().toLocaleTimeString());
      // create local stream
      // It is not recommended to setState in function addStream
      $.localStream = this.streamInit(uid, $.attendeeMode, $.videoProfile);
      $.localStream.init(
        () => {
          if ($.attendeeMode !== "audience") {
            $.addStream($.localStream, true);
            $.client.publish($.localStream, err => {
              console.log("Publish local stream error: " + err);
            });
          }
          $.readyState = true;
        },
        err => {
          console.log("getUserMedia failed", err);
          $.readyState = true;
        }
      );
    });
  });
},
mounted() {
  this.$nextTick(() => {
    // add listener to control btn group
    let canvas = document.querySelector("#ag-canvas");
    let btnGroup = document.querySelector(".ag-btn-group");
    canvas.addEventListener("mousemove", () => {
      if (global._toolbarToggle) {
        clearTimeout(global._toolbarToggle);
      }
      btnGroup.classList.add("active");
      global._toolbarToggle = setTimeout(function() {
        btnGroup.classList.remove("active");
      }, 2000);
    });
  });
},
beforeUpdate() {
  let $ = this;
  // rerendering
  let canvas = document.querySelector("#ag-canvas");
  // pip mode (can only use when less than 4 people in channel)
  if ($.displayMode === "pip") {
    let no = $.streamList.length;
    if (no > 4) {
      $.displayMode = "tile";
      return;
    }
    $.streamList.map((item, index) => {
      let id = item.getId();
      let dom = document.querySelector("#ag-item-" + id);
      if (!dom) {
        dom = document.createElement("section");
        dom.setAttribute("id", "ag-item-" + id);
        dom.setAttribute("class", "ag-item");
        canvas.appendChild(dom);
        item.play("ag-item-" + id);
      }
      if (index === no - 1) {
        dom.setAttribute("style", `grid-area: span 12/span 24/13/25`);
      } else {
        dom.setAttribute(
          "style",
          `grid-area: span 3/span 4/${4 + 3 * index}/25;
          z-index:1;width:calc(100% - 20px);height:calc(100% - 20px)`
        );
      }
      item.player.resize && item.player.resize();
    });
  } else if ($.displayMode === "tile") {
    // tile mode
    let no = $.streamList.length;
    $.streamList.map((item, index) => {
      let id = item.getId();
      let dom = document.querySelector("#ag-item-" + id);
      if (!dom) {
        dom = document.createElement("section");
        dom.setAttribute("id", "ag-item-" + id);
        dom.setAttribute("class", "ag-item");
        canvas.appendChild(dom);
        item.play("ag-item-" + id);
      }
      dom.setAttribute("style", `grid-area: ${tile_canvas[no][index]}`);
      item.player.resize && item.player.resize();
    });
  } else if ($.displayMode === "share") {
    // screen share mode (tbd)
  }
},
beforeDestroy() {
  this.client && this.client.unpublish(this.localStream);
  this.localStream && this.localStream.close();
  this.client &&
    this.client.leave(
      () => {
        console.log("Client succeed to leave.");
      },
      () => {
        console.log("Client failed to leave.");
      }
    );
}
I have installed agora-rtc-sdk from npm.
My plugins/agora.js file looks like this:
import Vue from "vue";
import AgoraRTC from 'agora-rtc-sdk';
Vue.use(AgoraRTC);
My nuxt.config.js has plugins declared as:
{
  src: "~/plugins/agora.js",
  ssr: false
}
On loading the page, the application throws "AgoraRTC is not defined". How do I add AgoraRTC to my nuxt.js application?
The Agora Web SDK works only on the client side, independent of any server-side rendering, so you need to register the plugin with mode set to client in nuxt.config.js like this:
{ src: '~/plugins/agora.js', mode: 'client' },
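Note that AgoraRTC is not a Vue plugin, so Vue.use(AgoraRTC) has no effect; the SDK can instead be exposed through Nuxt's inject mechanism. A minimal sketch of such a client-only plugin (the $agora name is an illustration, not from the question):
// plugins/agora.js (registered with mode: 'client')
import AgoraRTC from 'agora-rtc-sdk';

export default (context, inject) => {
  // Makes the SDK available in components as this.$agora
  inject('agora', AgoraRTC);
};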

Dynamically addTrack from answerer to offerer via onnegotiationneeded in WebRTC

Is there any way to notify the offerer that the answerer has just added a track that did not exist before, so the offerer can receive the new stream, given the code below?
My current issue: when the offerer adds a new track, onnegotiationneeded fires, createOffer succeeds, and the media is updated. But when the answerer does the same, onnegotiationneeded also fires normally on the answerer's side, yet no media is exchanged, because the offerer does not have any new track on its end.
I use replaceOrAddTrack(remotePartiID, track, TrackKind) for adding and replacing tracks.
Only replace works from either end, and only for a track kind that existed in the initial connection.
_cfg = {
  sdpConstraints: {
    mandatory: {
      OfferToReceiveAudio: true,
      OfferToReceiveVideo: true,
      VoiceActivityDetection: true,
      IceRestart: true
    },
    optional: []
  }
  ...
};
var channels_wrap = (function() {
  return {
    ...
    init: function() {
      _cfg.defaultChannel.on('message', (message) => {
        if (_cfg.enableLog) {
          console.log('Client received message:', message);
        }
        if (message.type === 'newparticipant') {
          var partID = message.from;
          var partData = message.fromData;
          // Open a new communication channel to the new participant
          _cfg.offerChannels[partID] = this.openSignalingChannel(partID);
          // Wait for answers (to offers) from the new participant
          _cfg.offerChannels[partID].on('message', (msg) => {
            if (msg.dest === _cfg.myID) {
              if (msg.type === 'reoffer') {
                if (_cfg.opc.hasOwnProperty(msg.from)) {
                  console.log('reoffering')
                  _cfg.opc[msg.from].negotiationNeeded();
                }
              } else if (msg.type === 'answer') {
                _cfg.opc[msg.from].peer.setRemoteDescription(new RTCSessionDescription(msg.snDescription),
                  handlers_wrap.setRemoteDescriptionSuccess,
                  handlers_wrap.setRemoteDescriptionError);
              } else if (msg.type === 'candidate') {
                var candidate = new RTCIceCandidate({
                  sdpMLineIndex: msg.label,
                  candidate: msg.candidate
                });
                if (_cfg.enableLog) {
                  console.log('got ice candidate from ' + msg.from);
                }
                _cfg.opc[msg.from].peer.addIceCandidate(candidate, handlers_wrap.addIceCandidateSuccess, handlers_wrap.addIceCandidateError);
              }
            }
          });
          // Send an offer to the new participant
          dialogs_wrap.createOffer(partID, partData);
        } else if (message.type === 'bye') {
          handlers_wrap.hangup(message.from, message.fromData);
        }
      });
    },
    initPrivateChannel: function() {
      // Open a private channel (namespace = _cfg.myID) to receive offers
      _cfg.privateAnswerChannel = this.openSignalingChannel(_cfg.myID);
      // Wait for offers or ice candidates
      _cfg.privateAnswerChannel.on('message', (message) => {
        if (message.dest === _cfg.myID) {
          if (message.type === 'offer') {
            var to = message.from;
            dialogs_wrap.createAnswer(message.snDescription, _cfg.privateAnswerChannel, to, message.fromData);
          } else if (message.type === 'candidate') {
            var candidate = new RTCIceCandidate({
              sdpMLineIndex: message.label,
              candidate: message.candidate
            });
            _cfg.apc[message.from].peer.addIceCandidate(candidate, handlers_wrap.addIceCandidateSuccess, handlers_wrap.addIceCandidateError);
          }
        }
      });
    }
  };
})();
var tracks_wrap = (function() {
  return {
    getParticipants: function(partID = null) {
      var participants = {};
      if (partID) {
        if (_cfg.opc.hasOwnProperty(partID)) {
          participants[partID] = {
            ID: partID,
            type: 'opc'
          };
        } else if (_cfg.apc.hasOwnProperty(partID)) {
          participants[partID] = {
            ID: partID,
            type: 'apc'
          };
        }
      } else {
        for (let key in _cfg.opc) {
          participants[key] = {
            ID: key,
            type: 'opc'
          };
        }
        for (let key in _cfg.apc) {
          participants[key] = {
            ID: key,
            type: 'apc'
          };
        }
      }
      return participants;
    },
    replaceOrAddTrack: function(remotePartiID, track, TrackKind) {
      if (!TrackKind) {
        return;
      }
      var participants = this.getParticipants(remotePartiID);
      for (var partiID in participants) {
        var peer = null;
        if (participants[partiID].type === 'apc' && _cfg.apc.hasOwnProperty(partiID)) {
          peer = _cfg.apc[partiID].peer;
        } else if (participants[partiID].type === 'opc' && _cfg.opc.hasOwnProperty(partiID)) {
          peer = _cfg.opc[partiID].peer;
        } else {
          continue;
        }
        var foundTrack = null;
        peer.getSenders().forEach(function(rtpSender) {
          if (rtpSender.track && TrackKind === rtpSender.track.kind) {
            foundTrack = true;
            rtpSender.replaceTrack(track);
          }
        });
        if (!foundTrack) {
          peer.addTrack(track, _cfg.localStream); // This works only when the offerer adds the track; it does not work for the answerer, even if I tell the offerer to send an offer again
        }
      }
    }
  };
})();
var dialogs_wrap = (function() {
  return {
    /**
     *
     * Send an offer to peer with id partID and metadata as partData
     *
     */
    createOffer: function(partID, partData) {
      if (_cfg.enableLog) {
        console.log('Creating offer for peer ' + partID, partData);
      }
      var opcPeer = new RTCPeerConnection(_cfg.pcConfig, _cfg.peerSetup);
      _cfg.opc[partID] = {};
      _cfg.opc[partID].peer = opcPeer;
      _cfg.opc[partID].peer.onicecandidate = handlers_wrap.handleIceCandidateAnswer(_cfg.offerChannels[partID], partID, partData);
      _cfg.opc[partID].peer.ontrack = handlers_wrap.handleRemoteStreamAdded(partID, partData);
      _cfg.opc[partID].peer.onremovetrack = handlers_wrap.handleRemoteStreamRemoved(partID, partData);
      _cfg.localStream.getTracks().forEach(track => _cfg.opc[partID].peer.addTrack(track, _cfg.localStream));
      try {
        _cfg.sendChannel[partID] = _cfg.opc[partID].peer.createDataChannel("sendDataChannel", {
          reliable: false
        });
        _cfg.sendChannel[partID].onmessage = handlers_wrap.handleMessage;
        if (_cfg.enableLog) {
          console.log('Created send data channel');
        }
      } catch (e) {
        alert('Failed to create data channel. \n You need a browser with RtpDataChannel support enabled');
        console.log('createDataChannel() failed with exception: ', e.message);
      }
      _cfg.sendChannel[partID].onopen = handlers_wrap.handleSendChannelStateChange(partID);
      _cfg.sendChannel[partID].onclose = handlers_wrap.handleSendChannelStateChange(partID);
      var onSuccess = (partID, partData) => {
        var channel = _cfg.offerChannels[partID];
        if (_cfg.enableLog) {
          console.log('Sending offer');
        }
        channel.emit('message', {
          snDescription: _cfg.opc[partID].peer.localDescription,
          from: _cfg.myID,
          fromData: _cfg.myData,
          type: 'offer',
          dest: partID,
          destData: partData
        });
      }
      _cfg.opc[partID].negotiationNeeded = () => {
        _cfg.opc[partID].peer.createOffer(_cfg.sdpConstraints).then(offer => {
          offer.sdp = sdp_wrap.SDPController(offer.sdp);
          return _cfg.opc[partID].peer.setLocalDescription(offer);
        })
        .then(() => onSuccess(partID, partData)).catch(handlers_wrap.handleCreateOfferError);
      }
      _cfg.opc[partID].peer.onnegotiationneeded = () => {
        _cfg.opc[partID].negotiationNeeded();
      }
    },
    createAnswer: function(snDescription, cnl, to, toData) {
      if (_cfg.enableLog) {
        console.log('Creating answer for peer ' + to);
      }
      if (!_cfg.apc.hasOwnProperty(to)) {
        var apcPeer = new RTCPeerConnection(_cfg.pcConfig, _cfg.peerSetup);
        //apcPeer.setConfiguration(_cfg.pcConfig);
        _cfg.apc[to] = {};
        _cfg.apc[to].peer = apcPeer;
        _cfg.apc[to].peer.onicecandidate = handlers_wrap.handleIceCandidateAnswer(cnl, to, toData);
        _cfg.apc[to].peer.ontrack = handlers_wrap.handleRemoteStreamAdded(to, toData);
        _cfg.apc[to].peer.onremovetrack = handlers_wrap.handleRemoteStreamRemoved(to, toData);
        _cfg.localStream.getTracks().forEach(track => _cfg.apc[to].peer.addTrack(track, _cfg.localStream));
        _cfg.apc[to].peer.ondatachannel = handlers_wrap.gotReceiveChannel(to);
      }
      _cfg.apc[to].peer.setRemoteDescription(new RTCSessionDescription(snDescription), handlers_wrap.setRemoteDescriptionSuccess, handlers_wrap.setRemoteDescriptionError);
      var onSuccess = (channel) => {
        if (_cfg.enableLog) {
          console.log('Sending answer');
        }
        channel.emit('message', {
          snDescription: _cfg.apc[to].peer.localDescription,
          from: _cfg.myID,
          fromData: _cfg.myData,
          type: 'answer',
          dest: to,
          destData: toData
        });
      }
      _cfg.apc[to].peer.createAnswer().then(function(answer) {
        answer.sdp = sdp_wrap.SDPController(answer.sdp);
        return _cfg.apc[to].peer.setLocalDescription(answer);
      })
      .then(() => onSuccess(cnl))
      .catch(handlers_wrap.handleCreateAnswerError);
      var negotiationNeeded = false;
      _cfg.apc[to].peer.onnegotiationneeded = (ev) => {
        if (!negotiationNeeded) {
          negotiationNeeded = true;
          return;
        }
        // So I tried this to tell the offerer to send an offer again; the offerer does resend the offer, but nothing seems to happen
        cnl.emit('message', {
          from: _cfg.myID,
          fromData: _cfg.myData,
          type: 'reoffer',
          dest: to,
          destData: toData
        });
      }
    }
  };
})();
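For reference, the symmetric renegotiation described above is what the "perfect negotiation" pattern (documented on MDN) addresses: either peer may add a track and let onnegotiationneeded drive a new offer, with collisions resolved by giving one side a "polite" role. A minimal sketch for modern browsers; pc, polite, and signaler are placeholders, not names from the code above:
let makingOffer = false;

pc.onnegotiationneeded = async () => {
  try {
    makingOffer = true;
    await pc.setLocalDescription();   // parameterless form: implicit createOffer()
    signaler.send({ description: pc.localDescription });
  } catch (err) {
    console.error(err);
  } finally {
    makingOffer = false;
  }
};

signaler.onmessage = async ({ description, candidate }) => {
  if (description) {
    // Offer collision: both sides are offering at the same time.
    const collision = description.type === 'offer' &&
      (makingOffer || pc.signalingState !== 'stable');
    if (collision && !polite) return;  // the impolite side ignores the incoming offer
    await pc.setRemoteDescription(description); // the polite side rolls back implicitly
    if (description.type === 'offer') {
      await pc.setLocalDescription(); // implicit createAnswer()
      signaler.send({ description: pc.localDescription });
    }
  } else if (candidate) {
    await pc.addIceCandidate(candidate);
  }
};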

MediaStreamTrack's readyState changes to "ended" just before playing the stream (MediaStream - MediaStreamTrack - WebRTC)

The JSFiddle (https://jsfiddle.net/kalyansai99/mm1b74uy/22/) contains code where the user can toggle between the front and back cameras of a mobile phone.
On some phones it works fine (Moto G5 Plus, Moto E3, and so on; Chrome browser), but on others (Mi Redmi Note 4, Chrome browser), when I switch to the back camera the stream initially loads with a track whose readyState is "live". But when I am about to play the stream in the video player, the readyState changes to "ended" and a black screen is shown in the video tag.
Not sure what's happening. Any clues?
JSFiddle Code
var player = document.getElementById('player');
var flipBtn = document.getElementById('flipBtn');
var deviceIdMap = {};
var front;
var constraints = {
  audio: false,
  video: {
    frameRate: 1000
  }
};
var gotDevices = function (deviceList) {
  var length = deviceList.length;
  console.log(deviceList);
  for (var i = 0; i < length; i++) {
    var deviceInfo = deviceList[i];
    if (deviceInfo.kind === 'videoinput') {
      if (deviceInfo.label.indexOf('front') !== -1) {
        deviceIdMap.front = deviceInfo.deviceId;
      } else if (deviceInfo.label.indexOf('back') !== -1) {
        deviceIdMap.back = deviceInfo.deviceId;
      }
    }
  }
  if (deviceIdMap.front) {
    constraints.video.deviceId = {exact: deviceIdMap.front};
    front = true;
  } else if (deviceIdMap.back) {
    constraints.video.deviceId = {exact: deviceIdMap.back};
    front = false;
  }
  console.log('deviceIdMap - ', deviceIdMap);
};
var handleError = function (error) {
  console.log('navigator.getUserMedia error: ', error);
};
function handleSuccess(stream) {
  window.stream = stream;
  // this is a video track as there is no audio track
  console.log("Track - ", window.stream.getTracks()[0]);
  console.log('Ready State - ', window.stream.getTracks()[0].readyState);
  if (window.URL) {
    player.src = window.URL.createObjectURL(stream);
  } else {
    player.src = stream;
  }
  player.onloadedmetadata = function (e) {
    console.log('Ready State - 3', window.stream.getTracks()[0].readyState);
    player.play();
    console.log('Ready State - 4', window.stream.getTracks()[0].readyState);
  }
  console.log('Ready State - 2', window.stream.getTracks()[0].readyState);
}
navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);
flipBtn.addEventListener('click', function () {
  if (window.stream) {
    window.stream.getTracks().forEach(function(track) {
      track.stop();
    });
  }
  if (front) {
    constraints.video.deviceId = {exact: deviceIdMap.back};
  } else {
    constraints.video.deviceId = {exact: deviceIdMap.front};
  }
  front = !front;
  navigator.getUserMedia(constraints, handleSuccess, handleError);
}, false);
console.log(constraints);
navigator.getUserMedia(constraints, handleSuccess, handleError);
#player {
  width: 320px;
}
#flipBtn {
  width: 150px;
  height: 50px;
}
<video id="player" autoplay></video>
<div>
  <button id="flipBtn">Flip Camera</button>
</div>
Replace track.stop() with track.enabled = false, and when adding the track back to the stream, re-enable it with track.enabled = true.
The MediaStreamTrack.readyState property changes to "ended" when we stop the track, and the track can never be used again, so it is not wise to call stop() here. For more reference:
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/readyState
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/stop
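A minimal sketch of that suggestion, reusing window.stream from the fiddle:
// Toggle the video tracks off and on instead of stopping them, so their
// readyState stays "live" and they can be reused later.
function setVideoEnabled(stream, enabled) {
  stream.getVideoTracks().forEach(function (track) {
    track.enabled = enabled; // mutes/unmutes frames without ending the track
  });
}

setVideoEnabled(window.stream, false); // instead of track.stop()
setVideoEnabled(window.stream, true);  // when the track is needed again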

Running Protractor Tests Through Jenkins on Remote Selenium Server Getting No Specs Found

I am able to run the tests locally against my remote Selenium server and they run just fine.
When I run them from my Jenkins box against the same remote Selenium server, I get "No specs found", and the Selenium server output shows the following:
21:33:41.256 INFO - Executing: [execute async script: try { return (function (attempts, ng12Hybrid, asyncCallback) {
var callback = function(args) {
setTimeout(function() {
asyncCallback(args);
}, 0);
};
var check = function(n) {
try {
if (!ng12Hybrid && window.getAllAngularTestabilities) {
callback({ver: 2});
} else if (window.angular && window.angular.resumeBootstrap) {
callback({ver: 1});
} else if (n < 1) {
if (window.angular) {
callback({message: 'angular never provided resumeBootstrap'});
} else {
callback({message: 'retries looking for angular exceeded'});
}
} else {
window.setTimeout(function() {check(n - 1);}, 1000);
}
} catch (e) {
callback({message: e});
}
};
check(attempts);
}).apply(this, arguments); }
catch(e) { throw (e instanceof Error) ? e : new Error(e); }, [10, false]])
21:33:41.273 INFO - Done: [execute async script: try { return (function (attempts, ng12Hybrid, asyncCallback) {
var callback = function(args) {
setTimeout(function() {
asyncCallback(args);
}, 0);
};
var check = function(n) {
try {
if (!ng12Hybrid && window.getAllAngularTestabilities) {
callback({ver: 2});
} else if (window.angular && window.angular.resumeBootstrap) {
callback({ver: 1});
} else if (n < 1) {
if (window.angular) {
callback({message: 'angular never provided resumeBootstrap'});
} else {
callback({message: 'retries looking for angular exceeded'});
}
} else {
window.setTimeout(function() {check(n - 1);}, 1000);
}
} catch (e) {
callback({message: e});
}
};
check(attempts);
}).apply(this, arguments); }
catch(e) { throw (e instanceof Error) ? e : new Error(e); }, [10, false]]
21:33:41.288 INFO - Executing: [execute script: return (function (trackOutstandingTimeouts) {
var ngMod = angular.module('protractorBaseModule_', []).config([
'$compileProvider',
function($compileProvider) {
if ($compileProvider.debugInfoEnabled) {
$compileProvider.debugInfoEnabled(true);
}
}
]);
if (trackOutstandingTimeouts) {
ngMod.config([
'$provide',
function ($provide) {
$provide.decorator('$timeout', [
'$delegate',
function ($delegate) {
var $timeout = $delegate;
var taskId = 0;
if (!window['NG_PENDING_TIMEOUTS']) {
window['NG_PENDING_TIMEOUTS'] = {};
}
var extendedTimeout= function() {
var args = Array.prototype.slice.call(arguments);
if (typeof(args[0]) !== 'function') {
return $timeout.apply(null, args);
}
taskId++;
var fn = args[0];
window['NG_PENDING_TIMEOUTS'][taskId] =
fn.toString();
var wrappedFn = (function(taskId_) {
return function() {
delete window['NG_PENDING_TIMEOUTS'][taskId_];
return fn.apply(null, arguments);
};
})(taskId);
args[0] = wrappedFn;
var promise = $timeout.apply(null, args);
promise.ptorTaskId_ = taskId;
return promise;
};
extendedTimeout.cancel = function() {
var taskId_ = arguments[0] && arguments[0].ptorTaskId_;
if (taskId_) {
delete window['NG_PENDING_TIMEOUTS'][taskId_];
}
return $timeout.cancel.apply($timeout, arguments);
};
return extendedTimeout;
}
]);
}
]);
}
}).apply(null, arguments);, [true]])
21:33:41.312 INFO - Done: [execute script: return (function (trackOutstandingTimeouts) {
var ngMod = angular.module('protractorBaseModule_', []).config([
'$compileProvider',
function($compileProvider) {
if ($compileProvider.debugInfoEnabled) {
$compileProvider.debugInfoEnabled(true);
}
}
]);
if (trackOutstandingTimeouts) {
ngMod.config([
'$provide',
function ($provide) {
$provide.decorator('$timeout', [
'$delegate',
function ($delegate) {
var $timeout = $delegate;
var taskId = 0;
if (!window['NG_PENDING_TIMEOUTS']) {
window['NG_PENDING_TIMEOUTS'] = {};
}
var extendedTimeout= function() {
var args = Array.prototype.slice.call(arguments);
if (typeof(args[0]) !== 'function') {
return $timeout.apply(null, args);
}
taskId++;
var fn = args[0];
window['NG_PENDING_TIMEOUTS'][taskId] =
fn.toString();
var wrappedFn = (function(taskId_) {
return function() {
delete window['NG_PENDING_TIMEOUTS'][taskId_];
return fn.apply(null, arguments);
};
})(taskId);
args[0] = wrappedFn;
var promise = $timeout.apply(null, args);
promise.ptorTaskId_ = taskId;
return promise;
};
extendedTimeout.cancel = function() {
var taskId_ = arguments[0] && arguments[0].ptorTaskId_;
if (taskId_) {
delete window['NG_PENDING_TIMEOUTS'][taskId_];
}
return $timeout.cancel.apply($timeout, arguments);
};
return extendedTimeout;
}
]);
}
]);
}
}).apply(null, arguments);, [true]]
Like I said, these run just fine locally, so I am not sure what is going on with my Jenkins machine.
Here is my Protractor config file:
// Configuration constants
var downloadsFolder = 'test/downloads/',
    today = ("0" + (new Date()).getDate()).slice(-2),
    month = ("0" + ((new Date()).getMonth() + 1)).slice(-2),
    baseUrl = 'BASE URL GOES HERE';
// Test report setup w/ screenshot
var HtmlScreenshotReporter = require('protractor-jasmine2-screenshot-reporter');
var reporter = new HtmlScreenshotReporter({
  dest: 'test/report',
  filename: 'e2e-report.html'
});
// Protractor config
exports.config = {
  suites: {
    explore: '.protractor/src/app/exploration/tests/exploration.scenario.js',
    login: '.protractor/src/auth-app/login/tests/login.scenario.js',
    stories: '.protractor/src/app/story/tests/story.scenario.js',
    cohorts: '.protractor/src/app/cohort/tests/cohort.scenario.js',
    visualize: '.protractor/src/app/visualize/tests/visualize.scenario.js'
  },
  baseUrl: 'BASE URL GOES HERE',
  directConnect: false,
  // Override default 11s timeout for long requests such as visualize's "Recommended Visualizations"
  // See https://github.com/angular/protractor/blob/master/docs/timeouts.md
  allScriptsTimeout: 25 * 1000,
  jasmineNodeOpts: {
    defaultTimeoutInterval: 90 * 1000
  },
  multiCapabilities: [
    {
      browserName: 'chrome',
      seleniumAddress: "http://SELENIUM SERVER URL HERE:4444/wd/hub",
      platform: 'ANY',
      version: 'ANY',
      chromeOptions: {
        args: ['--no-sandbox', '--test-type=browser', '--lang=en', '--start-maximized'],
        prefs: {
          download: {
            prompt_for_download: false,
            directory_upgrade: true,
            default_directory: 'test/downloads'
          },
        },
      }
      // shardTestFiles: true,
      // maxInstances: 2
    }
  ],
  onPrepare: function() {
    // Set browser window size
    browser.driver.manage().window().maximize();
    // Setup screenshots
    jasmine.getEnv().addReporter(reporter);
    browser.get('BASE URL GOES HERE');
  },
  // Setup the report before any tests start
  beforeLaunch: function() {
    return new Promise(function(resolve) {
      reporter.beforeLaunch(resolve);
    });
  },
  // Close the report after all tests finish
  afterLaunch: function(exitCode) {
    return new Promise(function(resolve) {
      reporter.afterLaunch(resolve.bind(this, exitCode));
    });
  },
  params: {
    baseUrl: baseUrl,
    downloadsFolder: 'test/downloads',
    cohort: {
      listView: baseUrl + 'cohorts',
      newView: baseUrl + 'cohorts/new'
    },
    story: {
      listView: baseUrl + 'stories',
      newView: baseUrl + 'story/new',
      displayView: baseUrl + 'story'
    },
    visualize: {
      listView: baseUrl + 'visualize',
      newView: baseUrl + 'visualize/new'
    },
    explore: {
      listView: baseUrl + 'explorations',
      newView: baseUrl + 'explorations/new',
      excelFilename: downloadsFolder + `DataExport_2016-${month}-${today}.xlsx`,
      csvFilename: downloadsFolder + `DataExport_2016-${month}-${today}.csv`,
      maxDownloadTime: 10 * 1000
    }
  }
};
This boiled down to a permissions issue. Once I added my jenkins user to sudoers, I was able to run make on the project, which built all of the necessary files, converted my TypeScript tests to JavaScript, and allowed them to run.

How to choose input video device for webrtc?

I am studying a WebRTC application.
My reference is this software:
apprtc
https://code.google.com/p/webrtc/source/browse/trunk/samples/js/apprtc/
demo
https://apprtc.appspot.com/
My computer has a built-in video device, and apprtc uses it. However, I want to use a USB video camera instead.
I am searching for a way to change the input video device, but I couldn't find any clue in the source files.
Does anyone have information?
On Chrome:
chrome://settings/content/camera
chrome://settings/content/microphone
On Firefox: media.navigator.permission.disabled=false
Try this demo, which captures all audio/video input devices:
https://www.webrtc-experiment.com/demos/MediaStreamTrack.getSources.html
You can capture any specific device using the same API.
Edited March 01, 2014:
MediaStreamTrack.getSources(function (media_sources) {
  for (var i = 0; i < media_sources.length; i++) {
    var media_source = media_sources[i];
    var constraints = {};
    // if audio device
    if (media_source.kind == 'audio') {
      constraints.audio = {
        optional: [{
          sourceId: media_source.id
        }]
      };
    }
    // if video device
    if (media_source.kind == 'video') {
      constraints.video = {
        optional: [{
          sourceId: media_source.id
        }]
      };
    }
    // invoke getUserMedia to capture this device
    navigator.webkitGetUserMedia(constraints, function (stream) {
      console.log(stream.id, stream);
    }, console.error);
  }
});
Updated Sep 05, 2015:
Microsoft Edge, Chrome 44+, and Firefox 38+ all support the navigator.mediaDevices.enumerateDevices API.
Here is a reusable script that provides a cross-browser shim for these media-source APIs. It works even in old Chrome (43 and older), even on Android devices:
if (navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
  // Firefox 38+, Microsoft Edge, and Chrome 44+ support enumerateDevices
  navigator.enumerateDevices = function(callback) {
    navigator.mediaDevices.enumerateDevices().then(callback);
  };
}
function getAllAudioVideoDevices(successCallback, failureCallback) {
  if (!navigator.enumerateDevices && window.MediaStreamTrack && window.MediaStreamTrack.getSources) {
    navigator.enumerateDevices = window.MediaStreamTrack.getSources.bind(window.MediaStreamTrack);
  }
  if (!navigator.enumerateDevices && navigator.mediaDevices.enumerateDevices) {
    navigator.enumerateDevices = navigator.mediaDevices.enumerateDevices.bind(navigator);
  }
  if (!navigator.enumerateDevices) {
    failureCallback(null, 'Neither navigator.mediaDevices.enumerateDevices NOR MediaStreamTrack.getSources are available.');
    return;
  }
  var allMediaDevices = [];
  var allAudioDevices = [];
  var allVideoDevices = [];
  var audioInputDevices = [];
  var audioOutputDevices = [];
  var videoInputDevices = [];
  var videoOutputDevices = [];
  navigator.enumerateDevices(function(devices) {
    devices.forEach(function(_device) {
      var device = {};
      for (var d in _device) {
        device[d] = _device[d];
      }
      // make sure that we are not fetching duplicate devices
      var skip;
      allMediaDevices.forEach(function(d) {
        if (d.id === device.id) {
          skip = true;
        }
      });
      if (skip) {
        return;
      }
      // if it is MediaStreamTrack.getSources
      if (device.kind === 'audio') {
        device.kind = 'audioinput';
      }
      if (device.kind === 'video') {
        device.kind = 'videoinput';
      }
      if (!device.deviceId) {
        device.deviceId = device.id;
      }
      if (!device.id) {
        device.id = device.deviceId;
      }
      if (!device.label) {
        device.label = 'Please invoke getUserMedia once.';
      }
      if (device.kind === 'audioinput' || device.kind === 'audio') {
        audioInputDevices.push(device);
      }
      if (device.kind === 'audiooutput') {
        audioOutputDevices.push(device);
      }
      if (device.kind === 'videoinput' || device.kind === 'video') {
        videoInputDevices.push(device);
      }
      if (device.kind.indexOf('audio') !== -1) {
        allAudioDevices.push(device);
      }
      if (device.kind.indexOf('video') !== -1) {
        allVideoDevices.push(device);
      }
      // there is no 'videooutput' in the spec,
      // so videoOutputDevices will always be empty
      allMediaDevices.push(device);
    });
    if (successCallback) {
      successCallback({
        allMediaDevices: allMediaDevices,
        allVideoDevices: allVideoDevices,
        allAudioDevices: allAudioDevices,
        videoInputDevices: videoInputDevices,
        audioInputDevices: audioInputDevices,
        audioOutputDevices: audioOutputDevices
      });
    }
  });
}
Here is how to use the above reusable cross-browser shim:
getAllAudioVideoDevices(function(result) {
  if (result.allMediaDevices.length) {
    console.debug('Number of audio/video devices available:', result.allMediaDevices.length);
  }
  if (result.allVideoDevices.length) {
    console.debug('Number of video devices available:', result.allVideoDevices.length);
  }
  if (result.allAudioDevices.length) {
    console.debug('Number of audio devices available:', result.allAudioDevices.length);
  }
  if (result.videoInputDevices.length) {
    console.debug('Number of video-input devices available:', result.videoInputDevices.length);
  }
  if (result.audioInputDevices.length) {
    console.debug('Number of audio-input devices available:', result.audioInputDevices.length);
  }
  if (result.audioOutputDevices.length) {
    console.debug('Number of audio-output devices available:', result.audioOutputDevices.length);
  }
  if (result.allMediaDevices.length && result.allMediaDevices[0].label === 'Please invoke getUserMedia once.') {
    console.warn('It seems you did not invoke navigator.getUserMedia before using these APIs.');
  }
  console.info('All audio input devices:');
  result.audioInputDevices.forEach(function(device) {
    console.log('Audio input device id:', device.id, 'Device label:', device.label);
  });
  console.info('All audio output devices:');
  result.audioOutputDevices.forEach(function(device) {
    console.log('Audio output device id:', device.id, 'Device label:', device.label);
  });
  console.info('All video input devices:');
  result.videoInputDevices.forEach(function(device) {
    console.log('Video input device id:', device.id, 'Device label:', device.label);
  });
}, function(error) {
  alert(error);
});
It turns out that Chrome does support the MediaStreamTrack API, which allows you to do this. In Firefox this API is still experimental. Here is the Chrome implementation:
if (typeof MediaStreamTrack === 'undefined') {
  alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
} else {
  MediaStreamTrack.getSources(onSourcesAcquired);
}

function onSourcesAcquired(sources) {
  for (var i = 0; i != sources.length; ++i) {
    var source = sources[i];
    // source.id -> DEVICE ID
    // source.label -> DEVICE NAME
    // source.kind = "audio" OR "video"
    // TODO: add this to some datastructure of yours or a selection dialog
  }
}
....
And then when calling getUserMedia, specify the id in the constraints:
var constraints = {
  audio: {
    optional: [{ sourceId: selected_audio_source_id }]
  },
  video: {
    optional: [{ sourceId: selected_video_source_id }]
  }
};
navigator.getUserMedia(constraints, onSuccessCallback, onErrorCallback);
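In current browsers the same selection is expressed with the standardized deviceId constraint. A minimal sketch (openCamera and the label matching are illustrative, not from the answers above):
// Pick a specific camera by matching part of its label, then open it with an
// exact deviceId constraint; falls back to any camera if nothing matches.
// Note: labels are empty until getUserMedia has been granted at least once.
async function openCamera(labelFragment) {
  const devices = await navigator.mediaDevices.enumerateDevices();
  const cam = devices.find(d =>
    d.kind === 'videoinput' && d.label.indexOf(labelFragment) !== -1);
  const constraints = cam
    ? { video: { deviceId: { exact: cam.deviceId } } }
    : { video: true };
  return navigator.mediaDevices.getUserMedia(constraints);
}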
It sounds to me like you are looking for facingMode. You can check it out in this document:
http://www.w3.org/TR/2013/WD-mediacapture-streams-20130516/#idl-def-AllVideoCapabilities
Not sure how well it is supported yet, though.
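For illustration, a minimal facingMode sketch using the current constraint syntax (not the 2013 draft linked above):
// Ask for the rear ("environment") camera; use 'user' for the front camera.
navigator.mediaDevices.getUserMedia({ video: { facingMode: 'environment' } })
  .then(function (stream) {
    document.querySelector('video').srcObject = stream;
  })
  .catch(console.error);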