Using the back camera instead of the front camera

How can I force the capture manager to use the back camera, given that I can't use CameraCaptureUI on Windows Phone 8.1? The app launches with the default front camera.
This is my code:
function initCaptureSettings() {
    captureInitSettings = null;
    captureInitSettings =
        new Windows.Media.Capture.MediaCaptureInitializationSettings();
    captureInitSettings.photoCaptureSource = Windows.Media.Capture.PhotoCaptureSource.photo;
    captureInitSettings.streamingCaptureMode =
        Windows.Media.Capture.StreamingCaptureMode.audioAndVideo;
    startDevice();
}

function startDevice() {
    Debug.writeln("Starting device");
    releaseMediaCapture();
    mediaCaptureMgr = new Windows.Media.Capture.MediaCapture();
    mediaCaptureMgr.initializeAsync(captureInitSettings).done(function (result) {
        // do we have a camera and a microphone present?
        if (mediaCaptureMgr.mediaCaptureSettings.videoDeviceId && mediaCaptureMgr.mediaCaptureSettings.audioDeviceId) {
            // Update the UI
            Debug.writeln("Device started");
        } else {
            Debug.writeln("No capture device was found");
        }
        startPreview();
    }, errorHandler);
}

function startPreview() {
    Debug.writeln("Starting preview");
    try {
        var video = id("photoCapture");
        //Windows.Media.Capture.CameraOptionsUI.show(mediaCaptureMgr);
        video.src = URL.createObjectURL(mediaCaptureMgr, { oneTimeOnly: true });
        video.play();
    } catch (e) {
        Debug.writeln("Preview failed: " + e.message);
        return;
    }
    Debug.writeln("Preview started");
    intervalID = setInterval(function () {
        capturePhoto();
    }, 1500);
}
I suppose it is something in the settings.

Try this (the snippet is C#; note that devices[1] simply assumes the back camera is the second enumerated device):
var devices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
await mediaCaptureMgr.InitializeAsync(new MediaCaptureInitializationSettings
{
    VideoDeviceId = devices[1].Id
});
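Since the question is WinJS rather than C#, here is a rough JavaScript equivalent (a sketch: it assumes the phone reports an enclosure location for the back camera, and falls back to the last enumerated device otherwise):
var Enumeration = Windows.Devices.Enumeration;
Enumeration.DeviceInformation.findAllAsync(Enumeration.DeviceClass.videoCapture)
    .done(function (devices) {
        // Fallback guess: the back camera is often enumerated after the front one.
        var backId = devices[devices.length - 1].id;
        for (var i = 0; i < devices.length; i++) {
            var loc = devices[i].enclosureLocation;
            if (loc && loc.panel === Enumeration.Panel.back) {
                backId = devices[i].id;
            }
        }
        captureInitSettings.videoDeviceId = backId;
        startDevice();
    });
This would replace the direct startDevice() call at the end of initCaptureSettings().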

Related

WebRTC video chat not working across different networks

I am trying to implement video chat with WebRTC using code from the Google Codelabs. It works fine on the same network but does not work across different networks. I have installed coturn on my server, but I am not sure whether it is working correctly.
Has anyone used the Google Codelabs code and gotten it working?
This is the modified main.js file:
'use strict';
var isChannelReady = false;
var isInitiator = false;
var isStarted = false;
var localStream;
var pc;
var remoteStream;
var turnReady;
var pcConfig = {
"iceServers":[
{'urls': 'stun:stun.l.google.com:19302'},
{"urls":["turn:78.129.167.90"],"username":"wyzturner1","credential":"wyzturnp2ss"}]
};
// Set up audio and video regardless of what devices are present.
var sdpConstraints = {
offerToReceiveAudio: true,
offerToReceiveVideo: true
};
/////////////////////////////////////////////
var room = 'foo';
// Could prompt for room name:
// room = prompt('Enter room name:');
var socket = io.connect('https://www.samplesite.com:8080');
if (room !== '') {
socket.emit('create or join', room);
console.log('Attempted to create or join room', room);
}
socket.on('created', function(room) {
console.log('Created room ' + room);
isInitiator = true;
});
socket.on('full', function(room) {
console.log('Room ' + room + ' is full');
});
socket.on('join', function (room){
console.log('Another peer made a request to join room ' + room);
console.log('This peer is the initiator of room ' + room + '!');
isChannelReady = true;
});
socket.on('joined', function(room) {
console.log('joined: ' + room);
isChannelReady = true;
});
socket.on('log', function(array) {
console.log.apply(console, array);
});
////////////////////////////////////////////////
function sendMessage(message) {
console.log('Client sending message: ', message);
socket.emit('message', message);
}
// This client receives a message
socket.on('message', function(message) {
console.log('Client received message:', message);
if (message === 'got user media') {
maybeStart();
} else if (message.type === 'offer') {
if (!isInitiator && !isStarted) {
maybeStart();
}
pc.setRemoteDescription(new RTCSessionDescription(message));
doAnswer();
} else if (message.type === 'answer' && isStarted) {
pc.setRemoteDescription(new RTCSessionDescription(message));
} else if (message.type === 'candidate' && isStarted) {
var candidate = new RTCIceCandidate({
sdpMLineIndex: message.label,
candidate: message.candidate
});
pc.addIceCandidate(candidate);
} else if (message === 'bye' && isStarted) {
handleRemoteHangup();
}
});
////////////////////////////////////////////////////
var localVideo = document.querySelector('#localVideo');
var remoteVideo = document.querySelector('#remoteVideo');
navigator.mediaDevices.getUserMedia({
audio: false,
video: true
})
.then(gotStream)
.catch(function(e) {
alert('getUserMedia() error: ' + e.name);
});
function gotStream(stream) {
console.log('Adding local stream.');
localVideo.src = window.URL.createObjectURL(stream);
localStream = stream;
sendMessage('got user media');
if (isInitiator) {
maybeStart();
}
}
var constraints = {
video: true
};
console.log('Getting user media with constraints', constraints);
//if (location.hostname !== 'localhost') {
// requestTurn(
// 'https://computeengineondemand.appspot.com/turn?username=41784574&key=4080218913'
// );
//}
function maybeStart() {
console.log('>>>>>>> maybeStart() ', isStarted, localStream, isChannelReady);
if (!isStarted && typeof localStream !== 'undefined' && isChannelReady) {
console.log('>>>>>> creating peer connection');
createPeerConnection();
isStarted = true;
console.log('isInitiator', isInitiator);
if (isInitiator) {
doCall();
}
}
}
window.onbeforeunload = function() {
sendMessage('bye');
};
/////////////////////////////////////////////////////////
function createPeerConnection() {
try {
pc = new RTCPeerConnection(pcConfig);
pc.addStream(localStream);
pc.onicecandidate = handleIceCandidate;
pc.onaddstream = handleRemoteStreamAdded;
pc.oniceconnectionstatechange = function(){
console.log('ICE state: ',pc.iceConnectionState);
}
pc.onremovestream = handleRemoteStreamRemoved;
console.log('Created RTCPeerConnnection');
} catch (e) {
console.log('Failed to create PeerConnection, exception: ' + e.message);
alert('Cannot create RTCPeerConnection object.');
return;
}
}
function handleIceCandidate(event) {
console.log('icecandidate event: ', event);
if (event.candidate) {
sendMessage({
type: 'candidate',
label: event.candidate.sdpMLineIndex,
id: event.candidate.sdpMid,
candidate: event.candidate.candidate
});
} else {
console.log('End of candidates.');
}
}
function handleRemoteStreamAdded(event) {
console.log('Remote stream added.');
remoteVideo.src = window.URL.createObjectURL(event.stream);
remoteStream = event.stream;
}
function handleCreateOfferError(event) {
console.log('createOffer() error: ', event);
}
function doCall() {
console.log('Sending offer to peer');
pc.createOffer(setLocalAndSendMessage, handleCreateOfferError);
}
function doAnswer() {
console.log('Sending answer to peer.');
pc.createAnswer().then(
setLocalAndSendMessage,
onCreateSessionDescriptionError
);
}
function setLocalAndSendMessage(sessionDescription) {
// Set Opus as the preferred codec in SDP if Opus is present.
// sessionDescription.sdp = preferOpus(sessionDescription.sdp);
pc.setLocalDescription(sessionDescription);
console.log('setLocalAndSendMessage sending message', sessionDescription);
sendMessage(sessionDescription);
}
function onCreateSessionDescriptionError(error) {
trace('Failed to create session description: ' + error.toString());
}
function requestTurn(turnURL) {
var turnExists = false;
for (var i in pcConfig.iceServers) {
if (pcConfig.iceServers[i].urls.substr(0, 5) === 'turn:') {
turnExists = true;
turnReady = true;
break;
}
}
// if (!turnExists) {
// console.log('Getting TURN server from ', turnURL);
// // No TURN server. Get one from computeengineondemand.appspot.com:
// var xhr = new XMLHttpRequest();
// xhr.onreadystatechange = function() {
// if (xhr.readyState === 4 && xhr.status === 200) {
// var turnServer = JSON.parse(xhr.responseText);
// console.log('Got TURN server: ', turnServer);
// pcConfig.iceServers.push({
// 'url': 'turn:' + turnServer.username + '#' + turnServer.turn,
// 'credential': turnServer.password
// });
// turnReady = true;
// }
// };
// xhr.open('GET', turnURL, true);
// xhr.send();
// }
}
function handleRemoteStreamRemoved(event) {
console.log('Remote stream removed. Event: ', event);
}
function hangup() {
console.log('Hanging up.');
stop();
sendMessage('bye');
}
function handleRemoteHangup() {
console.log('Session terminated.');
stop();
isInitiator = false;
}
function stop() {
isStarted = false;
// isAudioMuted = false;
// isVideoMuted = false;
pc.close();
pc = null;
}
///////////////////////////////////////////
// Set Opus as the default audio codec if it's present.
function preferOpus(sdp) {
var sdpLines = sdp.split('\r\n');
var mLineIndex;
// Search for m line.
for (var i = 0; i < sdpLines.length; i++) {
if (sdpLines[i].search('m=audio') !== -1) {
mLineIndex = i;
break;
}
}
if (mLineIndex === null) {
return sdp;
}
// If Opus is available, set it as the default in m line.
for (i = 0; i < sdpLines.length; i++) {
if (sdpLines[i].search('opus/48000') !== -1) {
var opusPayload = extractSdp(sdpLines[i], /:(\d+) opus\/48000/i);
if (opusPayload) {
sdpLines[mLineIndex] = setDefaultCodec(sdpLines[mLineIndex],
opusPayload);
}
break;
}
}
// Remove CN in m line and sdp.
sdpLines = removeCN(sdpLines, mLineIndex);
sdp = sdpLines.join('\r\n');
return sdp;
}
function extractSdp(sdpLine, pattern) {
var result = sdpLine.match(pattern);
return result && result.length === 2 ? result[1] : null;
}
// Set the selected codec to the first in m line.
function setDefaultCodec(mLine, payload) {
var elements = mLine.split(' ');
var newLine = [];
var index = 0;
for (var i = 0; i < elements.length; i++) {
if (index === 3) { // Format of media starts from the fourth.
newLine[index++] = payload; // Put target payload to the first.
}
if (elements[i] !== payload) {
newLine[index++] = elements[i];
}
}
return newLine.join(' ');
}
// Strip CN from sdp before CN constraints is ready.
function removeCN(sdpLines, mLineIndex) {
var mLineElements = sdpLines[mLineIndex].split(' ');
// Scan from end for the convenience of removing an item.
for (var i = sdpLines.length - 1; i >= 0; i--) {
var payload = extractSdp(sdpLines[i], /a=rtpmap:(\d+) CN\/\d+/i);
if (payload) {
var cnPos = mLineElements.indexOf(payload);
if (cnPos !== -1) {
// Remove CN payload from m line.
mLineElements.splice(cnPos, 1);
}
// Remove CN line in sdp
sdpLines.splice(i, 1);
}
}
sdpLines[mLineIndex] = mLineElements.join(' ');
return sdpLines;
}
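No accepted fix is shown here, but since the asker is unsure whether coturn is working, one way to verify it is to force relay-only ICE with the same TURN credentials (a sketch, not from the original post; if no candidates are logged, the TURN server is unreachable or the credentials are wrong):
var relayOnlyPc = new RTCPeerConnection({
    iceServers: [
        {urls: 'turn:78.129.167.90', username: 'wyzturner1', credential: 'wyzturnp2ss'}
    ],
    iceTransportPolicy: 'relay' // discard host/srflx candidates; only TURN relay passes
});
relayOnlyPc.onicecandidate = function(event) {
    if (event.candidate) {
        console.log('relay candidate:', event.candidate.candidate);
    }
};
relayOnlyPc.createDataChannel('probe'); // any media/data section triggers ICE gathering
relayOnlyPc.createOffer().then(function(offer) {
    return relayOnlyPc.setLocalDescription(offer); // gathering starts here
});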

MediaStreamTrack's readyState is getting changed to "ended" just before playing the stream (MediaStream - MediaStreamTrack - WebRTC)

This jsfiddle (https://jsfiddle.net/kalyansai99/mm1b74uy/22/) contains code where the user can toggle between the front and back camera of a mobile phone.
On some phones it works fine (Moto G5 Plus, Moto E3, and so on, in the Chrome browser). On others (Redmi Note 4, Chrome browser), when I switch to the back camera, the stream initially loads with a track whose readyState is "live". But when I am about to play the stream in the video player, the readyState changes to "ended" and a black screen is shown on the video tag.
Not sure what's happening. Any clues?
JSFiddle Code
var player = document.getElementById('player');
var flipBtn = document.getElementById('flipBtn');
var deviceIdMap = {};
var front;
var constraints = {
    audio: false,
    video: {
        frameRate: 1000
    }
};
var gotDevices = function (deviceList) {
    var length = deviceList.length;
    console.log(deviceList);
    for (var i = 0; i < length; i++) {
        var deviceInfo = deviceList[i];
        if (deviceInfo.kind === 'videoinput') {
            if (deviceInfo.label.indexOf('front') !== -1) {
                deviceIdMap.front = deviceInfo.deviceId;
            } else if (deviceInfo.label.indexOf('back') !== -1) {
                deviceIdMap.back = deviceInfo.deviceId;
            }
        }
    }
    if (deviceIdMap.front) {
        constraints.video.deviceId = {exact: deviceIdMap.front};
        front = true;
    } else if (deviceIdMap.back) {
        constraints.video.deviceId = {exact: deviceIdMap.back};
        front = false;
    }
    console.log('deviceIdMap - ', deviceIdMap);
};
var handleError = function (error) {
    console.log('navigator.getUserMedia error: ', error);
};
function handleSuccess(stream) {
    window.stream = stream;
    // this is a video track as there is no audio track
    console.log("Track - ", window.stream.getTracks()[0]);
    console.log('Ready State - ', window.stream.getTracks()[0].readyState);
    if (window.URL) {
        player.src = window.URL.createObjectURL(stream);
    } else {
        player.src = stream;
    }
    player.onloadedmetadata = function (e) {
        console.log('Ready State - 3', window.stream.getTracks()[0].readyState);
        player.play();
        console.log('Ready State - 4', window.stream.getTracks()[0].readyState);
    };
    console.log('Ready State - 2', window.stream.getTracks()[0].readyState);
}
navigator.mediaDevices.enumerateDevices().then(gotDevices).catch(handleError);
flipBtn.addEventListener('click', function () {
    if (window.stream) {
        window.stream.getTracks().forEach(function(track) {
            track.stop();
        });
    }
    if (front) {
        constraints.video.deviceId = {exact: deviceIdMap.back};
    } else {
        constraints.video.deviceId = {exact: deviceIdMap.front};
    }
    front = !front;
    navigator.getUserMedia(constraints, handleSuccess, handleError);
}, false);
console.log(constraints);
navigator.getUserMedia(constraints, handleSuccess, handleError);

#player {
    width: 320px;
}
#flipBtn {
    width: 150px;
    height: 50px;
}

<video id="player" autoplay></video>
<div>
    <button id="flipBtn">
        Flip Camera
    </button>
</div>
Replace track.stop() with track.enabled = false, and when adding the track back to the stream, re-enable it with track.enabled = true.
A MediaStreamTrack's readyState property changes to "ended" when we stop the track, and the track can never be used again after that. Therefore it's not wise to use stop(). For more reference:
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/readyState
https://developer.mozilla.org/en-US/docs/Web/API/MediaStreamTrack/stop
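A minimal sketch of the fiddle's flip handler with that suggestion applied (same variable names as in the question):
flipBtn.addEventListener('click', function () {
    if (window.stream) {
        window.stream.getTracks().forEach(function (track) {
            track.enabled = false; // disable instead of stopping: readyState stays "live"
        });
    }
    // ...then request the other camera and set track.enabled = true on the new track
}, false);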

YouTube OnStateChange with multiple players on the same page

I have multiple YouTube players on a single page inside a banner slider, and I want to use the YouTube Player API to control them and do other things based on the state of the videos. I'm pretty sure the code below used to work fine, with every state change registered, but it doesn't work for me anymore. The YouTube object is still there and I can use it to start and stop a video, but the onStateChange event never gets triggered. What is wrong with this code?
var YT_ready = (function() {
    var onReady_funcs = [],
        api_isReady = false;
    return function(func, b_before) {
        if (func === true) {
            api_isReady = true;
            for (var i = 0; i < onReady_funcs.length; i++) {
                onReady_funcs.shift()();
            }
        } else if (typeof func == "function") {
            if (api_isReady) func();
            else onReady_funcs[b_before ? "unshift" : "push"](func);
        }
    };
})();
function onYouTubePlayerAPIReady() {
    YT_ready(true);
}
var players_homepage = {};
YT_ready(function() {
    $("li.video iframe.youtube").each(function(event) {
        var frameID_homepage = $(this).attr('id');
        if (frameID_homepage) {
            players_homepage[frameID_homepage] = new YT.Player(frameID_homepage, {
                events: {
                    'onStateChange': onPlayerStateChange_homepage
                }
            });
        }
    });
});
(function(){
    var tag = document.createElement('script');
    tag.src = "//www.youtube.com/iframe_api";
    var firstScriptTag = document.getElementsByTagName('script')[0];
    firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
})();
function onPlayerStateChange_homepage(event) {
    if (event.data === 0) {
        // do something on end
    } else if (event.data === 1) {
        // do something on play
    } else if (event.data === 2) {
        // do something on pause
    }
}
function pauseVideo_homepage(previousVideo) {
    players_homepage[previousVideo].pauseVideo();
}
function playVideo_homepage(currentVideo) {
    players_homepage[currentVideo].playVideo();
}
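One thing worth checking that the code above does not show (not part of the original question): the iframe API only delivers onStateChange for an iframe whose embed URL has the JS API enabled via enablejsapi=1 (and, in some setups, an origin parameter matching your page). A quick hypothetical check in the same jQuery style:
$("li.video iframe.youtube").each(function () {
    // onStateChange never fires for embeds missing enablejsapi=1 in their src
    if (this.src.indexOf('enablejsapi=1') === -1) {
        console.warn(($(this).attr('id') || 'unnamed iframe') + ' is missing enablejsapi=1');
    }
});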

How to choose the input video device for WebRTC?

I am studying a WebRTC application.
My reference is this software:
apprtc
https://code.google.com/p/webrtc/source/browse/trunk/samples/js/apprtc/
demo
https://apprtc.appspot.com/
My computer has a built-in video device, and apprtc uses it. However, I want to use a USB video camera instead.
I am searching for a way to change the input video device, but I couldn't find any clue in the source files.
Does anyone have any information?
On Chrome:
chrome://settings/content/camera
chrome://settings/content/microphone
On Firefox: media.navigator.permission.disabled=false
Try this demo, which captures all audio/video input devices:
https://www.webrtc-experiment.com/demos/MediaStreamTrack.getSources.html
You can capture any "specific" device using the same API.
Edited on March 01, 2014:
MediaStreamTrack.getSources(function (media_sources) {
    for (var i = 0; i < media_sources.length; i++) {
        var media_source = media_sources[i];
        var constraints = {};
        // if audio device
        if (media_source.kind == 'audio') {
            constraints.audio = {
                optional: [{
                    sourceId: media_source.id
                }]
            };
        }
        // if video device
        if (media_source.kind == 'video') {
            constraints.video = {
                optional: [{
                    sourceId: media_source.id
                }]
            };
        }
        // invoke getUserMedia to capture this device
        navigator.webkitGetUserMedia(constraints, function (stream) {
            console.log(stream.id, stream);
        }, console.error);
    }
});
Updated on Sep 05, 2015:
Microsoft Edge, Chrome 44+, and Firefox 38+ all support the navigator.mediaDevices.enumerateDevices API now.
Here is a reusable script that provides a cross-browser shim for all these media-sources APIs. It works even in old Chrome (43 and older), even on Android devices:
if (navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
    // Firefox 38+, Microsoft Edge, and Chrome 44+ support enumerateDevices
    navigator.enumerateDevices = function(callback) {
        navigator.mediaDevices.enumerateDevices().then(callback);
    };
}
function getAllAudioVideoDevices(successCallback, failureCallback) {
    if (!navigator.enumerateDevices && window.MediaStreamTrack && window.MediaStreamTrack.getSources) {
        navigator.enumerateDevices = window.MediaStreamTrack.getSources.bind(window.MediaStreamTrack);
    }
    if (!navigator.enumerateDevices && navigator.mediaDevices && navigator.mediaDevices.enumerateDevices) {
        // bind to mediaDevices (not navigator) to avoid an "illegal invocation" error
        navigator.enumerateDevices = navigator.mediaDevices.enumerateDevices.bind(navigator.mediaDevices);
    }
    if (!navigator.enumerateDevices) {
        failureCallback(null, 'Neither navigator.mediaDevices.enumerateDevices NOR MediaStreamTrack.getSources are available.');
        return;
    }
    var allMediaDevices = [];
    var allAudioDevices = [];
    var allVideoDevices = [];
    var audioInputDevices = [];
    var audioOutputDevices = [];
    var videoInputDevices = [];
    var videoOutputDevices = [];
    navigator.enumerateDevices(function(devices) {
        devices.forEach(function(_device) {
            var device = {};
            for (var d in _device) {
                device[d] = _device[d];
            }
            // make sure that we are not fetching duplicate devices
            var skip;
            allMediaDevices.forEach(function(d) {
                if (d.id === device.id) {
                    skip = true;
                }
            });
            if (skip) {
                return;
            }
            // if it is MediaStreamTrack.getSources
            if (device.kind === 'audio') {
                device.kind = 'audioinput';
            }
            if (device.kind === 'video') {
                device.kind = 'videoinput';
            }
            if (!device.deviceId) {
                device.deviceId = device.id;
            }
            if (!device.id) {
                device.id = device.deviceId;
            }
            if (!device.label) {
                device.label = 'Please invoke getUserMedia once.';
            }
            if (device.kind === 'audioinput' || device.kind === 'audio') {
                audioInputDevices.push(device);
            }
            if (device.kind === 'audiooutput') {
                audioOutputDevices.push(device);
            }
            if (device.kind === 'videoinput' || device.kind === 'video') {
                videoInputDevices.push(device);
            }
            if (device.kind.indexOf('audio') !== -1) {
                allAudioDevices.push(device);
            }
            if (device.kind.indexOf('video') !== -1) {
                allVideoDevices.push(device);
            }
            // there is no 'videooutput' in the spec,
            // so videoOutputDevices will always be [empty]
            allMediaDevices.push(device);
        });
        if (successCallback) {
            successCallback({
                allMediaDevices: allMediaDevices,
                allVideoDevices: allVideoDevices,
                allAudioDevices: allAudioDevices,
                videoInputDevices: videoInputDevices,
                audioInputDevices: audioInputDevices,
                audioOutputDevices: audioOutputDevices
            });
        }
    });
}
Here is how to use the above reusable cross-browser shim:
getAllAudioVideoDevices(function(result) {
    if (result.allMediaDevices.length) {
        console.debug('Number of audio/video devices available:', result.allMediaDevices.length);
    }
    if (result.allVideoDevices.length) {
        console.debug('Number of video devices available:', result.allVideoDevices.length);
    }
    if (result.allAudioDevices.length) {
        console.debug('Number of audio devices available:', result.allAudioDevices.length);
    }
    if (result.videoInputDevices.length) {
        console.debug('Number of video-input devices available:', result.videoInputDevices.length);
    }
    if (result.audioInputDevices.length) {
        console.debug('Number of audio-input devices available:', result.audioInputDevices.length);
    }
    if (result.audioOutputDevices.length) {
        console.debug('Number of audio-output devices available:', result.audioOutputDevices.length);
    }
    if (result.allMediaDevices.length && result.allMediaDevices[0].label === 'Please invoke getUserMedia once.') {
        console.warn('It seems you did not invoke navigator.getUserMedia before using these APIs.');
    }
    console.info('All audio input devices:');
    result.audioInputDevices.forEach(function(device) {
        console.log('Audio input device id:', device.id, 'Device label:', device.label);
    });
    console.info('All audio output devices:');
    result.audioOutputDevices.forEach(function(device) {
        console.log('Audio output device id:', device.id, 'Device label:', device.label);
    });
    console.info('All video input devices:');
    result.videoInputDevices.forEach(function(device) {
        console.log('Video input device id:', device.id, 'Device label:', device.label);
    });
}, function(error) {
    alert(error);
});
It turns out that Chrome supports the MediaStreamTrack API, which allows you to do this. In Firefox this API is still experimental. Here is the Chrome implementation:
if (typeof MediaStreamTrack === 'undefined') {
    alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
} else {
    MediaStreamTrack.getSources(onSourcesAcquired);
}
function onSourcesAcquired(sources) {
    for (var i = 0; i != sources.length; ++i) {
        var source = sources[i];
        // source.id -> DEVICE ID
        // source.label -> DEVICE NAME
        // source.kind = "audio" OR "video"
        // TODO: add this to some datastructure of yours or a selection dialog
    }
}
....
And then when calling getUserMedia, specify the id in the constraints:
var constraints = {
    audio: {
        optional: [{sourceId: selected_audio_source_id}]
    },
    video: {
        optional: [{sourceId: selected_video_source_id}]
    }
};
navigator.getUserMedia(constraints, onSuccessCallback, onErrorCallback);
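For reference (not part of the original answer): modern browsers have replaced the optional/sourceId form with the standard deviceId constraint, and the callback-style navigator.getUserMedia with a promise-based API. The same selection, using the placeholder ids from above:
var constraints = {
    audio: { deviceId: { exact: selected_audio_source_id } },
    video: { deviceId: { exact: selected_video_source_id } }
};
navigator.mediaDevices.getUserMedia(constraints)
    .then(onSuccessCallback)
    .catch(onErrorCallback);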
It sounds to me like you are looking for facingMode. You can check it out in this document:
http://www.w3.org/TR/2013/WD-mediacapture-streams-20130516/#idl-def-AllVideoCapabilities
Not sure how well it is supported yet, though.
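For what it's worth, facingMode is well supported in current browsers. A minimal sketch that asks for the environment-facing (rear) camera without enumerating devices at all:
navigator.mediaDevices.getUserMedia({
    video: { facingMode: { ideal: 'environment' } }, // 'user' selects the front camera
    audio: false
}).then(function (stream) {
    document.querySelector('video').srcObject = stream;
}).catch(console.error);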

WebRTC + JSEP + Google Channel API - cannot receive remoteStream

I've been trying to get this to work, but I don't know what's wrong. Can you guys help me? I took the WebRTC sample code and modified it like this:
1. Both caller and callee enter the web page.
2. The web page creates a channel based on each user's username.
3. The caller clicks the call button, which sends a message to the specific channel for some user's username. When the caller clicks the call button, he then creates a peerConnection and adds the localStream.
4. The callee receives the message, and the process goes on as in the WebRTC sample code: when the callee receives an offer, he creates a peerConnection, adds the localStream, then creates and sends an answer.
My code goes like this:
var my_username = '{{ current_username }}';
var friend;
var localVideo;
var remoteVideo;
var localStream;
var remoteStream;
var channel;
var channelReady = false;
var pc;
var socket;
var started = false;
// Set up audio and video regardless of what devices are present.
var mediaConstraints = {'mandatory': {
'OfferToReceiveAudio':true,
'OfferToReceiveVideo':true }};
var isVideoMuted = false;
var isAudioMuted = false;
function choiceFriendInitialize() {
var choice_visible = false;
$('ul#friendlist > li').click(function(e) {
$(this).css('background-color', '#808080');
friend = $(this).text();
var choice = $('ul#choice');
choice_visible = true;
choice.css('display', 'inline-block');
choice.css('top', e.pageY);
choice.css('left', e.pageX);
});
// trigger call from here
$('li#call').click(function() {
$('ul#friendlist > li').css('background-color', 'transparent');
$('ul#choice').css('display', 'none');
choice_visible = false;
// call
maybeStart();
doCall();
});
$('li#unfriend').click(function() {
$('ul#friendlist > li').css('background-color', 'transparent');
$('ul#choice').css('display', 'none');
choice_visible = false;
});
$("body").mouseup(function(){
if (choice_visible) {
$('ul#friendlist > li').css('background-color', 'transparent');
$('ul#choice').css('display', 'none');
choice_visible = false;
}
});
}
function initialize() {
console.log("Initializing..");
choiceFriendInitialize();
localVideo = document.getElementById("localVideo");
remoteVideo = document.getElementById("remoteVideo");
openChannel();
doGetUserMedia();
}
function openChannel() {
console.log("Opening channel.");
var channel = new goog.appengine.Channel('{{ token }}');
var handler = {
'onopen': onChannelOpened,
'onmessage': onChannelMessage,
'onerror': onChannelError,
'onclose': onChannelClosed
};
socket = channel.open(handler);
}
function doGetUserMedia() {
// Call into getUserMedia via the polyfill (adapter.js).
var constraints = {"mandatory": {}, "optional": []};
try {
getUserMedia({'audio':true, 'video':constraints}, onUserMediaSuccess, onUserMediaError);
console.log("Requested access to local media with mediaConstraints:\n" + " \"" + JSON.stringify(constraints) + "\"");
} catch (e) {
alert("getUserMedia() failed. Is this a WebRTC capable browser?");
console.log("getUserMedia failed with exception: " + e.message);
}
}
function createPeerConnection() {
var pc_config = {"iceServers": [{"url": "stun:stun.l.google.com:19302"}]};
try {
// Create an RTCPeerConnection via the polyfill (adapter.js).
pc = new RTCPeerConnection(pc_config);
pc.onicecandidate = onIceCandidate;
console.log("Created RTCPeerConnnection with config:\n" + " \"" + JSON.stringify(pc_config) + "\".");
} catch (e) {
console.log("Failed to create PeerConnection, exception: " + e.message);
alert("Cannot create RTCPeerConnection object; WebRTC is not supported by this browser.");
return;
}
pc.onconnecting = onSessionConnecting;
pc.onopen = onSessionOpened;
pc.onaddstream = onRemoteStreamAdded;
pc.onremovestream = onRemoteStreamRemoved;
}
function maybeStart() {
if (!started && localStream && channelReady) {
console.log("Creating PeerConnection.");
createPeerConnection();
console.log("Adding local stream.");
pc.addStream(localStream);
started = true;
// Caller initiates offer to peer.
//if (initiator)
//doCall();
}
}
function doCall() {
console.log("Sending offer to peer.");
pc.createOffer(setLocalAndSendMessage, null, mediaConstraints);
}
function doAnswer() {
console.log("Sending answer to peer.");
pc.createAnswer(setLocalAndSendMessage, null, mediaConstraints);
}
function setLocalAndSendMessage(sessionDescription) {
// Set Opus as the preferred codec in SDP if Opus is present.
sessionDescription.sdp = preferOpus(sessionDescription.sdp);
pc.setLocalDescription(sessionDescription);
sendMessage({from: my_username, to: friend}, sessionDescription);
}
function sendMessage(client, message) {
console.log('C->S: ' + JSON.stringify(message));
var xhr = new XMLHttpRequest();
xhr.open('POST', '/send', true);
xhr.setRequestHeader("Content-type","application/json");
var msgString = {send_info: client, data_message: message};
xhr.send(JSON.stringify(msgString));
}
function processSignalingMessage(message) {
var msg = JSON.parse(message);
var data_message = msg.data_message;
var send_info = msg.send_info;
if (data_message.type === 'offer') {
// Callee creates PeerConnection
if (!started)
maybeStart();
pc.setRemoteDescription(new RTCSessionDescription(data_message));
friend = send_info.from;
doAnswer();
} else if (data_message.type === 'answer' && started) {
pc.setRemoteDescription(new RTCSessionDescription(data_message));
} else if (data_message.type === 'candidate' && started) {
var candidate = new RTCIceCandidate({sdpMLineIndex:data_message.label, candidate:data_message.candidate});
pc.addIceCandidate(candidate);
} else if (data_message.type === 'bye' && started) {
onRemoteHangup();
}
}
function onChannelOpened() {
console.log('Channel opened.');
channelReady = true;
}
function onChannelMessage(message) {
console.log('S->C: ' + message.data);
processSignalingMessage(message.data);
}
function onChannelError() {
console.log('Channel error.');
}
function onChannelClosed() {
console.log('Channel closed.');
}
function onUserMediaSuccess(stream) {
console.log("User has granted access to local media.");
// Call the polyfill wrapper to attach the media stream to this element.
attachMediaStream(localVideo, stream);
localVideo.style.opacity = 1;
localStream = stream;
// Caller creates PeerConnection.
//if (initiator) maybeStart();
}
function onUserMediaError(error) {
console.log("Failed to get access to local media. Error code was " + error.code);
alert("Failed to get access to local media. Error code was " + error.code + ".");
}
function onIceCandidate(event) {
if (event.candidate) {
sendMessage({from: my_username, to: friend}, {type: 'candidate',
label: event.candidate.sdpMLineIndex,
id: event.candidate.sdpMid,
candidate: event.candidate.candidate});
} else {
console.log("End of candidates.");
}
}
function onSessionConnecting(message) {
console.log("Session connecting.");
}
function onSessionOpened(message) {
console.log("Session opened.");
}
function onRemoteStreamAdded(event) {
console.log("Remote stream added.");
attachMediaStream(remoteVideo, event.stream);
remoteStream = event.stream;
waitForRemoteVideo();
}
function onRemoteStreamRemoved(event) {
console.log("Remote stream removed.");
}
function onHangup() {
console.log("Hanging up.");
transitionToDone();
stop();
// will trigger BYE from server
socket.close();
}
function onRemoteHangup() {
console.log('Session terminated.');
transitionToWaiting();
stop();
}
function stop() {
started = false;
isAudioMuted = false;
isVideoMuted = false;
pc.close();
pc = null;
}
function waitForRemoteVideo() {
if (remoteStream.videoTracks.length === 0 || remoteVideo.currentTime > 0) {
console.log('remote stream present');
transitionToActive();
} else {
console.log('no remote stream yet');
setTimeout(waitForRemoteVideo, 100);
}
}
function transitionToActive() {
remoteVideo.style.opacity = 1;
}
function transitionToWaiting() {
remoteVideo.style.opacity = 0;
}
function transitionToDone() {
localVideo.style.opacity = 0;
remoteVideo.style.opacity = 0;
}
function toggleVideoMute() {
if (localStream.videoTracks.length === 0) {
console.log("No local video available.");
return;
}
if (isVideoMuted) {
for (i = 0; i < localStream.videoTracks.length; i++) {
localStream.videoTracks[i].enabled = true;
}
console.log("Video unmuted.");
} else {
for (i = 0; i < localStream.videoTracks.length; i++) {
localStream.videoTracks[i].enabled = false;
}
console.log("Video muted.");
}
isVideoMuted = !isVideoMuted;
}
function toggleAudioMute() {
if (localStream.audioTracks.length === 0) {
console.log("No local audio available.");
return;
}
if (isAudioMuted) {
for (i = 0; i < localStream.audioTracks.length; i++) {
localStream.audioTracks[i].enabled = true;
}
console.log("Audio unmuted.");
} else {
for (i = 0; i < localStream.audioTracks.length; i++){
localStream.audioTracks[i].enabled = false;
}
console.log("Audio muted.");
}
isAudioMuted = !isAudioMuted;
}
setTimeout(initialize, 1);
// Send BYE on refreshing(or leaving) a demo page
// to ensure the room is cleaned for next session.
window.onbeforeunload = function() {
sendMessage({from: my_username, to: friend}, {type: 'bye'});
//Delay 100ms to ensure 'bye' arrives first.
setTimeout(function(){}, 100);
}
// Ctrl-D: toggle audio mute; Ctrl-E: toggle video mute.
// On Mac, Command key is instead of Ctrl.
// Return false to screen out original Chrome shortcuts.
document.onkeydown = function() {
if (navigator.appVersion.indexOf("Mac") != -1) {
if (event.metaKey && event.keyCode === 68) {
toggleAudioMute();
return false;
}
if (event.metaKey && event.keyCode === 69) {
toggleVideoMute();
return false;
}
} else {
if (event.ctrlKey && event.keyCode === 68) {
toggleAudioMute();
return false;
}
if (event.ctrlKey && event.keyCode === 69) {
toggleVideoMute();
return false;
}
}
}
// Set Opus as the default audio codec if it's present.
function preferOpus(sdp) {
var sdpLines = sdp.split('\r\n');
// Search for m line.
for (var i = 0; i < sdpLines.length; i++) {
if (sdpLines[i].search('m=audio') !== -1) {
var mLineIndex = i;
break;
}
}
if (mLineIndex === null)
return sdp;
// If Opus is available, set it as the default in m line.
for (var i = 0; i < sdpLines.length; i++) {
if (sdpLines[i].search('opus/48000') !== -1) {
var opusPayload = extractSdp(sdpLines[i], /:(\d+) opus\/48000/i);
if (opusPayload)
sdpLines[mLineIndex] = setDefaultCodec(sdpLines[mLineIndex], opusPayload);
break;
}
}
// Remove CN in m line and sdp.
sdpLines = removeCN(sdpLines, mLineIndex);
sdp = sdpLines.join('\r\n');
return sdp;
}
function extractSdp(sdpLine, pattern) {
var result = sdpLine.match(pattern);
return (result && result.length == 2)? result[1]: null;
}
// Set the selected codec to the first in m line.
function setDefaultCodec(mLine, payload) {
var elements = mLine.split(' ');
var newLine = new Array();
var index = 0;
for (var i = 0; i < elements.length; i++) {
if (index === 3) // Format of media starts from the fourth.
newLine[index++] = payload; // Put target payload to the first.
if (elements[i] !== payload)
newLine[index++] = elements[i];
}
return newLine.join(' ');
}
// Strip CN from sdp before CN constraints is ready.
function removeCN(sdpLines, mLineIndex) {
var mLineElements = sdpLines[mLineIndex].split(' ');
// Scan from end for the convenience of removing an item.
for (var i = sdpLines.length-1; i >= 0; i--) {
var payload = extractSdp(sdpLines[i], /a=rtpmap:(\d+) CN\/\d+/i);
if (payload) {
var cnPos = mLineElements.indexOf(payload);
if (cnPos !== -1) {
// Remove CN payload from m line.
mLineElements.splice(cnPos, 1);
}
// Remove CN line in sdp
sdpLines.splice(i, 1);
}
}
sdpLines[mLineIndex] = mLineElements.join(' ');
return sdpLines;
}
When execution reaches waitForRemoteVideo, the function always falls into the else branch, even though the blob URL for remoteVideo exists.
It's funny that I had been searching for the error and rewriting the code about three times, and suddenly, after posting my question here, I realized my mistake.
Here, in the function processSignalingMessage:
if (data_message.type === 'offer') {
    // Callee creates PeerConnection
    if (!started)
        maybeStart();
    pc.setRemoteDescription(new RTCSessionDescription(data_message));
    friend = send_info.from;
    doAnswer();
} else ....
Should be like this:
if (data_message.type === 'offer') {
    friend = send_info.from;
    // Callee creates PeerConnection
    if (!started)
        maybeStart();
    pc.setRemoteDescription(new RTCSessionDescription(data_message));
    doAnswer();
} else ....
because the callee needs the friend variable to be filled in before sending any candidate messages.
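A small defensive guard (hypothetical, not in the original code) would make that failure loud instead of silent, since onIceCandidate addresses every candidate using friend:
function onIceCandidate(event) {
    if (event.candidate) {
        if (!friend) {
            console.warn('Dropping ICE candidate: friend is not set yet.');
            return;
        }
        sendMessage({from: my_username, to: friend}, {
            type: 'candidate',
            label: event.candidate.sdpMLineIndex,
            id: event.candidate.sdpMid,
            candidate: event.candidate.candidate
        });
    } else {
        console.log('End of candidates.');
    }
}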