RTCMultiConnection on Safari (Apple Mac)

I have my own RTCMultiConnection server up and running.
I found this plugin: https://github.com/muaz-khan/PluginRTC
But it doesn't seem to work. I get "WebRTC 1.0 (RTCPeerConnection) API are NOT available in this browser."
Here is my code (working in Chrome and Firefox, but not in Safari):
<div id="videos-container"></div>
<script src="js/rmc3.min.js').'"></script>
<script src="js/socket.io.js').'"></script>
<script src="js/Plugin.EveryWhere.js').'"></script>
<script>
var connection = new RTCMultiConnection();
connection.socketURL = "https://__MyDomain.com:9000/";
var roomid = "main_room";

connection.session = {
    audio: true,
    video: true
};

connection.sdpConstraints.mandatory = {
    OfferToReceiveAudio: true,
    OfferToReceiveVideo: true
};

var videosContainer = document.getElementById("videos-container");

connection.onstream = function(event) {
    videosContainer.appendChild(event.mediaElement);
    setTimeout(function() { event.mediaElement.play(); }, 5000);
};

connection.openOrJoin(roomid);
// *** Plugin.EveryWhere.js [BEGIN]
var Plugin = {};

window.onPluginRTCInitialized = function(pluginRTCObject) {
    Plugin = pluginRTCObject;
    MediaStreamTrack = Plugin.MediaStreamTrack;
    RTCPeerConnection = Plugin.RTCPeerConnection;
    RTCIceCandidate = Plugin.RTCIceCandidate;
    RTCSessionDescription = Plugin.RTCSessionDescription;
};
if (!!window.PluginRTC) window.onPluginRTCInitialized(window.PluginRTC);

connection.onaddstream = function(event) {
    if (isPluginRTC) {
        var mediaElement = document.createElement("video"); // must be a <video> element, not "videos-container"
        var body = (document.body || document.documentElement);
        body.insertBefore(mediaElement, body.firstChild);
        setTimeout(function() {
            Plugin.attachMediaStream(mediaElement, event.stream);
            // here you can append "mediaElement" to a specific container
            // specificContainer.appendChild(mediaElement);
        }, 3000);
    } else {
        // do Chrome/Firefox-relevant stuff with "event.stream"
    }
};
// *** Plugin.EveryWhere.js [END]
</script>
Was anyone able to make the plugin work?
If yes, how, or what step did I miss?
Thanks

RTCMultiConnection-v3.2.95 now supports IE/Safari:
https://github.com/muaz-khan/RTCMultiConnection/releases/tag/3.2.95
To support Safari/IE, modify Gruntfile.js#L30 to enable dev/Plugin.EveryWhere.js.
Then don't forget to run grunt to recompile the code.
Now set connection.trickleIce = false in your HTML file, and video conferencing/chat/etc. will work among Chrome/Firefox/Safari/IE.
Please install PluginRTC.dmg or PluginRTC.exe.
Additional notes:
You have to initiate calls from Safari/IE, because currently Safari/IE cannot create answers; i.e. IE/Safari MUST call connection.open instead of connection.join.
To prevent SDP conflicts/errors, set this in your HTML: connection.processSdp = function(sdp) { return sdp; };
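Putting those settings together, here is a minimal sketch (not a verified working setup) of the Safari/IE-specific configuration described above, assuming v3.2.95 with dev/Plugin.EveryWhere.js compiled in and reusing the question's socketURL and room id:
var connection = new RTCMultiConnection();
connection.socketURL = "https://__MyDomain.com:9000/"; // your own signaling server
connection.trickleIce = false; // required for the Safari/IE plugin
connection.processSdp = function(sdp) {
    return sdp; // pass the SDP through unmodified to prevent conflicts/errors
};
// Safari/IE must be the initiator, so call open() rather than join()/openOrJoin():
connection.open("main_room");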

Related

No sound in safari using Web Audio API webkitAudioContext()

I am trying to use the Web Audio API to play sound in my React application.
It's currently playing sound in all browsers except Safari v12.1.
I am aware Safari has restrictions on autoplay and requires user interaction to play sound, so I have a play button which calls the _play() function:
_play = (url, index) => {
    this._getData(url);
    this.source.start(index);
}
It calls the _getData() function, which looks like this:
_getData(url) {
    this.source = this.audioContext.createBufferSource();
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = () => {
        var audioData = request.response;
        console.log(this.audioContext);
        this.audioContext.decodeAudioData(audioData, buffer => {
            this.source.buffer = buffer;
            this.source.connect(this.audioContext.destination);
        },
        function(e) { console.log("Error with decoding audio data" + e.err); });
    };
    request.send();
}
this.audioContext is created in the component constructor using:
this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
The console.log(this.audioContext) inside request.onload logs the AudioContext both before and after pressing play (screenshots omitted), but no sound plays in Safari.
What am I doing wrong?
I think the problem you ran into is that Safari does not allow you to modify the buffer anymore once start() has been called.
The following page, for example, plays a second of noise in Safari when you press the play button.
<!DOCTYPE html>
<html>
<body>
    <button id="play-button">play</button>
    <script>
        document
            .getElementById('play-button')
            .addEventListener('click', () => {
                const audioContext = new AudioContext();
                const audioBufferSourceNode = audioContext.createBufferSource();
                const sampleRate = audioContext.sampleRate;
                const audioBuffer = audioContext.createBuffer(1, sampleRate, sampleRate);
                const channelData = audioBuffer.getChannelData(0);

                // fill one second with white noise
                for (let i = 0; i < sampleRate; i += 1) {
                    channelData[i] = (Math.random() * 2) - 1;
                }

                audioBufferSourceNode.buffer = audioBuffer;
                audioBufferSourceNode.connect(audioContext.destination);
                audioBufferSourceNode.start(audioContext.currentTime);
            });
    </script>
</body>
</html>
But it doesn't work anymore if you modify it slightly: when the audioBufferSourceNode is started before the buffer is assigned, there is no output.
audioBufferSourceNode.connect(audioContext.destination);
audioBufferSourceNode.start(audioContext.currentTime);
audioBufferSourceNode.buffer = audioBuffer;
I guess you can get your code working by waiting for the HTTP response and the audio decoding before you start the source. Make sure to execute this.source.buffer = buffer before you execute this.source.start(index).
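For illustration, here is one way the question's _play()/_getData() pair could be folded together so that start() only runs after decoding has finished. This is a sketch assuming the component fields from the question (this.audioContext, this.source); it keeps the callback form of decodeAudioData because older Safari versions don't support the promise form:
_play = (url, index) => {
    this.source = this.audioContext.createBufferSource();
    const request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = () => {
        this.audioContext.decodeAudioData(request.response, buffer => {
            this.source.buffer = buffer; // assign the buffer first
            this.source.connect(this.audioContext.destination);
            this.source.start(index); // only then start playback
        }, e => console.log('Error with decoding audio data', e));
    };
    request.send();
}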
I hope this helps.

Share screen using getScreenId.js in WebRTC for two peers

I am trying to implement a share-screen function in WebRTC video conferencing. Following a suggestion, I am now using muaz-khan's solution: https://www.webrtc-experiment.com/getScreenId/ . I can easily capture the application images of one peer and replace the video stream with the captured stream. But it is a video-conferencing experiment, so two browsers need to conference with each other. For example, browser 1 has video stream A (local video) and video stream B (remote video); browser 2 has video stream B (local video) and video stream A (remote video). So when I share the screen from browser 1, the screen-share stream should replace the local video in browser 1 and the remote video in browser 2.
But right now, I can only make the screen share replace the local video in browser 1; browser 2 doesn't see any change in its remote video (which is the local video of browser 1). I don't know how to trigger the change in browser 2 as well. Do I need to signal the screen-share stream to the server and change the remote stream accordingly?
Here is my code in javascript:
$(function() {
    var brokerController, ws, webRTC, localid;

    // ws = new XSockets.WebSocket("wss://rtcplaygrouund.azurewebsites.net:443", ["connectionbroker"], {
    ws = new XSockets.WebSocket("ws://localhost:4502", ["connectionbroker"], {
        ctx: "152300ed-4d84-4e72-bc99-965052dc1e95"
    });

    var addRemoteVideo = function(peerId, mediaStream) {
        var remoteVideo = document.createElement("video");
        remoteVideo.setAttribute("autoplay", "true");
        remoteVideo.setAttribute("rel", peerId);
        attachMediaStream(remoteVideo, mediaStream);
        remoteVideo.setAttribute("class", "col-md-3");
        remoteVideo.setAttribute("height", $(document).height() * 0.3);
        remoteVideo.setAttribute("id", 'remoteVideo');
        $("#videoscreen").append(remoteVideo);
    };

    var onConnectionLost = function(remotePeer) {
        console.log("onconnectionlost");
        var peerId = remotePeer.PeerId;
        var videoToRemove = $("video[rel='" + peerId + "']");
        videoToRemove.remove();
    };

    var onConnectionCreated = function() {
        console.log("onconnectioncreated", arguments);
    };

    var onGetUserMedia = function(stream) {
        console.log("Successfully got some userMedia, hopefully a goat will appear..");
        webRTC.connectToContext(); // connect to the current context
    };

    var onRemoteStream = function(remotePeer) {
        addRemoteVideo(remotePeer.PeerId, remotePeer.stream);
        console.log("Oops, we got a remote stream. Let's see if it's a goat..");
    };

    var onLocalStream = function(mediaStream) {
        console.log("Got a localStream", mediaStream.id);
        localid = mediaStream.id;
        console.log("check this id: mediaStream id", mediaStream.id);
        var video = document.createElement("video");
        video.setAttribute("height", "100%");
        video.setAttribute("autoplay", "true");
        video.setAttribute("id", "localvideo");
        video.setAttribute("name", mediaStream.id);
        attachMediaStream(video, mediaStream);
        $("#videoscreen").append(video);

        $('#share').click(function() {
            getScreenId(function(error, sourceId, screen_constraints) {
                navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
                navigator.getUserMedia(screen_constraints, function(stream) {
                    $('#localvideo').attr('src', URL.createObjectURL(stream));
                }, function(error) {
                    console.error(error);
                });
            });
        });
    };

    var onContextCreated = function(ctx) {
        console.log("RTC object created, and a context is created - ", ctx);
        webRTC.getUserMedia(webRTC.userMediaConstraints.hd(true), onGetUserMedia, onError);
    };

    var onOpen = function() {
        console.log("Connected to the brokerController - 'connectionBroker'");
        webRTC = new XSockets.WebRTC(this);
        webRTC.onlocalstream = onLocalStream;
        webRTC.oncontextcreated = onContextCreated;
        webRTC.onconnectioncreated = onConnectionCreated;
        webRTC.onconnectionlost = onConnectionLost;
        webRTC.onremotestream = onRemoteStream;
    };

    var onConnected = function() {
        console.log("connection to the 'broker' server is established");
        console.log("Try to get the broker controller from the server..");
        brokerController = ws.controller("connectionbroker");
        brokerController.onopen = onOpen;
    };

    ws.onconnected = onConnected;
});
I am using XSockets as the server. The code for the share click, which replaces the local stream with the screen-share stream, is just as simple as this:
$('#share').click(function() {
    getScreenId(function(error, sourceId, screen_constraints) {
        navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
        navigator.getUserMedia(screen_constraints, function(stream) {
            $('#localvideo').attr('src', URL.createObjectURL(stream));
        }, function(error) {
            console.error(error);
        });
    });
});
Any help or suggestion would be appreciated.
Thanks for pointing out the other post (How to addTrack in MediaStream in WebRTC), but I don't think it covers the same problem, and I am also not sure how to renegotiate the remote connection in this case.
The XSockets WebRTC client file used for the connection:
https://github.com/XSockets/XSockets.WebRTC/blob/master/src/js/XSockets.WebRTC.latest.js
How could I renegotiate the remote connection in this case?
I figured out a workaround for this question myself: do not replace the local stream with the screen-share stream; instead, remove the old local stream from the local div, then add the new screen-share stream to it. In the meantime, send the old local stream's id over the data channel to the other peer and remove that old remote video as well.
The most important thing is to refresh the streams (renegotiation); the screen-share stream will then display on the remote peer.
Code:
$('#share').click(function() {
    getScreenId(function(error, sourceId, screen_constraints) {
        navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
        navigator.getUserMedia(screen_constraints, function(stream) {
            webRTC.removeStream(webRTC.getLocalStreams()[0]);
            var id = $('#localvideo').attr('name');
            $('#localvideo').remove();
            brokerController.invoke('updateremotevideo', id);
            webRTC.addLocalStream(stream);
            webRTC.getRemotePeers().forEach(function(p) {
                webRTC.refreshStreams(p);
            });
        }, function(error) {
            console.error(error);
        });
    });
});
After receiving the command from the server, remove that old video stream:
brokerController.on('updateremotevideo', function(streamid) {
    $(document.getElementById(streamid)).remove();
});
This solution works for me. If, however, you only want to replace the local video stream with the screen-share stream, you need to re-create the offer with the new SDP and send that SDP to the remote peer, which is more complicated. The snippet below captures the screen (plus microphone audio) and additionally sets up a MediaStreamRecorder for it:
getScreenId(function(error, sourceId, screen_constraints) {
    navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
    navigator.getUserMedia(screen_constraints, function(stream) {
        navigator.getUserMedia({ audio: true }, function(audioStream) {
            stream.addTrack(audioStream.getAudioTracks()[0]);
            var mediaRecorder = new MediaStreamRecorder(stream);
            mediaRecorder.mimeType = 'video/mp4';
            mediaRecorder.stream = stream;
            self.setState({ recorder: mediaRecorder, startRecord: true, shareVideo: true, pauseRecord: false, resumeRecord: false, stopRecord: false, downloadRecord: false, updateRecord: false });
            document.querySelector('video').src = URL.createObjectURL(stream);
            var video = document.getElementById('screen-video');
            if (video) {
                video.src = URL.createObjectURL(stream);
                video.width = 360;
                video.height = 300;
            }
        }, function(error) {
            alert(error);
        });
    }, function(error) {
        alert(error);
    });
});
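For reference, here is a minimal sketch of that "more complicated" path using the plain RTCPeerConnection callback API rather than the XSockets wrapper. peerConnection and signalingSend are assumed names, not part of the original code:
function shareScreenWithRenegotiation(peerConnection, signalingSend, screenStream) {
    // swap the outgoing stream, then renegotiate by creating and sending a fresh offer
    peerConnection.removeStream(peerConnection.getLocalStreams()[0]);
    peerConnection.addStream(screenStream);
    peerConnection.createOffer(function(offer) {
        peerConnection.setLocalDescription(offer, function() {
            signalingSend(JSON.stringify({ sdp: offer })); // deliver the new SDP to the remote peer
        }, console.error);
    }, console.error);
}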

rtcmulticonnection: webrtc recordrtc not returning a blob

I am trying to simply record the WebRTC video using what I thought was a standard example. The library is here: https://github.com/muaz-khan/RTCMultiConnection
rtcMultiConnection.onstream = function(e) {
    var mediaElement = getMediaElement(e.mediaElement, {
        onRecordingStarted: function(type) {
            // www.RTCMultiConnection.org/docs/startRecording/
            rtcMultiConnection.streams[e.streamid].startRecording();
        },
        onRecordingStopped: function(type) {
            // www.RTCMultiConnection.org/docs/stopRecording/
            rtcMultiConnection.streams[e.streamid].stopRecording(function(blob) {
                console.log("test");
                console.log(blob);
            });
        }
    });
};
I can follow the steps through the function calls; the issue is that the callback is never run from recordrtc.js.
It goes to line 100 of https://github.com/muaz-khan/RecordRTC/blob/master/RecordRTC.js
There it runs:
mediaRecorder.stop(_callback);
which never calls the callback.
Even calling functions directly doesn't work:
console.log(rtcMultiConnection.streams[e.streamid].audioRecorder.getBlob());
console.log(rtcMultiConnection.streams[e.streamid].videoRecorder.save("a.png"));
I am wondering if two different versions of recordrtc and rtcmulticonnection are interacting. Any ideas? Maybe an older recordrtc would help, but I can't find an older version.
Please use blob.video:
var stream = connection.streams['stream-id'];
stream.stopRecording(function(blob) {
    var h2;

    if (blob.audio) {
        h2 = document.createElement('h2');
        h2.innerHTML = 'Open recorded ' + blob.audio.type;
        div.appendChild(h2);
    }

    if (blob.video) {
        h2 = document.createElement('h2');
        h2.innerHTML = 'Open recorded ' + blob.video.type;
        div.appendChild(h2);
    }
});
Updated at March 29, 2016
Here is the actual documentation:
http://www.rtcmulticonnection.org/docs/startRecording/
Please make sure that:
You are using v2.2.2
You called startRecording first
For v3, you can use RecordRTC directly:
connection.onstream = function(event) {
    recordStream(event.stream);
};

function recordStream(stream) {
    if (!!window.recorder) return;

    window.recorder = RecordRTC(stream, {
        type: 'video'
    });
    recorder.startRecording();
}

btnStopRecording.onclick = function() {
    if (!window.recorder) return;

    recorder.stopRecording(function() {
        var blob = recorder.blob;

        // or get it as a dataURL instead
        recorder.getDataURL(func_callback); // func_callback receives the dataURL string
    });
};

btnStartRecording.onclick = function() {
    var stream = connection.attachStreams[0];
    recordStream(stream);

    // or
    var stream = connection.streamEvents['stream-id'].stream;
    recordStream(stream);
};
The above snippet can be used within v2.2.2 as well.
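If the goal is simply to save the recorded blob once the callback fires, a small helper along these lines can be used (downloadBlob is a hypothetical name; the DOM APIs are standard):
function downloadBlob(blob, fileName) {
    // wrap the blob in an object URL and trigger a download via a temporary link
    var a = document.createElement('a');
    a.href = URL.createObjectURL(blob);
    a.download = fileName;
    document.body.appendChild(a);
    a.click();
    document.body.removeChild(a);
}
// e.g. inside stopRecording's callback:
// recorder.stopRecording(function() { downloadBlob(recorder.blob, 'recording.webm'); });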

Simple WebRTC Example! But why it didn't work & what did I do wrong?

I found this link on the internet, which demonstrates how WebRTC works: https://shanetully.com/2014/09/a-dead-simple-webrtc-example/
Its source code is here https://github.com/shanet/WebRTC-Example
Now, I am trying to follow the example and here what I did:
1- I created a folder named voicechat
2- I created 2 folders inside voicechat: voicechat\client & voicechat\server
3- I put index.html & webrtc.js into voicechat\client
4- I put server.js into voicechat\server
5- I put the voicechat folder into my Tomcat webapps folder, so the path is C:\apache-tomcat-7.0.53\webapps\ROOT\voicechat
6- I started Tomcat.
7- I opened http://xxx.xxx.xxx.xxx/voicechat/client/index.html on my PC & the webpage showed the webcam (webcam 1) of my PC. No problem.
8- I opened http://xxx.xxx.xxx.xxx/voicechat/client/index.html on another PC & the webpage also showed the webcam (webcam 2) of the other PC. But I could not see webcam 1 of my PC. And when I talked on my PC, the person sitting at the other PC could not hear what I was saying, and vice versa.
So why didn't it work? What did I do wrong?
Here is the code of 3 files:
index.html
<html>
<head>
    <script src="webrtc.js"></script>
</head>
<body>
    <video id="localVideo" autoplay muted style="width:40%;"></video>
    <video id="remoteVideo" autoplay style="width:40%;"></video>
    <br />
    <input type="button" id="start" onclick="start(true)" value="Start Video" />
    <script type="text/javascript">
        pageReady();
    </script>
</body>
</html>
webrtc.js
var localVideo;
var remoteVideo;
var peerConnection;
var serverConnection;
var localStream;
var peerConnectionConfig = {'iceServers': [{'url': 'stun:stun.services.mozilla.com'}, {'url': 'stun:stun.l.google.com:19302'}]};

navigator.getUserMedia = navigator.getUserMedia || navigator.mozGetUserMedia || navigator.webkitGetUserMedia;
window.RTCPeerConnection = window.RTCPeerConnection || window.mozRTCPeerConnection || window.webkitRTCPeerConnection;
window.RTCIceCandidate = window.RTCIceCandidate || window.mozRTCIceCandidate || window.webkitRTCIceCandidate;
window.RTCSessionDescription = window.RTCSessionDescription || window.mozRTCSessionDescription || window.webkitRTCSessionDescription;

function pageReady() {
    localVideo = document.getElementById('localVideo');
    remoteVideo = document.getElementById('remoteVideo');

    // note: 127.0.0.1 only works on the same machine; other PCs need the server's real address
    serverConnection = new WebSocket('ws://127.0.0.1:3434');
    serverConnection.onmessage = gotMessageFromServer;

    var constraints = {
        video: true,
        audio: true,
    };

    if (navigator.getUserMedia) {
        navigator.getUserMedia(constraints, getUserMediaSuccess, errorHandler);
    } else {
        alert('Your browser does not support getUserMedia API');
    }
}

function getUserMediaSuccess(stream) {
    localStream = stream;
    localVideo.src = window.URL.createObjectURL(stream);
}

function start(isCaller) {
    peerConnection = new RTCPeerConnection(peerConnectionConfig);
    peerConnection.onicecandidate = gotIceCandidate;
    peerConnection.onaddstream = gotRemoteStream;
    peerConnection.addStream(localStream);

    if (isCaller) {
        peerConnection.createOffer(gotDescription, errorHandler);
    }
}

function gotMessageFromServer(message) {
    if (!peerConnection) start(false);

    var signal = JSON.parse(message.data);
    if (signal.sdp) {
        peerConnection.setRemoteDescription(new RTCSessionDescription(signal.sdp), function() {
            peerConnection.createAnswer(gotDescription, errorHandler);
        }, errorHandler);
    } else if (signal.ice) {
        peerConnection.addIceCandidate(new RTCIceCandidate(signal.ice));
    }
}

function gotIceCandidate(event) {
    if (event.candidate != null) {
        serverConnection.send(JSON.stringify({'ice': event.candidate}));
    }
}

function gotDescription(description) {
    console.log('got description');
    peerConnection.setLocalDescription(description, function() {
        serverConnection.send(JSON.stringify({'sdp': description}));
    }, function() { console.log('set description error'); });
}

function gotRemoteStream(event) {
    console.log('got remote stream');
    remoteVideo.src = window.URL.createObjectURL(event.stream);
}

function errorHandler(error) {
    console.log(error);
}
server.js
var WebSocketServer = require('ws').Server;
var wss = new WebSocketServer({port: 3434});

wss.broadcast = function(data) {
    for (var i in this.clients) {
        this.clients[i].send(data);
    }
};

wss.on('connection', function(ws) {
    ws.on('message', function(message) {
        console.log('received: %s', message);
        wss.broadcast(message);
    });
});
server.js is intended to be run as a node server for websocket signaling. Run it with node server.js. You shouldn't need Tomcat at all.
From the project readme:
The signaling server uses Node.js and ws and can be started as such:
$ npm install ws
$ node server/server.js
With the client running, open client/index.html in a recent version of either Firefox or Chrome.
You can open index.html with just a file URL.
I changed HTTPS_PORT = 8443 to HTTP_PORT = 8443, and did the same with all the https references, changing them to http. Next, I kept only const serverConfig = { }; as the serverConfig and removed serverConfig from const httpServer = http.createServer(handleRequest);. After these changes, you can run your server with npm start.
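Roughly, the modified server setup described there would look like the following sketch, assuming the repo's server/server.js structure (handleRequest and the WebSocket wiring come from that file):
const http = require('http');
const WebSocket = require('ws');

const HTTP_PORT = 8443; // was HTTPS_PORT

const serverConfig = { }; // kept per the steps above; no TLS key/cert needed over plain HTTP

// plain HTTP server instead of https.createServer(serverConfig, handleRequest)
const httpServer = http.createServer(handleRequest);
httpServer.listen(HTTP_PORT, '0.0.0.0');

// the signaling WebSocket rides on the same HTTP server
const wss = new WebSocket.Server({ server: httpServer });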
This is the ultimate simple code that can do the job, and there is no need to install Node.js at all. Just put this code into an index.html file, start your web host, and you're done!
<!DOCTYPE html>
<html>
<head>
    <script src="//simplewebrtc.com/latest.js"></script>
</head>
<body>
    <div id="localVideo" muted></div>
    <div id="remoteVideo"></div>
    <script>
        var webrtc = new SimpleWebRTC({
            localVideoEl: 'localVideo',
            remoteVideosEl: 'remoteVideo',
            autoRequestMedia: true
        });

        webrtc.on('readyToCall', function() {
            webrtc.joinRoom('My room name');
        });
    </script>
</body>
</html>

CasperJS looking for 404 error on links site

I'm a beginner programmer. I found a nice script:
http://planzero.org/blog/2013/03/07/spidering_the_web_with_casperjs
I tried to rewrite this script with the CasperJS test framework. I would like to get an xunit report from this code:
var startUrl = 'http://yoursite.foo';
var visitedUrls = [], pendingUrls = [];

var casper = require('casper').create({
    pageSettings: {
        loadImages: false,
        loadPlugins: false
    }
});

var utils = require('utils');
var helpers = require('helpers');

// Spider from the given URL
casper.test.begin('href', function(test) {
    casper.start(startUrl, function() {
        function spider(url) {
            // Add the URL to the visited stack
            visitedUrls.push(url);

            // Open the URL
            casper.open(url).then(function() {
                test.assertHttpStatus(200, ":" + url);

                // Find links present on this page
                var links = this.evaluate(function() {
                    var links = [];
                    Array.prototype.forEach.call(__utils__.findAll('a'), function(e) {
                        links.push(e.getAttribute('href'));
                    });
                    return links;
                });

                // Add newly found URLs to the stack
                var baseUrl = this.getGlobal('location').origin;
                Array.prototype.forEach.call(links, function(link) {
                    var newUrl = helpers.absoluteUri(baseUrl, link);
                    if (pendingUrls.indexOf(newUrl) == -1 && visitedUrls.indexOf(newUrl) == -1 && !(link.search(startUrl) == -1)) {
                        pendingUrls.push(newUrl);
                    }
                });

                // If there are URLs to be processed
                if (pendingUrls.length > 0) {
                    var nextUrl = pendingUrls.shift();
                    spider(nextUrl);
                } else {
                    console.log('links ended');
                    this.break; // note: this is a no-op, not a loop break
                }
            });
        }
        spider(startUrl);
    }).run(function() {
        test.done();
    });
});
The script runs, but when it ends I can't get the report.
If you're trying to learn how to use CasperJS, you need to start with a smaller example than that. That script is a mess, and it targets a site named yoursite.foo (maybe you put that name in there?).
I would take small steps. I have a video which may help explain how to use CasperJS:
http://www.youtube.com/watch?v=Kefil5tCL9o
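As a smaller starting point, a minimal CasperJS test that still produces an xunit report could look like this sketch (the URL is a placeholder; run it with casperjs test mytest.js --xunit=results.xml):
casper.test.begin('homepage responds with 200', 1, function(test) {
    casper.start('http://yoursite.foo', function() {
        test.assertHttpStatus(200);
    });
    casper.run(function() {
        test.done();
    });
});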