Advantages of blob over video URL

I have trawled the internet looking at why blob videos are used, but I am not tech-savvy enough to really understand it. Can someone explain simply why a blob URL for my video is better (if it is) than loading the src as /video/intro.mp4?
Here are the two versions I have used. Which one is better for my use case?
<video rel='preload' as='video' id='bgvid'>
    <source type='video/mp4' src='/video/intro.mp4' />
</video>
or
var req = new XMLHttpRequest();
req.open('GET', videoURL, true);
req.responseType = 'blob';
req.onload = function() {
    if (this.status === 200) {
        var videoBlob = this.response;
        var vid = URL.createObjectURL(videoBlob); // IE10+
        var video = document.getElementById('bgvid');
        video.autoplay = true;
        video.src = vid;
    }
};
req.onerror = function() {
    // Error
};
Thanks

I would say the regular HTML way, because it doesn't add an extra processing step: the source is declared right in the page markup, so the browser can start playing the video as it downloads, instead of waiting for JavaScript to fetch the entire file into memory first.
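As a side note, rel='preload' and as='video' are attributes of the <link> element, not of <video>, so the browser ignores them on the video tag. If preloading is the goal, a minimal sketch of where those attributes belong (using the path from the question):

<head>
    <!-- Resource hint: asks the browser to fetch the video early.
         rel/as only have meaning on <link>, not on <video>. -->
    <link rel="preload" as="video" href="/video/intro.mp4" type="video/mp4" />
</head>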

Related

Translate an entire page with Google Translate API

I'm using the Google Translate API to translate content that's inside an IFRAME. The API returns an array of strings with the translated texts, but when I insert them into the DOM, the entire structure disappears, leaving only the text.
[Screenshot: the original page]
[Screenshot: the resulting page after the Google API response (translated into Portuguese)]
This is the code that I'm using:
function translate(lang) {
    // note: lang is currently unused; the target language is hard-coded to pt in the URL
    var iframe = document.getElementById('mainIframe');
    var iframeDoc = iframe.contentDocument || iframe.contentWindow.document;
    var iframeBody = iframeDoc.body;
    var iframeHTML = iframeDoc.documentElement;
    var iframeText = iframeBody.innerText || iframeHTML.innerText;
    // encodeURIComponent (not encodeURI) so characters like & and = in the
    // text do not break the query string
    var url = 'https://translate.googleapis.com/translate_a/single?client=gtx&sl=auto&tl=pt&dt=t&q=' + encodeURIComponent(iframeText);
    var xhr = new XMLHttpRequest();
    xhr.open('GET', url, true);
    xhr.onreadystatechange = function() {
        if (xhr.readyState == 4) {
            var response = JSON.parse(xhr.responseText);
            var translatedText = '';
            // the response is an array of segments; the translated string is
            // the first element of each segment
            for (var i = 0; i < response[0].length; i++) {
                translatedText += response[0][i][0];
            }
            // this replaces the whole body, which is what destroys the layout
            iframeDoc.body.innerHTML = translatedText;
        }
    };
    xhr.send();
}
How can I avoid affecting the structure inside the iframe (so it doesn't end up like the second screenshot above)? I just want to replace the original text with what comes back in the response and keep the original look.
I've already tried using functions that find the original text in the DOM and replace it with the translated text, but without success. The idea was simply to replace each matching piece of text literally.
Thanks in advance for your help.
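One way to keep the structure intact is to translate text node by text node instead of replacing the whole body. A minimal sketch of that idea, assuming the translated segments can be mapped back one-to-one to the collected nodes (collectTextNodes and applyTranslations are illustrative helper names, not part of any API):

// Collect the iframe's text nodes so the element tree is never touched.
function collectTextNodes(doc) {
    var walker = doc.createTreeWalker(doc.body, NodeFilter.SHOW_TEXT, null, false);
    var nodes = [];
    while (walker.nextNode()) {
        // skip whitespace-only nodes
        if (walker.currentNode.nodeValue.trim() !== '') {
            nodes.push(walker.currentNode);
        }
    }
    return nodes;
}

// Write the translations back into the same nodes, in the same order,
// instead of overwriting body.innerHTML.
function applyTranslations(nodes, translations) {
    for (var i = 0; i < nodes.length; i++) {
        nodes[i].nodeValue = translations[i];
    }
}

The open question is keeping the mapping stable: translating each node's text separately (or joined with a delimiter the API leaves untouched) avoids the segments being merged or re-split by the service.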

MSE WebM video with no audio

I've written an MSE video player and it's loading WebMs. These load well; however, I have a problem with video files that have no audio track.
I've tried changing the codec string depending on whether there is audio:
mediaSource.addSourceBuffer(`video/webm; ${videoHasAudio(asset) ? 'codecs="vp9,vorbis"' : 'codecs="vp9"'}`)
I thought this was working, but now it isn't. How do I play silent WebMs in MSE?
I have added a sample MSE project here:
https://github.com/thowfeeq178/MediaSourceExtention
Check out the example in the GitHub repo.
Overview: we need one SourceBuffer for video and one for audio, like below:
// BBB : https://dash.akamaized.net/akamai/bbb_30fps/bbb_30fps.mpd
var baseUrl = "https://dash.akamaized.net/akamai/bbb_30fps/";
var initUrl = baseUrl + "bbb_30fps_480x270_600k/bbb_30fps_480x270_600k_0.m4v";
var initAudioUrl = baseUrl + "bbb_a64k/bbb_a64k_0.m4a";
var templateUrl =
    baseUrl + "bbb_30fps_480x270_600k/bbb_30fps_480x270_600k_$Number$.m4v";
var templateUrlForAudio = baseUrl + "bbb_a64k/bbb_a64k_$Number$.m4a";
var sourceBuffer;
var audioSourceBuffer;
var index = 0;
var audioIndex = 0;
var numberOfChunks = 159;
var video = document.querySelector("video");
var ms = new MediaSource();

function onPageLoad() {
    console.log("page loaded ..");
    if (!window.MediaSource) {
        console.error("No Media Source API available");
        return;
    }
    // hand control of the source over to JS via MediaSource
    video.src = window.URL.createObjectURL(ms);
    ms.addEventListener("sourceopen", onMediaSourceOpen);
}

function onMediaSourceOpen() {
    // create one source buffer for video and one for audio
    sourceBuffer = ms.addSourceBuffer('video/mp4; codecs="avc1.4d401f"');
    audioSourceBuffer = ms.addSourceBuffer('audio/mp4; codecs="mp4a.40.5"');
    // whenever one segment has been appended, go fetch the next
    sourceBuffer.addEventListener("updateend", nextSegment);
    audioSourceBuffer.addEventListener("updateend", nextAudioSegment);
    // fetch the init segments
    GET(initUrl, appendToBuffer);
    GET(initAudioUrl, appendToAudioBuffer);
    // play
    video.play();
}

// get the next video segment based on index and append it;
// once everything is loaded, stop listening to the event
function nextSegment() {
    var url = templateUrl.replace("$Number$", index);
    GET(url, appendToBuffer);
    index++;
    if (index > numberOfChunks) {
        sourceBuffer.removeEventListener("updateend", nextSegment);
    }
}

// get the next audio segment based on audioIndex and append it;
// once everything is loaded, stop listening to the event
function nextAudioSegment() {
    var audioUrl = templateUrlForAudio.replace("$Number$", audioIndex);
    GET(audioUrl, appendToAudioBuffer);
    audioIndex++;
    if (audioIndex > numberOfChunks) { // was: index (bug), must check audioIndex
        audioSourceBuffer.removeEventListener("updateend", nextAudioSegment);
    }
}

// append to the existing source buffers
function appendToBuffer(videoChunk) {
    if (videoChunk) {
        sourceBuffer.appendBuffer(new Uint8Array(videoChunk));
    }
}

function appendToAudioBuffer(audioChunk) {
    if (audioChunk) {
        audioSourceBuffer.appendBuffer(new Uint8Array(audioChunk));
    }
}

// plain network helper
function GET(url, callback) {
    var xhr = new XMLHttpRequest();
    xhr.open("GET", url);
    xhr.responseType = "arraybuffer";
    xhr.onload = function(e) {
        if (xhr.status != 200) {
            console.warn("Unexpected status code " + xhr.status + " for " + url);
            return false;
        }
        callback(xhr.response);
    };
    xhr.send();
}
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <title>MSE Demo</title>
  </head>
  <body onload="onPageLoad()">
    <h1>MSE Demo</h1>
    <div>
      <video muted controls width="80%"></video>
    </div>
  </body>
</html>
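That covers streams that do have audio. For the silent-WebM case in the question, if the file genuinely has no audio track, create only the video SourceBuffer and never add an audio one; the MIME string must then list only the video codec. A rough sketch, assuming videoHasAudio(asset) from the question reports the track layout correctly:

// Match the SourceBuffer(s) to the tracks that actually exist in the file.
// Declaring an audio codec for a file with no audio data can leave the
// MediaSource waiting for audio that never arrives.
var mimeType = videoHasAudio(asset)
    ? 'video/webm; codecs="vp9,vorbis"'
    : 'video/webm; codecs="vp9"';

if (MediaSource.isTypeSupported(mimeType)) {
    var videoBuffer = mediaSource.addSourceBuffer(mimeType);
    // append only video segments; do not create an audio SourceBuffer
}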

No sound in safari using Web Audio API webkitAudioContext()

I am trying to use the Web Audio API to play sound in my React application.
It's currently playing sound in all browsers except Safari v12.1.
I am aware Safari has restrictions on autoplay and requires user interaction to play sound, so I have a play button which calls the _play() function:
_play = (url, index) => {
    this._getData(url);
    this.source.start(index)
}
It calls the _getData() function, which looks like this:
_getData(url) {
    this.source = this.audioContext.createBufferSource();
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = () => {
        var audioData = request.response;
        console.log(this.audioContext)
        this.audioContext.decodeAudioData(audioData, buffer => {
            this.source.buffer = buffer;
            this.source.connect(this.audioContext.destination);
        },
        function(e) { console.log("Error with decoding audio data" + e.err); });
    }
    request.send();
}
this.audioContext is created in the component constructor using:
this.audioContext = new (window.AudioContext || window.webkitAudioContext)();
The console.log(this.audioContext) inside request.onload outputs the following before pressing play:
[Screenshot: the AudioContext logged before pressing play]
...and this after pressing play:
[Screenshot: the AudioContext logged after pressing play]
But no sound is playing (in Safari).
What am I doing wrong?
I think the problem you ran into is that Safari does not allow you to modify the buffer anymore once start() has been called.
The following page, for example, plays a second of noise in Safari when you press the play button.
<!DOCTYPE html>
<html>
  <body>
    <button id="play-button">play</button>
    <script>
      document
        .getElementById('play-button')
        .addEventListener('click', () => {
          const audioContext = new AudioContext();
          const audioBufferSourceNode = audioContext.createBufferSource();
          const sampleRate = audioContext.sampleRate;
          // one second of mono noise at the context's sample rate
          const audioBuffer = audioContext.createBuffer(1, sampleRate, sampleRate);
          const channelData = audioBuffer.getChannelData(0);
          for (let i = 0; i < sampleRate; i += 1) {
            channelData[i] = (Math.random() * 2) - 1;
          }
          audioBufferSourceNode.buffer = audioBuffer;
          audioBufferSourceNode.connect(audioContext.destination);
          audioBufferSourceNode.start(audioContext.currentTime);
        });
    </script>
  </body>
</html>
But it stops working if you modify it slightly: when the audioBufferSourceNode is started before the buffer is assigned, there is no output anymore.
audioBufferSourceNode.connect(audioContext.destination);
audioBufferSourceNode.start(audioContext.currentTime);
audioBufferSourceNode.buffer = audioBuffer;
I guess you can get your code working by waiting for the HTTP response and the audio decoding before you start the source. Make sure to execute this.source.buffer = buffer before you execute this.source.start(index).
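Applied to the code from the question, that could look roughly like this (a sketch, not a drop-in fix: it assumes playback should begin as soon as decoding finishes, and it passes index through to start() as before):

_play = (url, index) => {
    // start() now happens inside the decode callback, after the buffer
    // has been assigned, so we only kick off the fetch here
    this._getData(url, index);
}

_getData(url, index) {
    this.source = this.audioContext.createBufferSource();
    var request = new XMLHttpRequest();
    request.open('GET', url, true);
    request.responseType = 'arraybuffer';
    request.onload = () => {
        this.audioContext.decodeAudioData(request.response, buffer => {
            this.source.buffer = buffer;                      // assign first ...
            this.source.connect(this.audioContext.destination);
            this.source.start(index);                         // ... then start
        }, e => console.log("Error with decoding audio data: " + e));
    };
    request.send();
}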
I hope this helps.

WebRTC onaddstream: remote stream received but does not show

I have run into difficulties.
var onaddstream = function(event) {
    var video = $("#chat_dialogForOne video[name='remote']")[0];
    var remoteStream = event.stream;
    video.srcObject = remoteStream;
    video.onloadedmetadata = function(e) {
        video.play();
    };
}
$("#chat_dialogForOne button[name='openVideo']").on("click",function(){
$(this).toggleClass("active");
$(this).data("use",$(this).data("use") ? false : true);
if($(this).data("use")){//开启视频语音聊天
rtc.openVideoAudioLocal(function(localStream){//创建本地视频流,绑定到控件上
var video = $("#chat_dialogForOne video[name='video']")[0]; //获取到展现视频的标签
video.srcObject=localStream;
video.onloadedmetadata = function(e) {
video.play();
};
rtc.openVideoAudioLocal(function(remoteStream){
rtc.sendAddStream(remoteStream);
},true,true);
},true,false);//为了防止自己能听到自己发出的声音,只启动视频,不启动音频
$(this).find(" > span").html("结束视频");
$("#chat_dialogForOne button[name='openAudio']").hide();
}else{//关闭视频语音聊天
// closeRemoteChannelStream([oneWebRtc]);
// closeLocalStream();
// resetVideoButton();
}
});
[Screenshot 2: the local stream displays successfully]
[Screenshot 1: the remote stream fails to display]
onaddstream receives the remote stream, but it does not show in the video element.
I need your help.
Sorry, it was my own mistake. A very slight error caused a new PC object to be created again after receiving the offer, so although the remote side received the video stream object, the channel it arrived on had already been replaced.
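In other words, the fix is to make sure the RTCPeerConnection that has the onaddstream handler attached is the same instance used to answer the offer. A rough sketch of such a guard (peerConnections and peerId are illustrative names, not from the code above):

var peerConnections = {};

function getPeerConnection(peerId) {
    // create the connection once per peer and reuse it afterwards, so the
    // stream handler stays attached to the connection carrying the media
    if (!peerConnections[peerId]) {
        var pc = new RTCPeerConnection();
        pc.onaddstream = onaddstream; // the handler from the question
        peerConnections[peerId] = pc;
    }
    return peerConnections[peerId];
}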

Can I send a MediaStream from a PeerConnection to another?

I'm using Chrome 23.0.1246.0 canary, the latest version.
I want to send a MediaStream received from one client via a PeerConnection on to another client via another PeerConnection.
I mean, ClientA sends its local media stream to me via the PeerConnection between us, and then I send this media stream to ClientB via the PeerConnection between ClientB and me.
This is my code, but it doesn't work: when I click the Add Video button for the second time, the gotRemoteStream function is not invoked. I don't know the reason.
Can anybody help me?
<!DOCTYPE html>
<html>
  <head>
    <title>Video Link</title>
    <style type="text/css">
      video { width: 200px; }
    </style>
  </head>
  <body>
    <input id="btnAddVideo" type="button" value="Add Video" onclick="AddVideo();" />
    <div id="videos"></div>
    <script type="text/ecmascript">
      var pcs = new Array();
      var pcr = new Array();
      var mediaStream = new Array();
      var msIndex = 0;
      navigator.webkitGetUserMedia({ audio: true, video: true }, gotStream, function () { alert('get MediaStream Error'); });
      function gotStream(stream) {
        mediaStream[0] = stream;
      }
      var pc1;
      var pc2;
      function AddVideo() {
        if (mediaStream[msIndex] == null) return;
        pc1 = new webkitPeerConnection00(null, iceCallback1);
        pc1.addStream(mediaStream[msIndex]);
        var offer = pc1.createOffer(null);
        pc1.setLocalDescription(256, offer);
        pc2 = new webkitPeerConnection00(null, iceCallback2);
        pc2.onaddstream = gotRemoteStream;
        pc2.setRemoteDescription(256, new SessionDescription(offer.toSdp()));
        var answer = pc2.createAnswer(offer.toSdp(), { has_audio: true, has_video: true });
        pc2.setLocalDescription(768, answer);
        pc1.setRemoteDescription(768, new SessionDescription(answer.toSdp()));
        pc2.startIce();
        pc1.startIce();
        pcs.push(pc1);
        pcr.push(pc2);
      }
      function iceCallback1(candidate, bMore) {
        pc2.processIceMessage(new IceCandidate(candidate.label, candidate.toSdp()));
      }
      function iceCallback2(candidate, bMore) {
        pc1.processIceMessage(new IceCandidate(candidate.label, candidate.toSdp()));
      }
      function gotRemoteStream(e) {
        var v = document.createElement('video');
        v.autoplay = 'autoplay';
        v.src = webkitURL.createObjectURL(e.stream);
        document.getElementById('videos').appendChild(v);
        mediaStream.push(e.stream);
        msIndex++;
      }
    </script>
  </body>
</html>
Yes, you can. With the addStream function of a PeerConnection you can add any MediaStream you want. Note that you then have to exchange the SDPs (local and remote descriptions) between the clients once again after adding the stream to the PeerConnection.
Your code is not up to date, because since you posted your question there have been a lot of improvements to the API. If you update it with the help of the current standard, I can help you. :)
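For reference, a rough sketch of the forwarding idea with the current standard API (pcFromA and pcToB are illustrative names; the signaling channel is application-specific and omitted):

// Forward tracks received from ClientA to ClientB by re-adding them on a
// second RTCPeerConnection.
const pcFromA = new RTCPeerConnection(); // connection with ClientA
const pcToB = new RTCPeerConnection();   // connection with ClientB

pcFromA.ontrack = (event) => {
    // re-add each incoming track on the ClientB connection
    pcToB.addTrack(event.track, event.streams[0]);
};

// adding a track triggers renegotiation with ClientB
pcToB.onnegotiationneeded = async () => {
    const offer = await pcToB.createOffer();
    await pcToB.setLocalDescription(offer);
    // send pcToB.localDescription to ClientB over the signaling channel,
    // then apply ClientB's answer via pcToB.setRemoteDescription(...)
};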