Audio fades in and out on mobile Chrome with SpeechSynthesis and howler.js - text-to-speech

For our application, we're using the Web Speech API to play TTS and howler.js to play music exercises. It seems that whenever TTS is active, the Howler volume is reduced and only gradually recovers over time (live demo: https://www.besteartraining.com/learn/1/3/5/course; note: test on mobile Chrome with headphones). Any help will be greatly appreciated.
// Howler.js initialization
sound = new Howl({
  src: [`/assets/audio/${getSoundData().name}.mp3`],
  volume: 1.0,
  onload() {
    const lengthOfNote = getSoundData().length;
    let timeIndex = 0;
    // Define one sprite per MIDI note (24-96); the notes sit back to back in the file.
    for (let i = 24; i <= 96; i++) {
      sound["_sprite"][i] = [timeIndex, lengthOfNote];
      timeIndex += lengthOfNote;
    }
    setHasInited(true);
  },
  onloaderror(e: any, msg: any) {
    console.log("Error", e, msg);
    setHasInited(true);
  },
});
// Web Speech API initialization
const speech = new SpeechSynthesisUtterance();
speech.lang = "en-US";
speech.volume = 0.35;
speech.rate = 1;
speech.pitch = 1;
speech.text = "C major"; // This is just an example.
speech.onend = function () {
  playMidiArray(chordMidiArray); // chordMidiArray = [36, 40, 43] = ["C1", "E1", "G1"]
};

function playMidiArray(midis: any) {
  midis.forEach((noteMidiNumber: any) => {
    sound.play(noteMidiNumber.toString());
  });
}

window.speechSynthesis.speak(speech);
Expected result:
Says the name "C major" (volume: 0.35) => plays music exercises (volume: 1.0)
Actual result:
Says the name "C major" (volume: 0.35) => plays music exercises (volume: 0.35 -> 1.0)
List of things that I have tried (combined sketch below):
- set speech.volume to 1 when TTS has finished playing
- set sound.volume to 1 before and after sound.play()
- remove the speechSynthesis instance (window.speechSynthesis.cancel()) before sound.play() is called
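For completeness, this is roughly what the combined attempt looked like (a sketch of the three steps above, not a fix; it did not stop the ducking; Howler.volume() is howler.js's global volume setter):

speech.onend = function () {
  window.speechSynthesis.cancel(); // drop the TTS instance entirely
  Howler.volume(1.0);              // reset the global howler.js volume
  sound.volume(1.0);               // reset the per-sound volume
  speech.volume = 1;               // reset the utterance volume after TTS finishes
  playMidiArray(chordMidiArray);
};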

Related

OfflineAudioContext processing takes increasingly longer in Safari

I am processing an audio buffer with an OfflineAudioContext with the following node layout:
[AudioBufferSourceNode] -> [AnalyserNode] -> [OfflineAudioContext]
This works very well on Chrome (106.0.5249.119), but on Safari 16 (17614.1.25.9.10, 17614) each run of the analysis takes longer and longer. Both are running on macOS.
What's curious is that I must quit Safari to "reset" the processing time.
I guess there's a memory leak?
Is there anything that I'm doing wrong in the JavaScript code that would cause Safari to not garbage collect?
async function processFrequencyData(audioBuffer, options) {
  const {
    fps,
    numberOfSamples,
    maxDecibels,
    minDecibels,
    smoothingTimeConstant,
  } = options;
  const frameFrequencies = [];
  const oc = new OfflineAudioContext({
    length: audioBuffer.length,
    sampleRate: audioBuffer.sampleRate,
    numberOfChannels: audioBuffer.numberOfChannels,
  });
  const lengthInMillis = 1000 * (audioBuffer.length / audioBuffer.sampleRate);
  const source = new AudioBufferSourceNode(oc);
  source.buffer = audioBuffer;
  const az = new AnalyserNode(oc, {
    fftSize: numberOfSamples * 2,
    smoothingTimeConstant,
    minDecibels,
    maxDecibels,
  });
  source.connect(az).connect(oc.destination);
  const msPerFrame = 1000 / fps;
  let currentFrame = 0;
  function process() {
    const frequencies = new Uint8Array(az.frequencyBinCount);
    az.getByteFrequencyData(frequencies);
    // const times = new number[](az.frequencyBinCount);
    // az.getByteTimeDomainData(times);
    frameFrequencies[currentFrame] = frequencies;
    const nextTime = (currentFrame + 1) * msPerFrame;
    if (nextTime < lengthInMillis) {
      currentFrame++;
      const nextTimeSeconds = (currentFrame * msPerFrame) / 1000;
      oc.suspend(nextTimeSeconds).then(process);
    }
    oc.resume();
  }
  oc.suspend(0).then(process);
  source.start(0);
  await oc.startRendering();
  return frameFrequencies;
}
const buttonsDiv = document.createElement('div');
document.body.appendChild(buttonsDiv);
const initButton = document.createElement('button');
initButton.onclick = init;
initButton.innerHTML = 'Load audio';
buttonsDiv.appendChild(initButton);
const processButton = document.createElement('button');
processButton.disabled = true;
processButton.innerHTML = 'Process';
buttonsDiv.appendChild(processButton);
const resultElement = document.createElement('pre');
document.body.appendChild(resultElement);

async function init() {
  initButton.disabled = true;
  resultElement.innerText += 'Loading audio... ';
  const audioContext = new AudioContext();
  const arrayBuffer = await fetch('https://gist.githubusercontent.com/marcusstenbeck/da36a5fc2eeeba14ae9f984a580db1da/raw/84c53582d3936ac78625a31029022c8fdb734b2a/base64audio.txt')
    .then(r => r.text())
    .then(fetch)
    .then(r => r.arrayBuffer());
  resultElement.innerText += 'finished.';
  resultElement.innerText += '\nDecoding audio... ';
  const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
  resultElement.innerText += 'finished.';
  processButton.onclick = async () => {
    processButton.disabled = true;
    resultElement.innerText += '\nStart processing... ';
    const t0 = Date.now();
    await processFrequencyData(audioBuffer, {
      fps: 30,
      numberOfSamples: 2 ** 13,
      maxDecibels: -25,
      minDecibels: -70,
      smoothingTimeConstant: 0.2,
    });
    resultElement.innerText += `finished in ${Date.now() - t0} ms`;
    processButton.disabled = false;
  };
  processButton.disabled = false;
}
I guess this is really a bug in Safari. I'm able to reproduce it by rendering an OfflineAudioContext without any nodes. As soon as I use suspend()/resume(), every invocation takes a little longer.
I'm only speculating here, but I think it's possible that there is some internal mechanism which tries to prevent the rapid back and forth between the audio thread and the main thread. It almost feels like one of those login forms which take a bit longer to validate the password every time you try.
Anyway, I think you can avoid using suspend()/resume() for your particular use case. It should be possible to create a separate OfflineAudioContext for each of the slices instead. To get the same effect, you would render only the particular slice with each OfflineAudioContext.
let currentTime = 0;
while (currentTime < duration) {
  const offlineAudioContext = new OfflineAudioContext({
    length: LENGTH_OF_ONE_SLICE,
    sampleRate
  });
  const audioBufferSourceNode = new AudioBufferSourceNode(offlineAudioContext, {
    buffer
  });
  const analyserNode = new AnalyserNode(offlineAudioContext);
  // Start playback at the offset of the current slice (in seconds).
  audioBufferSourceNode.start(0, currentTime);
  audioBufferSourceNode
    .connect(analyserNode)
    .connect(offlineAudioContext.destination);
  await offlineAudioContext.startRendering();
  const frequencies = new Uint8Array(analyserNode.frequencyBinCount);
  analyserNode.getByteFrequencyData(frequencies);
  // do something with the frequencies ...
  // Advance by the slice duration in seconds (LENGTH_OF_ONE_SLICE is in sample frames).
  currentTime += LENGTH_OF_ONE_SLICE / sampleRate;
}
I think the only thing missing would be the smoothing, since each of those slices will have its own AnalyserNode; a possible workaround is sketched below.
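If the smoothing matters, it could be approximated by hand across slices. A rough sketch, assuming it is acceptable to apply the exponential smoothing to the byte-scaled magnitudes (the spec applies it to the linear magnitudes before the decibel conversion):

const tc = 0.2; // same value as the smoothingTimeConstant above
let previousFrequencies = null;

function smoothFrequencies(frequencies) {
  if (previousFrequencies === null) {
    previousFrequencies = Float32Array.from(frequencies);
    return frequencies;
  }
  for (let i = 0; i < frequencies.length; i++) {
    // Exponential moving average, mirroring what AnalyserNode does internally.
    previousFrequencies[i] = tc * previousFrequencies[i] + (1 - tc) * frequencies[i];
    frequencies[i] = Math.round(previousFrequencies[i]);
  }
  return frequencies;
}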

How does Puppeteer handle the click object / DevTools Protocol (Chromium/Chrome)

I need to know how Puppeteer handles the click object, as well as the Chromium DevTools API. I've tried to research it on my own but haven't been able to find the actual code that handles it.
The reason I need to know is that I'm developing a wrapper that tests events in code for testing web pages, and I was looking to see whether implementing my own event-handling routine would be beneficial instead of using Puppeteer's interface for events (clicks, taps, and hover, as well as other events that might be needed, like touch events or scrolling).
Here is how far I've gotten:
The Puppeteer API uses the frame logic of DevTools to contact the API:
https://github.com/puppeteer/puppeteer/blob/master/lib/Page.js
/**
 * @param {string} selector
 * @param {!{delay?: number, button?: "left"|"right"|"middle", clickCount?: number}=} options
 */
click(selector, options = {}) {
  return this.mainFrame().click(selector, options);
}

/**
 * @return {!Puppeteer.Frame}
 */

/**
 * @param {!Protocol.Page.Frame} framePayload
 */
_onFrameNavigated(framePayload) {
  const isMainFrame = !framePayload.parentId;
  let frame = isMainFrame ? this._mainFrame : this._frames.get(framePayload.id);
  assert(isMainFrame || frame, 'We either navigate top level or have old version of the navigated frame');
  // Detach all child frames first.
  if (frame) {
    for (const child of frame.childFrames())
      this._removeFramesRecursively(child);
  }
  if (isMainFrame) {
    if (frame) {
      // Update frame id to retain frame identity on cross-process navigation.
      this._frames.delete(frame._id);
      frame._id = framePayload.id;
    } else {
      // Initial main frame navigation.
      frame = new Frame(this, this._client, null, framePayload.id);
    }
    this._frames.set(framePayload.id, frame);
    this._mainFrame = frame;
  }
}
This is as far as I have gotten; I've tried to look up the Page protocol, but I can't figure out what happens there.
Any help would be appreciated, even just pointers for further research.
The main parts are happening in JSHandle here:
async click(options) {
  await this._scrollIntoViewIfNeeded();
  const {x, y} = await this._clickablePoint();
  await this._page.mouse.click(x, y, options);
}
It scrolls until the element is in the viewport (otherwise it won't click), which happens here; then it finds the clickable coordinates on the element using the DevTools API here:
async _clickablePoint() {
  const [result, layoutMetrics] = await Promise.all([
    this._client.send('DOM.getContentQuads', {
      objectId: this._remoteObject.objectId
    }).catch(debugError),
    this._client.send('Page.getLayoutMetrics'),
  ]);
  if (!result || !result.quads.length)
    throw new Error('Node is either not visible or not an HTMLElement');
  // Filter out quads that have too small area to click into.
  const {clientWidth, clientHeight} = layoutMetrics.layoutViewport;
  const quads = result.quads
    .map(quad => this._fromProtocolQuad(quad))
    .map(quad => this._intersectQuadWithViewport(quad, clientWidth, clientHeight))
    .filter(quad => computeQuadArea(quad) > 1);
  if (!quads.length)
    throw new Error('Node is either not visible or not an HTMLElement');
  // Return the middle point of the first quad.
  const quad = quads[0];
  let x = 0;
  let y = 0;
  for (const point of quad) {
    x += point.x;
    y += point.y;
  }
  return {
    x: x / 4,
    y: y / 4
  };
}
and finally it moves the mouse to the coordinate here and clicks on it here
async click(x, y, options = {}) {
  const {delay = null} = options;
  if (delay !== null) {
    await Promise.all([
      this.move(x, y),
      this.down(options),
    ]);
    await new Promise(f => setTimeout(f, delay));
    await this.up(options);
  } else {
    await Promise.all([
      this.move(x, y),
      this.down(options),
      this.up(options),
    ]);
  }
}
which uses the DevTools API to interact with the mouse here:
async down(options = {}) {
  const {button = 'left', clickCount = 1} = options;
  this._button = button;
  await this._client.send('Input.dispatchMouseEvent', {
    type: 'mousePressed',
    button,
    x: this._x,
    y: this._y,
    modifiers: this._keyboard._modifiers,
    clickCount
  });
}
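Putting the pieces together: if you want to bypass Puppeteer's mouse helper, you can dispatch the same DevTools events yourself over a raw CDP session. A sketch (the URL and coordinates are placeholders; in practice you would compute the point from DOM.getContentQuads as shown above):

const puppeteer = require('puppeteer');

(async () => {
  const browser = await puppeteer.launch();
  const page = await browser.newPage();
  await page.goto('https://example.com'); // placeholder page

  // Raw DevTools Protocol session for this page's target.
  const client = await page.target().createCDPSession();

  // A click is just mousePressed followed by mouseReleased at the same point.
  const x = 100, y = 100; // hypothetical coordinates
  await client.send('Input.dispatchMouseEvent', {
    type: 'mousePressed', button: 'left', x, y, clickCount: 1,
  });
  await client.send('Input.dispatchMouseEvent', {
    type: 'mouseReleased', button: 'left', x, y, clickCount: 1,
  });

  await browser.close();
})();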

How to re-initialize a WebGL Earth globe with Vue

I am using http://www.webglearth.org/api for a globe in my Vue app.
I have a route called globe that initializes the globe with new WE.map. When I change routes and come back, it re-initializes, but it re-adds scripts to my document head, which prevents the globe tiles from loading. Is there a way to keep and reuse an instantiated object? Or any tips that could help with this?
I tried saving the created globe in the store and reusing it, but it won't reload without calling WE.map again, and that method re-adds the scripts.
This method is called in the Vue created hook:
initialize(data) {
  var earth = WE.map("earth_div");
  WE.tileLayer(
    "https://webglearth.github.io/webglearth2-offline/{z}/{x}/{y}.jpg",
    {
      tileSize: 256,
      bounds: [[-85, -180], [85, 180]],
      minZoom: 0,
      maxZoom: 16,
      attribution: "WebGLEarth example",
      tms: true
    }
  ).addTo(earth);
  data.forEach(doc => {
    let latlng = [];
    console.log(typeof doc.data().images);
    console.log(doc.data().galleryTitle);
    latlng[0] = doc.data().lat;
    latlng[1] = doc.data().lng;
    console.log(latlng);
    var marker = WE.marker(latlng).addTo(earth);
    marker.element.id = doc.data().safeTitle;
    let popup = `${doc.data().location}`;
    marker
      .bindPopup(popup, { maxWidth: 150, closeButton: true })
      .openPopup();
    marker.element.onclick = e => {
      console.log(e.target.parentElement.id);
      var gallery = e.target.parentElement.id;
      this.$router.push({ path: "/gallery/" + gallery });
    };
  });
  // Start a simple rotation animation
  var before = null;
  requestAnimationFrame(function animate(now) {
    console.log('animating');
    var c = earth.getPosition();
    var elapsed = before ? now - before : 0;
    before = now;
    earth.setCenter([c[0], c[1] + 0.1 * (elapsed / 30)]);
    // requestAnimationFrame(animate);
  });
  earth.setView([46.8011, 8.2266], 3);
  this.$store.commit('setEarth', earth);
},
I would like the globe to re-initialize without re-adding the scripts; a sketch of the guard I was experimenting with is below.
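For reference, a rough sketch of the reuse guard, assuming the store already holds the instance (setEarth and state.earth are the names from my store; everything else is a placeholder):

initialize(data) {
  // Reuse the stored globe if one was already created, so WE.map (and the
  // scripts it injects into the head) only ever runs once.
  let earth = this.$store.state.earth;
  if (!earth) {
    earth = WE.map("earth_div");
    // ... add the tile layer, markers, and rotation animation as above ...
    this.$store.commit("setEarth", earth);
  }
  earth.setView([46.8011, 8.2266], 3);
}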

How to continue downloading multiple files (AWS server images) in the background in React Native on iOS

I created an app where the user can download multiple images with one click using react-native-fs, which works perfectly on Android. But on iOS, when the app becomes inactive, the download stops and the user has to start it again.
async.eachSeries(DownloadData, async function (tourData, finish) {
  console.log("# resumable : 655612", tourData);
  var fileURL = tourData.path;
  var fileExtension = "/" + tourData.name + "Image" + p + ".png";
  p = p + 1;
  const downloadDest = RNFS.DocumentDirectoryPath + fileExtension;
  let dirs = RNFetchBlob.fs.dirs;
  var v = dirs.DocumentDir;
  var jobId = -1;
  const ret = RNFS.downloadFile({
    fromUrl: encodeURI(fileURL),
    toFile: downloadDest,
    connectionTimeout: 1000 * 10,
    readTimeout: 1000 * 10,
    background: true,
    discretionary: true,
    progressDivider: 1,
    resumable: (res) => {
      console.log("# resumable", res);
    },
    begin: (res) => {
      console.log(res);
    },
    progress: (data) => {
      console.log(data);
    },
  });
  jobId = ret.jobId;
  if (await RNFS.isResumable(jobId)) {
    console.log("# resumable : # resumable : # resumable :", jobId);
    RNFS.resumeDownload(jobId);
  }
  ret.promise.then((res) => {
    finish();
  }).catch(err => {
    finish();
  });
}, function (err) {
  if (!err) {
    callback(true);
  } else {
    callback(false);
  }
});
Running downloads in the background on iOS requires a few extra settings; check this section: https://github.com/itinance/react-native-fs#background-downloads-tutorial-ios
They also mention that iOS will give you about 30 seconds after handleEventsForBackgroundURLSession:
BE AWARE! iOS will give about 30 sec. to run your code after handleEventsForBackgroundURLSession is called and until completionHandler is triggered, so don't do anything that might take a long time (like unzipping); you will be able to do it after the user re-launches the app, otherwise iOS will terminate your app.
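If a download was still interrupted, a possible fallback is to resume the job when the app returns to the foreground. An untested sketch, combining RNFS.isResumable / RNFS.resumeDownload from your snippet with React Native's AppState (resumeOnForeground is a hypothetical helper name):

import { AppState } from 'react-native';
import RNFS from 'react-native-fs';

// Hypothetical helper: resume a stalled download job once the app is active again.
function resumeOnForeground(jobId) {
  AppState.addEventListener('change', async (state) => {
    if (state === 'active' && (await RNFS.isResumable(jobId))) {
      RNFS.resumeDownload(jobId);
    }
  });
}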
I hope this helps

Vimeo - Get duration / countdown on play & pause

I've been struggling with the Vimeo API for a few days now.
I want to get the video duration and show a countdown while the video is playing, pausing the countdown when the video is paused.
Does anyone know how to do this, or can you point me in the right direction?
Thanks,
Aron
I developed a countdown that plays over ad videos before the main video on my website. I believe the code below may help you; you should adapt it to your needs.
$(document).ready(function () {
  /* PLAY AD VIDEO */
  var $video = $('#ad_video'),
      player = new Vimeo.Player($video);
  player.play();
  /* COUNTDOWN */
  var interval = null;
  player.getDuration().then(function (duration) {
    var duration_val = duration;
    $("#countdown").attr("data-countdown", duration);
    $("#countdown").html('Video starts in ' + duration);
    interval = setInterval(function () {
      player.getCurrentTime().then(function (seconds) {
        seconds = Math.floor(seconds);
        var countdown_val = $("#countdown").attr("data-countdown");
        if (seconds == (duration_val - countdown_val - 1)) {
          $("#countdown").html('Video starts in ' + (duration_val - seconds));
          $("#countdown").attr("data-countdown", duration_val - seconds);
        }
        if (countdown_val == 1) {
          clearInterval(interval);
        }
      });
    }, 1000); // poll the current time once per second
  });
  /* PLAY MAIN VIDEO */
  $(function () {
    var $video = $('#ad_video'),
        player = new Vimeo.Player($video);
    player.on('ended', play_main_video);
  });
  function play_main_video() {
    $("#countdown").hide();
    $("#text_countdown").hide();
    $('#ad_video').hide();
    $('#main_video').show();
    var $video = $('#main_video'),
        player = new Vimeo.Player($video);
    player.play();
  }
});
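For a more direct approach, here is a minimal sketch using the Vimeo Player events (it assumes a #main_video Vimeo embed and a #countdown element; 'timeupdate' only fires while the video is playing, so the countdown pauses automatically whenever the video is paused):

var player = new Vimeo.Player(document.getElementById('main_video'));

// Show the full duration before playback starts.
player.getDuration().then(function (duration) {
  document.getElementById('countdown').textContent = Math.ceil(duration);
});

// 'timeupdate' passes { seconds, percent, duration } while the video plays.
player.on('timeupdate', function (data) {
  var remaining = Math.ceil(data.duration - data.seconds);
  document.getElementById('countdown').textContent = remaining;
});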