Mute audio on an RTMP livestream with react-native-nodemediaclient - react-native

I am using the react-native-nodemediaclient package to live stream over RTMP. Everything works fine with video. I want to mute/unmute the audio before sending the stream over RTMP.
My code:
var config = {
  cameraConfig: {
    cameraId: 0,
    cameraFrontMirror: true,
  },
  videoConfig: {
    preset: 4,
    bitrate: 32000000,
    profile: 2,
    fps: 30,
    videoFrontMirror: false,
  },
  audioConfig: {
    bitrate: 128000,
    profile: 1,
    samplerate: 44100,
  },
};
<NodeCameraView
  ref={(vb) => {
    setPlayerRef(vb);
  }}
  outputUrl={url}
  camera={config.cameraConfig}
  audio={config.audioConfig}
  video={config.videoConfig}
  autopreview={true}
/>
How can I do this?
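One workaround sketch (not a documented API of react-native-nodemediaclient, so treat every piece of it as an assumption to verify): keep a muted flag in component state and restart the publisher with the audio prop switched off, assuming NodeCameraView only applies its audio config when the stream is (re)started and that passing null disables the audio track. The names toggleMute, playerRef and muted are introduced here for illustration; this goes inside your component:

import React, { useRef, useState } from 'react';

// Sketch: toggle audio by restarting the publisher with/without the audio config.
const [muted, setMuted] = useState(false);
const playerRef = useRef(null);

const toggleMute = () => {
  const vb = playerRef.current;
  if (!vb) return;
  vb.stop();                        // stop the current publish session
  setMuted((m) => !m);              // flip the flag; the re-render passes a new audio prop
  setTimeout(() => vb.start(), 0);  // restart publishing after the prop has been applied
};

<NodeCameraView
  ref={(vb) => { playerRef.current = vb; }}
  outputUrl={url}
  camera={config.cameraConfig}
  audio={muted ? null : config.audioConfig}  // assumption: a null/omitted audio prop disables the audio track
  video={config.videoConfig}
  autopreview={true}
/>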

Related

React Native iOS cannot access the microphone when the screen is locked and the app is killed

I am building a calling app, which includes:
1. PushKit VoIP to show the incoming call
2. react-native-callkeep to handle answering/ending the call
3. react-native-webrtc to make the call
The problem is:
1. With the app state active/background, the call works normally.
2. Only in the case where the phone is locked and the app is not running, I cannot see the microphone indicator at the top of the screen, so I think I cannot access the microphone (tested with an audio call).
About my code: I call getMediaStream() (which uses mediaDevices.getUserMedia) in useEffect() like this:
useEffect(() => {
  // ...
  getMediaStream()
  return () => {
    mounted.current = false
    // ...
  }
}, [])

const getMediaStream = async () => {
  if (!localMediaStream) {
    let isFront = true
    let stream = await mediaDevices.getUserMedia({
      audio: {
        echoCancellation: true,
        noiseSuppression: true,
        autoGainControl: true,
        googEchoCancellation: true,
        googAutoGainControl: true,
        googNoiseSuppression: true,
        googHighpassFilter: true,
        googTypingNoiseDetection: true,
        googNoiseReduction: true
      },
      video: isVideo ? {
        width: { min: 480, max: 1280 },
        height: { min: 320, max: 720 },
        // vb: true,
        frameRate: 25,
        facingMode: (isFront ? 'user' : 'environment'),
      } : false
    })
    setlocalMediaStream({
      publisher: {
        id: currentUserName,
        userId: masterInfo.user.id,
        displayName: masterInfo.user.name
      },
      stream: stream,
    })
    await initJanus(stream)
  }
}
I don't know why the green microphone dot does not appear in the locked + not-running case. Can someone help?
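Not a full answer, but on iOS the microphone for a CallKit call normally has to be used from the audio session that CallKit activates. A sketch with react-native-callkeep (the didActivateAudioSession event is from its API; deferring getMediaStream() until that event fires is the assumption being suggested here):

import { useEffect } from 'react';
import RNCallKeep from 'react-native-callkeep';

// Sketch: wait for CallKit to activate the audio session before grabbing the mic,
// so getUserMedia runs while iOS treats the call as active (lock screen included).
useEffect(() => {
  const onAudioSessionActivated = () => {
    getMediaStream(); // assumption: call it here instead of unconditionally on mount
  };
  RNCallKeep.addEventListener('didActivateAudioSession', onAudioSessionActivated);
  return () => {
    RNCallKeep.removeEventListener('didActivateAudioSession');
  };
}, []);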

How can I get onHover to work for deck.gl MVTLayer?

The deck.gl MVTLayer inherits from Layer, which has onHover enabled; together with pickable this should give interactivity. I am trying to get interactivity working so I can show a popup with the data I hover over. In the code below I can get the onClick event to fire, but not the onHover event. What am I doing wrong?
Thanks :)
import React from "react";
import DeckGL from "@deck.gl/react";
import { MVTLayer } from "@deck.gl/geo-layers";

const INITIAL_VIEW_STATE = {
  longitude: -122.41669,
  latitude: 37.7853,
  zoom: 13,
  pitch: 0,
  bearing: 0
};

const layer = new MVTLayer({
  id: "MVTLayer",
  data: [
    "https://tiles-a.basemaps.cartocdn.com/vectortiles/carto.streets/v1/{z}/{x}/{y}.mvt"
  ],
  stroked: false,
  getLineColor: [255, 0, 0],
  getFillColor: (f) => {
    switch (f.properties.layerName) {
      case "poi":
        return [0, 0, 0];
      case "water":
        return [120, 150, 180];
      case "building":
        return [255, 0, 0];
      default:
        return [240, 240, 240];
    }
  },
  getPointRadius: 2,
  pointRadiusUnits: "pixels",
  getLineWidth: (f) => {
    switch (f.properties.class) {
      case "street":
        return 6;
      case "motorway":
        return 10;
      default:
        return 1;
    }
  },
  maxZoom: 14,
  minZoom: 0,
  onHover: (info) => console.log("Hover", info.object),
  onClick: (info) => console.log("Click", info.object),
  pickable: true
});

export default function App() {
  return (
    <DeckGL
      initialViewState={INITIAL_VIEW_STATE}
      controller={true}
      layers={[layer]}
    ></DeckGL>
  );
}
Update: it works now .... I think I had a dodgy Chrome instance running.
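For the popup itself, the DeckGL component also accepts a getTooltip callback; a minimal sketch using the layer above (object.properties.name is an assumption about the tile schema):

// Sketch: render a basic tooltip from the picked MVT feature.
<DeckGL
  initialViewState={INITIAL_VIEW_STATE}
  controller={true}
  layers={[layer]}
  getTooltip={({ object }) =>
    object && {
      text: `${object.properties.layerName}: ${object.properties.name || ''}`
    }
  }
/>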

How can I use my phone's back camera in the browser with QuaggaJS?

I want to use QuaggaJS in my web app to scan barcodes.
The problem is that I want to use the phone's back camera. The documentation says that Quagga uses a parameter called "facingMode".
If you set facingMode = "environment", it uses the webcam on a PC or the back camera on a phone, and if you set facingMode = "user", it uses the phone's front camera.
Well, I set both "user" and "environment" to test and there is no difference: on the phone it keeps using the front camera. I tried to get the back camera, but nothing.
Here is my code:
function startScanner() {
  Quagga.init({
    inputStream: {
      name: "Live",
      type: "LiveStream",
      target: document.querySelector('#scanner-container'),
      constraints: {
        width: 600,
        height: 450,
        facingMode: "enviroment" // or "user" for the front camera
      },
    },
    decoder: {
      readers: [
        "code_128_reader",
        "ean_reader",
        "ean_8_reader",
        "code_39_reader",
        "code_39_vin_reader",
        "codabar_reader",
        "upc_reader",
        "upc_e_reader",
        "i2of5_reader"
      ],
      debug: {
        showCanvas: true,
        showPatches: true,
        showFoundPatches: true,
        showSkeleton: true,
        showLabels: true,
        showPatchLabels: true,
        showRemainingPatchLabels: true,
        boxFromPatches: {
          showTransformed: true,
          showTransformedBox: true,
          showBB: true
        }
      }
    },
  }, function (err) {
    if (err) {
      console.log(err);
      return;
    }
    console.log("Initialization finished. Ready to start");
    console.log(navigator.mediaDevices.enumerateDevices());
    Quagga.start();
    // Set flag to "is running"
    _scannerIsRunning = true;
  });
}
I tried this on an Android phone.
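For anyone with the same problem, a more explicit alternative sketch: enumerate the video inputs and pass a specific deviceId in the constraints (note the documented facingMode value is spelled "environment"; whether your Quagga version forwards deviceId to getUserMedia is an assumption worth checking):

// Sketch: pick a back camera explicitly by deviceId instead of relying on facingMode.
async function startScannerWithBackCamera() {
  // Labels may be empty until camera permission has been granted at least once.
  const devices = await navigator.mediaDevices.enumerateDevices();
  const cameras = devices.filter((d) => d.kind === 'videoinput');
  const backCamera = cameras.find((d) => /back|rear/i.test(d.label)) || cameras[cameras.length - 1];

  Quagga.init({
    inputStream: {
      name: 'Live',
      type: 'LiveStream',
      target: document.querySelector('#scanner-container'),
      constraints: {
        width: 600,
        height: 450,
        deviceId: backCamera ? backCamera.deviceId : undefined,
        facingMode: 'environment' // correctly spelled fallback
      }
    },
    decoder: { readers: ['code_128_reader', 'ean_reader'] }
  }, function (err) {
    if (err) { console.log(err); return; }
    Quagga.start();
  });
}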

PhantomJS doesn't run .create()

Hardware: MacBook Pro 2019, macOS Catalina 10.15.5, using Visual Studio Code.
In my project we have the following issue: on a Linux machine it works, on a Windows machine it works, but on my machine it does NOT work.
The code that is not working is this one:
phantom.create().then(function (ph) {
  ph.createPage().then(function (page) {
    page.property('paperSize', {
      format: 'A4',
      orientation: 'portrait',
      margin: '0'
    }).then(function () {
      // ...
It crashes on the phantom.create() call.
I tried some variations, to see the objects getting built, like:
ph = phantom.create();
console.log('after the phantom create ');
console.log(ph);
console.log('this is the ph **');
(async function (ph) {
  page = ph.createPage().then(
    function (page) {
      page.property('paperSize', {
        format: 'A4',
        orientation: 'portrait',
        margin: '0'
        // ...
Still crashing.
Using async and await:
(async function () {
  ph = await phantom.create();
  page = await instance.createPage();
  ph.createPage().then(function (page) {
    page = page;
    page.property('paperSize', {
      format: 'A4',
      orientation: 'portrait',
      margin: '0'
    }).
      // ...
In the terminal, when I run require('phantom').create() in the Node REPL, I get the following:
> require('phantom').create()
Promise {
  Phantom {
    logger:
     { info: [Function],
       debug: [Function],
       error: [Function],
       warn: [Function] },
    process:
     ChildProcess {
       domain: [Domain],
       _events: [Object],
       _eventsCount: 2,
       _maxListeners: undefined,
       _closesNeeded: 3,
       _closesGot: 0,
       connected: false,
       signalCode: null,
       exitCode: null,
       killed: false,
       spawnfile: '/usr/local/lib/node_modules/phantomjs/lib/phantom/bin/phantomjs',
       _handle: [Process],
       spawnargs: [Array],
       pid: 14804,
       stdin: [Socket],
       stdout: [Socket],
       stderr: [Socket],
       stdio: [Array] },
    commands: Map {},
    events: Map {},
    heartBeatId:
     Timeout {
       _called: false,
       _idleTimeout: 100,
       _idlePrev: [TimersList],
       _idleNext: [TimersList],
       _idleStart: 3640,
       _onTimeout: [Function: bound heartBeat],
       _timerArgs: undefined,
       _repeat: 100,
       _destroyed: false,
       domain: [Domain],
       [Symbol(asyncId)]: 40,
       [Symbol(triggerAsyncId)]: 6 } },
  domain:
   Domain {
     domain: null,
     _events:
      { removeListener: [Function: updateExceptionCapture],
        newListener: [Function: updateExceptionCapture],
        error: [Function: debugDomainError] },
     _eventsCount: 3,
     _maxListeners: undefined,
     members: [] } }
When I use the debugger, it crashes on the line
ph = phantom.create();
If I resume the debugger and then pause, it is stuck in async_hooks.js on the line const fn = function(asyncId), in an endless loop until I get a timeout.
function emitHookFactory(symbol, name) {
  // Called from native. The asyncId stack handling is taken care of there
  // before this is called.
  // eslint-disable-next-line func-style
  const fn = function(asyncId) {
    active_hooks.call_depth += 1;
    // Use a single try/catch for all hook to avoid setting up one per
    // iteration.
    try {
      for (var i = 0; i < active_hooks.array.length; i++) {
        if (typeof active_hooks.array[i][symbol] === 'function') {
          active_hooks.array[i][symbol](asyncId);
        }
      }
I also tried changing launch.json and adding:
"skipFiles": [
  "inspector_async_hook.js",
  "async_hooks.js",
  "node_internals/**/*.js"
],
It still gets stuck on async_hooks.js though.
Any ideas what might be the reason?
Should I change to a Windows machine?
I'm installing a VM in the meantime.
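For comparison, the bare async/await flow I would expect with the phantom package (the example.com URL and out.pdf name are placeholders; this will of course still hang if phantom.create() itself never resolves on this machine):

// Sketch: minimal create -> createPage -> paperSize -> render flow.
const phantom = require('phantom');

(async function () {
  const ph = await phantom.create();           // the call that hangs on the Mac
  const page = await ph.createPage();
  await page.property('paperSize', {
    format: 'A4',
    orientation: 'portrait',
    margin: '0'
  });
  const status = await page.open('https://example.com'); // placeholder URL
  console.log('open status:', status);
  await page.render('out.pdf');                // placeholder output file
  await ph.exit();
})();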

YouTube API origin: Failed to execute 'postMessage' on 'DOMWindow'

I'm getting this error when I use the YouTube API to embed a video on Chrome:
"Failed to execute 'postMessage' on 'DOMWindow': The target origin provided ('https://www.youtube.com') does not match the recipient window's origin ('XXXXXXX')."
The embed works fine, but I get that error.
For a simple YouTube iframe embed, I typically solve this problem by adding an origin parameter to the URL. However, I don't seem to be able to do this with the API. I added an origin to the playerVars, but it doesn't resolve the error.
<script async src="https://youtube.com/iframe_api"></script>
<script>
  function onYouTubeIframeAPIReady() {
    var player1;
    player1 = new YT.Player('YouTubePlayer', {
      videoId: 'XXXXXXXXXXXX',
      width: 1920,
      height: 1080,
      playerVars: {
        autoplay: 1,
        start: 40,
        end: 640,
        controls: 0,
        showinfo: 0,
        enablejsapi: 1,
        origin: 'https://XXXXXXXXX.com',
        modestbranding: 1,
        loop: 1,
        fs: 0,
        cc_load_policy: 0,
        iv_load_policy: 3,
        autohide: 0
      },
      events: {
        onReady: function (e) {
          e.target.mute();
          player1.addEventListener('onStateChange', function (e) {
            var id = 'XXXXXXXXX';
            if (e.data === YT.PlayerState.ENDED) {
              player1.loadVideoById({
                'videoId': id,
                'startSeconds': 40,
                'endSeconds': 640,
                'suggestedQuality': 'large'
              });
            }
          });
        }
      }
    });
  }
</script>
Where should I add an origin? Or is there another way to address this problem? Thanks!
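Not a confirmed fix, but the value usually suggested for playerVars.origin is the page's own origin, so it matches the window the player posts messages back to; a sketch (videoId placeholder kept from above):

// Sketch: pass the embedding page's own origin instead of a hard-coded string.
function onYouTubeIframeAPIReady() {
  var player1 = new YT.Player('YouTubePlayer', {
    videoId: 'XXXXXXXXXXXX',
    playerVars: {
      enablejsapi: 1,
      origin: window.location.origin // must match the page that actually embeds the player
    },
    events: {
      onReady: function (e) { e.target.mute(); }
    }
  });
}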