How to share screen using WebRTC

I need to get screen sharing working. It works when I share camera video. This is the code:
public n = navigator as any;

ngAfterViewInit(): void {
  const video = this.myVideo.nativeElement;
  let peerx: any;

  this.n.getUserMedia =
    this.n.getUserMedia ||
    this.n.webkitGetUserMedia ||
    this.n.mozGetUserMedia ||
    this.n.msGetUserMedia;

  this.n.getUserMedia( // this.n.mediaDevices.getDisplayMedia
    {
      video: {
        mandatory: {
          chromeMediaSource: 'screen',
          maxWidth: 1920,
          maxHeight: 1080,
          minAspectRatio: 1.77
        },
      }
    },
    (stream: any) => {
      peerx = new SimplePeer({
        initiator: location.hash === '#init',
        trickle: false,
        stream,
      });
      peerx.on('signal', (data) => {
        const strData = JSON.stringify(data);
        console.log(strData);
        this.targetpeer = data;
      });
      peerx.on('stream', (streamX: any) => {
        if ('srcObject' in video) {
          video.srcObject = streamX;
        } else {
          video.src = window.URL.createObjectURL(streamX);
        }
        const playPromise = video.play();
        if (playPromise !== undefined) {
          playPromise
            .then((_) => {
              video.play();
            })
            .catch((error) => {
              console.log(`Playing was prevented: ${error}`);
            });
        }
      });
    }
  );
}
If I change the call from 'this.n.getUserMedia(...)' to 'this.n.mediaDevices.getDisplayMedia(...)', I never get the 'signal' event (the key that I need to paste on the client to connect).

You are attempting to mix getDisplayMedia with a constraints style that was needed years ago, when a Chrome extension was required for screen capture. That isn't going to work.
const stream = await navigator.mediaDevices.getDisplayMedia({video: true})
See here for a canonical sample.
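For completeness, here is a sketch of how the promise-based call could be wired into the Angular component from the question (same myVideo, targetpeer and SimplePeer names). Because getDisplayMedia returns a promise rather than taking success/error callbacks, the stream has to be awaited before the peer is constructed, which is most likely why the 'signal' callback never fires when the call is simply swapped in place:

// Sketch only: the same SimplePeer wiring as above, but awaiting getDisplayMedia.
// myVideo, targetpeer and SimplePeer are the names from the question.
async ngAfterViewInit(): Promise<void> {
  const video = this.myVideo.nativeElement;

  // No mandatory/chromeMediaSource constraints are needed any more.
  const stream = await navigator.mediaDevices.getDisplayMedia({ video: true });

  const peerx = new SimplePeer({
    initiator: location.hash === '#init',
    trickle: false,
    stream,
  });

  // Fires once the offer/answer is ready; copy it to the other peer.
  peerx.on('signal', (data: any) => {
    console.log(JSON.stringify(data));
    this.targetpeer = data;
  });

  peerx.on('stream', (remote: MediaStream) => {
    video.srcObject = remote;
    video.play().catch((error: any) =>
      console.log(`Playing was prevented: ${error}`)
    );
  });
}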

Related

Getstream.io with React Native

I'm having trouble uploading an image to the GetStream service: the image doesn't upload, and I get the following error. Here is my code:
const filter = { type: 'messaging', members: { $in: [userInfo.id] } };
const sort = [{ last_message_at: -1 }];

let channels;
await chatClient.queryChannels(filter, sort, {
  watch: false,
  state: true,
}).then((response) => {
  channels = response;
});

const file = this.state.avatar;
let avatarURI;
channels[0].sendImage(file).then((response) => {
  avatarURI = response.file;
  console.log(avatarURI);
});
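For reference, the same flow can be written with async/await throughout, which makes it easier to surface the actual upload error. This is only a sketch: chatClient, userInfo and the avatar URI are assumed to come from the question's setup, and sendImage is expected to resolve with the uploaded file's URL in its file field.

// Sketch: query the channel, then upload the avatar and log any failure.
// Assumes chatClient is an already-connected StreamChat instance and
// `file` is the local image URI from this.state.avatar.
const uploadAvatar = async (file) => {
  const filter = { type: 'messaging', members: { $in: [userInfo.id] } };
  const sort = [{ last_message_at: -1 }];

  const channels = await chatClient.queryChannels(filter, sort, {
    watch: false,
    state: true,
  });

  try {
    const response = await channels[0].sendImage(file);
    console.log('uploaded avatar URL:', response.file);
  } catch (err) {
    // The error object usually includes the response body with details.
    console.log('sendImage failed:', err);
  }
};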

Can't get WebRTC to work in different networks

I am trying to send a video stream from one browser to another with WebRTC and socket.io. It works just fine on the same network, but no image gets through across different networks.
I use socket.io as a signaling server. I register two browsers in a "room" and then start exchanging signals.
The code executed in the browser that sends the stream:
function joinRoom(room) {
  if (room === '') {
    alert('Please type a room ID')
  } else {
    data = { room: room };
    socket.emit('join', data);
  }
}

// SOCKET EVENT CALLBACKS =====================================================
socket.on('room_created', async () => {
  console.log('Socket event callback: room_created')
  await setLocalStream(mediaConstraints)
  socket.emit('startc', { room: roomId, clientip: clientip })
  isRoomCreator = true
})

socket.on('full_room', () => {
  console.log('Socket event callback: full_room')
  alert('The room is full, please try another one')
})

socket.on('startc', async () => {
  console.log('Socket event callback: start_call')
  if (isRoomCreator) {
    rtcPeerConnection = new RTCPeerConnection(iceServers)
    addLocalTracks(rtcPeerConnection)
    rtcPeerConnection.ontrack = setRemoteStream
    rtcPeerConnection.onicecandidate = sendIceCandidate
    await createOffer(rtcPeerConnection)
  }
})

socket.on('offer', async (event) => {
  console.log('Socket event callback: offer')
  if (!isRoomCreator) {
    rtcPeerConnection = new RTCPeerConnection(iceServers)
    addLocalTracks(rtcPeerConnection)
    rtcPeerConnection.ontrack = setRemoteStream
    rtcPeerConnection.onicecandidate = sendIceCandidate
    rtcPeerConnection.setRemoteDescription(new RTCSessionDescription(event))
    await createAnswer(rtcPeerConnection)
  }
})

socket.on('answer', (event) => {
  console.log('answer');
  console.log('Socket event callback: webrtc_answer')
  rtcPeerConnection.setRemoteDescription(new RTCSessionDescription(event))
})

socket.on('webrtc_ice_candidate', (event) => {
  console.log('Socket event callback: webrtc_ice_candidate')
  // ICE candidate configuration.
  var candidate = new RTCIceCandidate({
    sdpMLineIndex: event.label,
    candidate: event.candidate,
  })
  rtcPeerConnection.addIceCandidate(candidate)
})
The code that accepts the streamed media (Vue.js):
socket.on("offer", (data) => {
  this.$emit("closeWaitingToConnect");
  this.createAnswer(data);
});

joinMeToRoom() {
  console.log("joinToRoom: ", this.room);
  this.$socket.emit("join", this.room);
}, // joinMeToRoom()

createAnswer: function(event) {
  var roomId = this.room.room;
  let sessionDescription;
  this.peer.ontrack = this.setRemoteStream;
  this.peer.onicecandidate = this.sendIceCandidate;
  this.peer.setRemoteDescription(new RTCSessionDescription(event));
  try {
    sessionDescription = this.peer.createAnswer().then((answer) => {
      console.log('sessionDescription');
      console.log(answer);
      this.$socket.emit('answer', {
        type: 'webrtc_answer',
        sdp: answer,
        sessionDescription: JSON.stringify(answer),
        roomId,
      });
      return this.peer.setLocalDescription(answer);
    });
  } catch (error) {
    console.error('cae: ' + error);
  }
},

getScreenPosition() {
  const right = this.$refs.screen.getBoundingClientRect().right;
  const bottom = this.$refs.screen.getBoundingClientRect().bottom;
  return { bottom: bottom, right: right };
},

setRemoteStream(event) {
  console.log('event setRemoteStream');
  console.log(event);
  var stream_screen = document.querySelector("video");
  stream_screen.srcObject = event.streams[0];
  stream_screen.play();
  var remoteStream = event.stream;
},
I have set up my own TURN server and also tried paid ones. I still can't get the stream across different networks.
What am I missing?
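One thing worth verifying in a setup like this is whether the TURN server is actually being used. Below is a minimal sketch (the turn: URL and credentials are placeholders, not from the question): setting iceTransportPolicy to 'relay' forces every connection through TURN, so if the call stops working with that flag, the TURN configuration itself is the problem; logging the gathered candidate types shows whether any relay candidates appear at all.

// Sketch only: placeholder STUN/TURN entries; substitute your own server.
const iceServers = {
  iceServers: [
    { urls: 'stun:stun.l.google.com:19302' },
    {
      urls: 'turn:turn.example.com:3478',
      username: 'user',
      credential: 'pass',
    },
  ],
  // Force all traffic through the TURN relay for testing.
  iceTransportPolicy: 'relay',
};

const pc = new RTCPeerConnection(iceServers);

// Across different networks you need 'srflx' and/or 'relay' candidates,
// not just 'host' ones.
pc.onicecandidate = (event) => {
  if (event.candidate) {
    console.log(event.candidate.type, event.candidate.candidate);
  }
};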

React Native image URI and AsyncStorage

I'm trying to save a profile picture and I store it with AsyncStorage. The storing part works perfectly, but if I close the app and reopen it, the image doesn't show. I'm logging the image URI and the URI is there, but I can't solve the problem.
Here is my code. This is the image pick part:
const ImagePick = async () => {
  const options = {
    title: 'Make a selection.',
    cancelButtonTitle: 'Cancel',
    takePhotoButtonTitle: 'Take a photo',
    chooseFromLibraryButtonTitle: 'Choose from gallery',
    chooseWhichLibraryTitle: 'Choose from gallery',
    mediaType: 'photo',
    storageOptions: { skipBackup: true, path: 'images' },
  };
  let isCameraPermitted = await requestCameraPermission();
  let isStoragePermitted = await requestExternalWritePermission();
  if (isCameraPermitted && isStoragePermitted) {
    ImagePicker.showImagePicker(options, async response => {
      //console.log('response', response);
      if (response.didCancel) {
        console.log('User cancelled the photo selection');
      } else if (response.customButton) {
        console.log('Custom button tapped.');
      } else if (response.error) {
        console.log('error', 'An error occurred.');
      } else {
        console.log(response.fileName);
        let uri = response.uri;
        let path = response.uri;
        if (Platform.OS === 'ios') {
          path = '~' + path.substring(path.indexOf('/Documents'));
        }
        if (!response.fileName) {
          response.fileName = path.split('/').pop();
        }
        let name = response.fileName;
        let type = `image/${name.split('.').slice(-1)[0]}`;
        console.log('uri', uri);
        console.log('name', name);
        console.log('type', type);
        setImageUri(response.uri);
        try {
          await AsyncStorage.setItem('profilePicture', response.uri);
          console.log('AsyncStorage save successful');
        } catch (error) {
          console.log(error);
        }
      }
    });
  }
};
I get the image like this:
useEffect(() => {
  getProfilePicture();
}, []);

const getProfilePicture = async () => {
  const profilePicture = await AsyncStorage.getItem('profilePicture');
  console.log('profilePicture', profilePicture);
  if (profilePicture !== null) {
    setImageUri(profilePicture);
    setIsLoading(false);
  } else {
    setImageUri(
      'https://t3.ftcdn.net/jpg/03/46/83/96/360_F_346839683_6nAPzbhpSkIpb8pmAwufkC7c5eD7wYws.jpg',
    );
    setIsLoading(false);
  }
};
The emulator was the problem; on a real device it works.

Why does the react-native-share options popup get dismissed immediately on iOS?

composeEmail = async () => {
  const file = await this.downloadCsv();
  const res = await RNFetchBlob.fs.exists(file.path);
  if (res) {
    const data = await RNFetchBlob.fs.readFile(file.path, 'base64');
    console.log("encoded data", data);
    if (Platform.OS === "ios") {
      const shareOptions = {
        social: Share.Social.EMAIL,
        subject: '',
        filename: file.filename.split(".")[0],
        saveToFiles: file.path,
        url: file.path,
        type: 'csv',
        failOnCancel: true,
      };
      try {
        const ShareResponse = await Share.shareSingle(shareOptions);
      } catch (error) {
        console.log('Error:', error);
      }
    }
  }
};
When we tap the share button, the share sheet pops up but is dismissed immediately. We also tried Share.open(shareOptions), but got the same result.

How to pass an audio stream recorded with WebRTC to the Google Speech API for real-time transcription?

What I'm trying to do is get real-time transcription for video recorded in the browser with WebRTC. The use case is basically real-time subtitles, like Google Hangouts has.
So I have a WebRTC program running in the browser. It sends WebM objects back to the server. The audio is LINEAR32-encoded; Google Speech-to-Text only accepts LINEAR16 or FLAC.
Is there a way to convert LINEAR32 to LINEAR16 in real time?
Otherwise, has anyone been able to hook up WebRTC with Google Speech to get real-time transcription working?
Any advice on where to look to solve this problem would be great.
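(For context: LINEAR32 here is 32-bit float PCM with samples in the range [-1, 1], and converting it to LINEAR16 amounts to clamping and scaling each sample to a signed 16-bit integer, as in the sketch below; the function name is illustrative. The second answer below builds this conversion into a full pipeline.)

// Sketch: convert 32-bit float PCM (LINEAR32) to 16-bit PCM (LINEAR16).
function floatTo16BitPCM(float32: Float32Array): Int16Array {
  const out = new Int16Array(float32.length);
  for (let i = 0; i < float32.length; i++) {
    const s = Math.max(-1, Math.min(1, float32[i])); // clamp to [-1, 1]
    out[i] = s < 0 ? s * 0x8000 : s * 0x7fff;        // scale to the int16 range
  }
  return out;
}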
Check out this repository, it might help you: https://github.com/muaz-khan/Translator
Translator.js is a JavaScript library built on top of the Google Speech-Recognition & Translation API to transcribe and translate voice and text. It supports many locales and brings globalization to WebRTC!
I had the same problem and failed with WebRTC. I recommend you use the Web Audio API instead if you are just interested in transcribing the audio from the video.
Here is how I did it with a Node.js server and a React client app. It is uploaded to GitHub here.
You need an audio worklet script. (Put it in the public folder, because that is where the API expects to find it.)
recorderWorkletProcessor.js (saved in public/src/worklets/recorderWorkletProcessor.js)
/**
 * An in-place replacement for ScriptProcessorNode using AudioWorklet
 */
class RecorderProcessor extends AudioWorkletProcessor {
  // 0. Determine the buffer size (this is the same as the 1st argument of ScriptProcessor)
  bufferSize = 2048;
  // 1. Track the current buffer fill level
  _bytesWritten = 0;
  // 2. Create a buffer of fixed size
  _buffer = new Float32Array(this.bufferSize);

  constructor() {
    super();
    this.initBuffer();
  }

  initBuffer() {
    this._bytesWritten = 0;
  }

  isBufferEmpty() {
    return this._bytesWritten === 0;
  }

  isBufferFull() {
    return this._bytesWritten === this.bufferSize;
  }

  /**
   * @param {Float32Array[][]} inputs
   * @returns {boolean}
   */
  process(inputs) {
    // Grabbing the 1st channel similar to ScriptProcessorNode
    this.append(inputs[0][0]);
    return true;
  }

  /**
   * @param {Float32Array} channelData
   */
  append(channelData) {
    if (this.isBufferFull()) {
      this.flush();
    }
    if (!channelData) return;
    for (let i = 0; i < channelData.length; i++) {
      this._buffer[this._bytesWritten++] = channelData[i];
    }
  }

  flush() {
    // trim the buffer if ended prematurely
    const buffer =
      this._bytesWritten < this.bufferSize
        ? this._buffer.slice(0, this._bytesWritten)
        : this._buffer;
    const result = this.downsampleBuffer(buffer, 44100, 16000);
    this.port.postMessage(result);
    this.initBuffer();
  }

  downsampleBuffer(buffer, sampleRate, outSampleRate) {
    if (outSampleRate == sampleRate) {
      return buffer;
    }
    if (outSampleRate > sampleRate) {
      throw new Error("downsampling rate should be smaller than original sample rate");
    }
    var sampleRateRatio = sampleRate / outSampleRate;
    var newLength = Math.round(buffer.length / sampleRateRatio);
    var result = new Int16Array(newLength);
    var offsetResult = 0;
    var offsetBuffer = 0;
    while (offsetResult < result.length) {
      var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);
      var accum = 0,
        count = 0;
      for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++) {
        accum += buffer[i];
        count++;
      }
      result[offsetResult] = Math.min(1, accum / count) * 0x7fff;
      offsetResult++;
      offsetBuffer = nextOffsetBuffer;
    }
    return result.buffer;
  }
}

registerProcessor("recorder.worklet", RecorderProcessor);
Install Socket.io-client on front end
npm i socket.io-client
React component code
/* eslint-disable react-hooks/exhaustive-deps */
import { default as React, useEffect, useState, useRef } from "react";
import { Button } from "react-bootstrap";
import Container from "react-bootstrap/Container";
import * as io from "socket.io-client";

const sampleRate = 16000;

const getMediaStream = () =>
  navigator.mediaDevices.getUserMedia({
    audio: {
      deviceId: "default",
      sampleRate: sampleRate,
      sampleSize: 16,
      channelCount: 1,
    },
    video: false,
  });

interface WordRecognized {
  final: boolean;
  text: string;
}

const AudioToText: React.FC = () => {
  const [connection, setConnection] = useState<io.Socket>();
  const [currentRecognition, setCurrentRecognition] = useState<string>();
  const [recognitionHistory, setRecognitionHistory] = useState<string[]>([]);
  const [isRecording, setIsRecording] = useState<boolean>(false);
  const [recorder, setRecorder] = useState<any>();
  const processorRef = useRef<any>();
  const audioContextRef = useRef<any>();
  const audioInputRef = useRef<any>();

  const speechRecognized = (data: WordRecognized) => {
    if (data.final) {
      setCurrentRecognition("...");
      setRecognitionHistory((old) => [data.text, ...old]);
    } else setCurrentRecognition(data.text + "...");
  };

  const connect = () => {
    connection?.disconnect();
    const socket = io.connect("http://localhost:8081");
    socket.on("connect", () => {
      console.log("connected", socket.id);
      setConnection(socket);
    });

    socket.emit("send_message", "hello world");
    socket.emit("startGoogleCloudStream");

    socket.on("receive_message", (data) => {
      console.log("received message", data);
    });

    socket.on("receive_audio_text", (data) => {
      speechRecognized(data);
      console.log("received audio text", data);
    });

    socket.on("disconnect", () => {
      console.log("disconnected", socket.id);
    });
  };

  const disconnect = () => {
    if (!connection) return;
    connection?.emit("endGoogleCloudStream");
    connection?.disconnect();
    processorRef.current?.disconnect();
    audioInputRef.current?.disconnect();
    audioContextRef.current?.close();
    setConnection(undefined);
    setRecorder(undefined);
    setIsRecording(false);
  };

  useEffect(() => {
    (async () => {
      if (connection) {
        if (isRecording) {
          return;
        }
        const stream = await getMediaStream();
        audioContextRef.current = new window.AudioContext();
        await audioContextRef.current.audioWorklet.addModule(
          "/src/worklets/recorderWorkletProcessor.js"
        );
        audioContextRef.current.resume();
        audioInputRef.current =
          audioContextRef.current.createMediaStreamSource(stream);
        processorRef.current = new AudioWorkletNode(
          audioContextRef.current,
          "recorder.worklet"
        );
        processorRef.current.connect(audioContextRef.current.destination);
        audioContextRef.current.resume();
        audioInputRef.current.connect(processorRef.current);
        processorRef.current.port.onmessage = (event: any) => {
          const audioData = event.data;
          connection.emit("send_audio_data", { audio: audioData });
        };
        setIsRecording(true);
      } else {
        console.error("No connection");
      }
    })();
    return () => {
      if (isRecording) {
        processorRef.current?.disconnect();
        audioInputRef.current?.disconnect();
        if (audioContextRef.current?.state !== "closed") {
          audioContextRef.current?.close();
        }
      }
    };
  }, [connection, isRecording, recorder]);

  return (
    <React.Fragment>
      <Container className="py-5 text-center">
        <Container fluid className="py-5 bg-primary text-light text-center ">
          <Container>
            <Button
              className={isRecording ? "btn-danger" : "btn-outline-light"}
              onClick={connect}
              disabled={isRecording}
            >
              Start
            </Button>
            <Button
              className="btn-outline-light"
              onClick={disconnect}
              disabled={!isRecording}
            >
              Stop
            </Button>
          </Container>
        </Container>
        <Container className="py-5 text-center">
          {recognitionHistory.map((tx, idx) => (
            <p key={idx}>{tx}</p>
          ))}
          <p>{currentRecognition}</p>
        </Container>
      </Container>
    </React.Fragment>
  );
};

export default AudioToText;
server.js
const express = require("express");
const speech = require("@google-cloud/speech");
//use logger
const logger = require("morgan");
//use body parser
const bodyParser = require("body-parser");
//use cors
const cors = require("cors");
const http = require("http");
const { Server } = require("socket.io");

const app = express();
app.use(cors());
app.use(logger("dev"));
app.use(bodyParser.json());

const server = http.createServer(app);
const io = new Server(server, {
  cors: {
    origin: "http://localhost:3000",
    methods: ["GET", "POST"],
  },
});

//TODO: run in terminal first to set up credentials: export GOOGLE_APPLICATION_CREDENTIALS="./speech-to-text-key.json"
const speechClient = new speech.SpeechClient();

io.on("connection", (socket) => {
  let recognizeStream = null;
  console.log("** a user connected - " + socket.id + " **\n");

  socket.on("disconnect", () => {
    console.log("** user disconnected ** \n");
  });

  socket.on("send_message", (message) => {
    console.log("message: " + message);
    setTimeout(() => {
      io.emit("receive_message", "got this message" + message);
    }, 1000);
  });

  socket.on("startGoogleCloudStream", function (data) {
    startRecognitionStream(this, data);
  });

  socket.on("endGoogleCloudStream", function () {
    console.log("** ending google cloud stream **\n");
    stopRecognitionStream();
  });

  socket.on("send_audio_data", async (audioData) => {
    io.emit("receive_message", "Got audio data");
    if (recognizeStream !== null) {
      try {
        recognizeStream.write(audioData.audio);
      } catch (err) {
        console.log("Error calling google api " + err);
      }
    } else {
      console.log("RecognizeStream is null");
    }
  });

  function startRecognitionStream(client) {
    console.log("* StartRecognitionStream\n");
    try {
      recognizeStream = speechClient
        .streamingRecognize(request)
        .on("error", console.error)
        .on("data", (data) => {
          const result = data.results[0];
          const isFinal = result.isFinal;
          const transcription = data.results
            .map((result) => result.alternatives[0].transcript)
            .join("\n");
          console.log(`Transcription: `, transcription);
          client.emit("receive_audio_text", {
            text: transcription,
            final: isFinal,
          });
        });
    } catch (err) {
      console.error("Error streaming google api " + err);
    }
  }

  function stopRecognitionStream() {
    if (recognizeStream) {
      console.log("* StopRecognitionStream \n");
      recognizeStream.end();
    }
    recognizeStream = null;
  }
});

server.listen(8081, () => {
  console.log("WebSocket server listening on port 8081.");
});
// =========================== GOOGLE CLOUD SETTINGS ================================ //
// The encoding of the audio file, e.g. 'LINEAR16'
// The sample rate of the audio file in hertz, e.g. 16000
// The BCP-47 language code to use, e.g. 'en-US'
const encoding = "LINEAR16";
const sampleRateHertz = 16000;
const languageCode = "en-US"; //en-US
const alternativeLanguageCodes = ["en-US", "ko-KR"];

const request = {
  config: {
    encoding: encoding,
    sampleRateHertz: sampleRateHertz,
    languageCode: languageCode,
    //alternativeLanguageCodes: alternativeLanguageCodes,
    enableWordTimeOffsets: true,
    enableAutomaticPunctuation: true,
    enableWordConfidence: true,
    enableSpeakerDiarization: true,
    diarizationSpeakerCount: 2,
    model: "video",
    //model: "command_and_search",
    useEnhanced: true,
    speechContexts: [
      {
        phrases: ["hello", "안녕하세요"],
      },
    ],
  },
  interimResults: true,
};