WebRTC in react-native (hooks), redux - Unhandled Promise Rejections

I'm developing a react-native application which uses WebRTC.
I really liked the minimal version I found here (kudos to baconcheese113!) and I decided to refactor it to create my own React component.
I have set up a backend (DynamoDB, Appsync) and a redux store that allows me to:
dispatch an action sendCreateUserControlMsg, which down the line calls the Appsync endpoint to create a new ControlUserMsg
subscribe to a ControlUserMsg, set the flag triggerWebrtcData and save webrtcData in the Redux state
The following component (which for now calls itself) sometimes works, but mostly doesn't. I feel the problem is related to JS Promises, but I don't fully understand how I should design the component to avoid race conditions.
import React, { useState, useEffect } from 'react';
import { View, SafeAreaView, Button, StyleSheet } from 'react-native';
import { RTCPeerConnection, RTCView, mediaDevices } from 'react-native-webrtc';
import { sendCreateUserControlMsg } from '../redux/actions/UserControlMsgActions';
import controlMsgActions from './../model/control_msg_actions';
import webrtcActionTypes from './../model/webrtc_action_types';
import { useDispatch, useSelector } from "react-redux";
import * as triggersMatch from '../redux/actions/TriggersMatchActions';
var IS_LOCAL_USER = true //manual flag I temporarily set
var localUserID = '00';
var localUser = 'localUser'
var remoteUserID = '01';
var remoteUser = 'remoteUser'
if (IS_LOCAL_USER) {
var matchedUserId = remoteUserID
var user_id = localUserID;
var user = localUser
}
else {
var matchedUserId = localUserID
var user_id = remoteUserID;
var user = remoteUser
}
export default function App() {
const dispatch = useDispatch();
var triggersMatchBool = useSelector(state => state.triggers_match)
var webrtcData = useSelector(state => state.webrtc_description.webrtcData)
const [localStream, setLocalStream] = useState();
const [remoteStream, setRemoteStream] = useState();
const [cachedLocalPC, setCachedLocalPC] = useState();
const [cachedRemotePC, setCachedRemotePC] = useState();
const sendICE = (candidate, isLocal) => {
const type = isLocal ? webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_LOCAL"] : webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_REMOTE"]
var payload = JSON.stringify({
type,
candidate
})
console.log(`Sending ICE to ${matchedUserId}`)
dispatch(sendCreateUserControlMsg(matchedUserId, user_id, user, payload, controlMsgActions["WEBRTC_DATA"]));
}
const sendOffer = (offer) => {
type = webrtcActionTypes["OFFER"]
var payload = JSON.stringify({
type,
offer
})
console.log(`Sending Offer to ${matchedUserId}`)
dispatch(sendCreateUserControlMsg(matchedUserId, user_id, user, payload, controlMsgActions["WEBRTC_DATA"]));
}
const sendAnswer = (answer) => {
type = webrtcActionTypes["ANSWER"]
var payload = JSON.stringify({
type,
answer
})
console.log(`Sending answer to ${matchedUserId}`)
dispatch(sendCreateUserControlMsg(matchedUserId, user_id, user, payload, controlMsgActions["WEBRTC_DATA"]));
}
const [isMuted, setIsMuted] = useState(false);
// START triggers
async function triggerMatchWatcher() {
if (triggersMatchBool.triggerWebrtcData) {
dispatch(triggersMatch.endTriggerWebrtcData());
switch (webrtcData.type) {
case webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_LOCAL"]:
try {
setCachedRemotePC(cachedRemotePC.addIceCandidate(webrtcData.candidate))
} catch (error) {
console.warn('ICE not added')
}
break;
case webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_REMOTE"]:
try {
setCachedLocalPC(cachedLocalPC.addIceCandidate(webrtcData.candidate))
} catch (error) {
console.warn('ICE not added')
}
break;
case webrtcActionTypes["OFFER"]:
console.log('remotePC, setRemoteDescription');
try {
await cachedRemotePC.setRemoteDescription(webrtcData.offer);
console.log('RemotePC, createAnswer');
const answer = await cachedRemotePC.createAnswer();
setCachedRemotePC(cachedRemotePC)
sendAnswer(answer);
} catch (error) {
console.warn(`setRemoteDescription failed ${error}`);
}
case webrtcActionTypes["ANSWER"]:
try {
console.log(`Answer from remotePC: ${webrtcData.answer.sdp}`);
console.log('remotePC, setLocalDescription');
await cachedRemotePC.setLocalDescription(webrtcData.answer);
setCachedRemotePC(cachedRemotePC)
console.log('localPC, setRemoteDescription');
await cachedLocalPC.setRemoteDescription(cachedRemotePC.localDescription);
setCachedLocalPC(cachedLocalPC)
} catch (error) {
console.warn(`setLocalDescription failed ${error}`);
}
}
}
}
useEffect(() => {
triggerMatchWatcher()
}
);
const startLocalStream = async () => {
// isFront will determine if the initial camera should face user or environment
const isFront = true;
const devices = await mediaDevices.enumerateDevices();
const facing = isFront ? 'front' : 'environment';
const videoSourceId = devices.find(device => device.kind === 'videoinput' && device.facing === facing);
const facingMode = isFront ? 'user' : 'environment';
const constraints = {
audio: true,
video: {
mandatory: {
minWidth: 500, // Provide your own width, height and frame rate here
minHeight: 300,
minFrameRate: 30,
},
facingMode,
optional: videoSourceId ? [{ sourceId: videoSourceId }] : [],
},
};
const newStream = await mediaDevices.getUserMedia(constraints);
setLocalStream(newStream);
};
const startCall = async () => {
const configuration = { iceServers: [{ url: 'stun:stun.l.google.com:19302' }] };
const localPC = new RTCPeerConnection(configuration);
const remotePC = new RTCPeerConnection(configuration);
localPC.onicecandidate = e => {
try {
console.log('localPC icecandidate:', e.candidate);
if (e.candidate) {
sendICE(e.candidate, true)
}
} catch (err) {
console.error(`Error adding remotePC iceCandidate: ${err}`);
}
};
remotePC.onicecandidate = e => {
try {
console.log('remotePC icecandidate:', e.candidate);
if (e.candidate) {
sendICE(e.candidate, false)
}
} catch (err) {
console.error(`Error adding localPC iceCandidate: ${err}`);
}
};
remotePC.onaddstream = e => {
console.log('remotePC tracking with ', e);
if (e.stream && remoteStream !== e.stream) {
console.log('RemotePC received the stream', e.stream);
setRemoteStream(e.stream);
}
};
localPC.addStream(localStream);
// Not sure whether onnegotiationneeded is needed
// localPC.onnegotiationneeded = async () => {
// try {
// const offer = await localPC.createOffer();
// console.log('Offer from localPC, setLocalDescription');
// await localPC.setLocalDescription(offer);
// sendOffer(localPC.localDescription)
// } catch (err) {
// console.error(err);
// }
// };
try {
const offer = await localPC.createOffer();
console.log('Offer from localPC, setLocalDescription');
await localPC.setLocalDescription(offer);
sendOffer(localPC.localDescription)
} catch (err) {
console.error(err);
}
setCachedLocalPC(localPC);
setCachedRemotePC(remotePC);
};
const switchCamera = () => {
localStream.getVideoTracks().forEach(track => track._switchCamera());
};
const closeStreams = () => {
if (cachedLocalPC) {
cachedLocalPC.removeStream(localStream);
cachedLocalPC.close();
}
if (cachedRemotePC) {
cachedRemotePC.removeStream(localStream);
cachedRemotePC.close();
}
setLocalStream();
setRemoteStream();
setCachedRemotePC();
setCachedLocalPC();
};
return (
<SafeAreaView style={styles.container}>
{!localStream && <Button title="Click to start stream" onPress={startLocalStream} />}
{localStream && <Button title="Click to start call" onPress={startCall} disabled={!!remoteStream} />}
{localStream && (
<View style={styles.toggleButtons}>
<Button title="Switch camera" onPress={switchCamera} />
</View>
)}
<View style={styles.rtcview}>
{localStream && <RTCView style={styles.rtc} streamURL={localStream.toURL()} />}
</View>
<View style={styles.rtcview}>
{remoteStream && <RTCView style={styles.rtc} streamURL={remoteStream.toURL()} />}
</View>
<Button title="Click to stop call" onPress={closeStreams} disabled={!remoteStream} />
</SafeAreaView>
);
}
const styles = StyleSheet.create({
// omitted
});
The most common errors I receive are:
Error: Failed to add ICE candidate
Possible Unhandled Promise Rejection
and
setLocalDescription failed TypeError: Cannot read property 'sdp' of
undefined
If I console.log the values I can see that they are JS Promises, but since they are not functions I cannot use .then() on them.
How can I call the addIceCandidate or setLocalDescription methods without running into Unhandled Promise Rejection errors?
What are the best practices to work with WebRTC in react-native?
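For reference, here is a minimal sketch of how those calls could be awaited explicitly instead of being handed to setState (the handleIncomingIce / handleIncomingAnswer names are made up for illustration; pc stands for whichever cached peer connection applies). addIceCandidate and setRemoteDescription both return Promises, so they need an await (or a .catch()) for failures to actually be caught:
// a minimal sketch (not the full component): await the Promise-returning
// WebRTC calls inside try/catch instead of passing the Promise to setState
async function handleIncomingIce(pc, candidate) {
  try {
    // addIceCandidate returns a Promise; awaiting it turns a rejection
    // into a catchable error instead of an unhandled one
    await pc.addIceCandidate(candidate);
  } catch (error) {
    console.warn(`addIceCandidate failed: ${error}`);
  }
}

async function handleIncomingAnswer(pc, answer) {
  try {
    await pc.setRemoteDescription(answer);
  } catch (error) {
    console.warn(`setRemoteDescription failed: ${error}`);
  }
}
The peer connection object itself does not need to go back through setCachedLocalPC / setCachedRemotePC after each call; keeping it in state once (or in a useRef) and awaiting the methods on it should avoid both the unhandled rejections and the stale-closure races the dependency-less useEffect can hit.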

Related

Multiple useEffect in react-native to achieve mentioned functionality

I need help with the async nature of AsyncStorage and the axios API. Here's the functionality that I am trying to achieve:
send a request to two separate APIs to get some data.
display that data on the screen with some additional text.
the API requests are authenticated, so a token is passed as an Authentication header.
I have attached my current implementation; I am running into a number of errors with it.
Errors:
login_token not set in state after fetching it from AsyncStorage.
Data not set in state after the API call.
Both result in either failed API calls or undefined state errors on render.
This is my code.
import React, { FunctionComponent, useEffect, useCallback, useState} from 'react';
import { StyleSheet, View} from 'react-native';
// chat
import { GiftedChat } from 'react-native-gifted-chat';
// navigation
import { RootStackParamList } from '../../navigators/RootStack';
import { StackScreenProps } from '@react-navigation/stack';
export type Props = StackScreenProps<RootStackParamList, "Chat">;
// api
import { Convo_details, Send_Msg, List_Msg, Expert_Public_Profile } from '../../api/UserApi';
import Spinner from 'react-native-loading-spinner-overlay';
import AsyncStorage from '@react-native-async-storage/async-storage';
import uuid from 'react-native-uuid';
const Chat: FunctionComponent<Props> = ({ navigation, route, ...props }) => {
// console.log(props.route.params);
const [login_token, setlogin_token] = useState('')
const [conversation_id, setconversation_id] = useState('')
const [conversation_details, setconversation_details] = useState({})
const [currentuser, setcurrentuser] = useState({})
const [loading, setLoading] = useState(false);
const [expertuid, setexpertuid] = useState('')
const [ExpertProfile, setExpertProfile] = useState({})
const [messages, setMessages] = useState([]);
useEffect(() => {
getlogintoken()
console.log("####################################","getlogintoken");
}, [])
/* conversationid */
useEffect(() => {
if (route.params != null) {
setconversation_id(route.params[0])
}
console.log("####################################","conversation id");
}, [])
/* expert uid */
useEffect(() => {
if (route.params != null) {
setexpertuid(route.params[1])
}
console.log("####################################","expert uid");
}, [])
/* expert public profile */
useEffect(() => {
getexpertpublicprofile()
getConvo_details()
console.log("####################################","convo_details");
}, [])
useEffect(() => {
// get current user
AsyncStorage.getItem("currentuser").then(res => {
if (res != null) setcurrentuser(res)
else alert("Current user not found")
})
console.log("####################################","current user");
}, [])
// set welcome msg
useEffect(() => {
if (Object.keys(conversation_details).length != 0 && Object.keys(ExpertProfile).length != 0)
setwelcomemsg()
}, [])
const onSend = useCallback(async (messages = []) => {
// console.log(messages[0].text);
setMessages(previousMessages => GiftedChat.append(previousMessages, messages))
const data = {
conversation_id: "f98d6851-a713-4f58-9118-77a779ff175f",//conversation_id,
message_type: "TEXT",
body: messages[0].text
}
const res: any = await Send_Msg(data, login_token)
.catch(error => {
alert(`Send_Msg -> ${error}`)
console.log(error);
return
})
if (res.status == 200) {
console.log(res.data);
} else console.log(res);
}, [])
const getexpertpublicprofile = async () => {
setLoading(true)
const res: any = await Expert_Public_Profile(expertuid, login_token)
.catch(error => {
setLoading(false)
console.log("Expert public profile ->");
alert(`Expert public profile ->${error.message}`)
console.log(error);
return
})
setLoading(false)
if (res.status === 200) setExpertProfile(res.data)
else {
alert(`get expert public profile${res.data.message}`)
console.log("getexpertpublicprofile -->");
console.log(res.data);
}
}
const getlogintoken = () => {
AsyncStorage.getItem("login_token").then(res => {
if (res != null) {
setLoading(false)
setlogin_token(res)
}
else alert("No login token found")
})
}
const getConvo_details = async () => {
setLoading(true)
const res: any = await Convo_details(conversation_id, login_token)
.catch(error => {
setLoading(false)
alert(`Convo_details-->${error.message}`)
console.log("Convo_details -->");
console.log(error);
return
})
setLoading(false)
if (res.status === 200) setconversation_details(res.data)
else {
alert(`get convo details-> ${res.data.message}`)
console.log("getConvo_details -->");
console.log(res.data);
}
}
const setwelcomemsg = () => {
try {
let user = JSON.parse(currentuser)
let messages = [
{
_id: uuid.v4().toString(),
conversation_id: conversation_details.conversation_id,
created_at: new Date(),
from: conversation_details.recipient.user_uid,
type: "TEXT",
text: `About Me - ${ExpertProfile.bio}`,
user: {
_id: conversation_details.recipient.user_uid,
}
},
{
_id: uuid.v4().toString(),
conversation_id: conversation_details.conversation_id,
created_at: new Date(),
from: conversation_details.recipient.user_uid,
type: "TEXT",
text: `My name is ${conversation_details.recipient.name}`,
user: {
_id: conversation_details.recipient.user_uid,
}
},
{
_id: uuid.v4().toString(),
conversation_id: conversation_details.conversation_id,
created_at: new Date(),
from: conversation_details.recipient.user_uid,
type: "TEXT",
text: `Hi ${user.full_name}`,
user: {
_id: conversation_details.recipient.user_uid,
}
}]
setMessages(previousMessages => GiftedChat.append(previousMessages, messages))
} catch (error) {
console.log("try -> set welcome msg");
console.log(error);
return
}
}
return (
<View style={styles.maincontainer}>
<Spinner
visible={loading}
textContent={'Loading...'}
textStyle={{ color: '#FFF' }}
/>
<GiftedChat
messages={messages}
onSend={messages => onSend(messages)}
user={{
_id: currentuser.user_uid,
}}
isTyping={false}
scrollToBottom={true}
showAvatarForEveryMessage={true}
renderAvatar={() => null}
/>
</View>
);
}
export default Chat;
const styles = StyleSheet.create({
maincontainer: {
flex: 1,
},
});
When axios returns, it usually gives the response as res.data, so in your case try either res.data or res.data.yourToken (I'm not sure how your object is shaped).
Gurav,
As for your code above, the API calls will trigger even before you get currentuser or loginToken. You have to make the API calls after getting currentuser and loginToken. This can be handled gracefully with async/await.
example code:
useEffect(() => {
getData()
}, [])
useEffect(() => {
if(login_token && currentuser) {
//The API calls go here after you get the login_token and currentuser.
// The above condition is just an example but will vary based on your requirements
}
}, [login_token, currentuser])
const getData = async () => {
await getlogintoken()
await getcurrentuser()
}
const getlogintoken = async () => {
await AsyncStorage.getItem("login_token").then(res => {
if (res != null) {
setLoading(false)
setlogin_token(res)
}
else alert("No login token found")
})
}
const getcurrentuser = async () => {
await AsyncStorage.getItem("currentuser").then(res => {
if (res != null) setcurrentuser(res)
else alert("Current user not found")
})
}
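Following on from that, a possible way to fill in the dependent effect (just a sketch; getexpertpublicprofile and getConvo_details are the functions from the question, and the exact guard condition is an assumption):
useEffect(() => {
  // fire the authenticated calls only once both values exist in state
  if (login_token && currentuser) {
    getexpertpublicprofile();
    getConvo_details();
  }
}, [login_token, currentuser]);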

How to implement Agora SDK video call in a react-native project

I successfully implemented the Agora SDK video call module with virtual background in my react.js web app, but when I try to implement it in the react-native mobile version I keep getting errors I don't know how to solve. I'm a bit new to react-native, so this might be an easy fix, but I can't find it.
Basically, after submitting a form with the uid, channel, role, and token (I have a token service) the videocall component is rendered.
These are my dependencies
"agora-access-token": "^2.0.4",
"agora-react-native-rtm": "^1.5.0",
"agora-extension-virtual-background": "^1.1.1",
"agora-rtc-sdk-ng": "^4.14.0",
"agora-rn-uikit": "^4.0.0",
"axios": "^0.27.2",
"react": "18.0.0",
"react-native": "0.69.4",
"react-native-agora": "^3.7.1",
"react-native-dotenv": "^3.3.1"
This is the main videocall component.
import React,{ useEffect } from "react";
import { useState } from "react";
import axios from "axios";
import { Call } from "./components/Call";
const VideoCallApp = ({ videoCallData }) => {
const [token, setToken] = useState("");
const [virtualBackgroundData, setVirtualBackgroundData] = useState({
type: "img",
// example
// type: 'img',
// value: ''
//
// type: 'blur',
// value: integer // blurring degree, low (1), medium (2), or high (3).
//
// type: 'color',
// value: string // color on hex or string
});
useEffect(() => {
const getToken = async () => {
const url = `${process.env.REACT_APP_AGORA_TOKEN_SERVICE}/rtc/${videoCallData.channel}/${videoCallData.role}/uid/${videoCallData.uid}`;
try {
const response = await axios.get(url);
const token = response.data.rtcToken;
setToken(token);
} catch (err) {
alert(err);
}
};
getToken();
}, []);
return (
token && (
<Call
rtcProps={{
appId: process.env.REACT_APP_AGORA_APP_ID,
channel: videoCallData.channel,
token: token,
uid: videoCallData.uid,
}}
virtualBackground={virtualBackgroundData}
/>
)
);
};
export default VideoCallApp;
Which renders the Call component, it has more functionality for the virtual background.
import { useEffect, useState } from 'react'
import AgoraRTC from 'agora-rtc-sdk-ng';
import VirtualBackgroundExtension from 'agora-extension-virtual-background';
import { LocalVideo } from './LocalVideo';
import { RemoteVideo } from './RemoteVideo';
import { VideoControllers } from './VideoButtons';
import { View } from 'react-native';
const client = AgoraRTC.createClient({ mode: "rtc", codec: "vp8" });
const extension = new VirtualBackgroundExtension();
AgoraRTC.registerExtensions([extension]);
export const Call = ({ rtcProps = {}, virtualBackground = {} }) => {
const [localTracks, setLocalTracks] = useState({
audioTrack: null,
videoTrack: null
});
const [processor, setProcessor] = useState(null);
useEffect(() => {
if (processor) {
try {
const initProcessor = async () => {
// Initialize the extension and pass in the URL of the Wasm file
await processor.init(process.env.PUBLIC_URL + "/assets/wasms");
// Inject the extension into the video processing pipeline in the SDK
localTracks.videoTrack.pipe(processor).pipe(localTracks.videoTrack.processorDestination);
playVirtualBackground();
}
initProcessor()
} catch (e) {
console.log("Fail to load WASM resource!"); return null;
}
}
}, [processor]);
useEffect(() => {
if (localTracks.videoTrack && processor) {
setProcessor(null);
}
}, [localTracks]);
const playVirtualBackground = async () => {
try {
switch (virtualBackground.type) {
case 'color':
processor.setOptions({ type: 'color', color: virtualBackground.value });
break;
case 'blur':
processor.setOptions({ type: 'blur', blurDegree: Number(virtualBackground.value) });
break;
case 'img':
const imgElement = document.createElement('img');
imgElement.onload = async () => {
try {
processor.setOptions({ type: 'img', source: imgElement });
await processor.enable();
} catch (error) {
console.log(error)
}
}
imgElement.src = process.env.PUBLIC_URL + '/assets/backgrounds/background-7.jpg';
imgElement.crossOrigin = "anonymous";
break;
default:
break;
}
await processor.enable();
} catch (error) {
console.log(error)
}
}
const join = async () => {
await client.join(rtcProps.appId, rtcProps.channel, rtcProps.token, Number(rtcProps.uid));
}
const startVideo = () => {
AgoraRTC.createCameraVideoTrack()
.then(videoTrack => {
setLocalTracks(tracks => ({
...tracks,
videoTrack
}));
client.publish(videoTrack);
videoTrack.play('local');
})
}
const startAudio = () => {
AgoraRTC.createMicrophoneAudioTrack()
.then(audioTrack => {
setLocalTracks(tracks => ({
...tracks,
audioTrack
}));
client.publish(audioTrack);
});
}
const stopVideo = () => {
localTracks.videoTrack.close();
localTracks.videoTrack.stop();
client.unpublish(localTracks.videoTrack);
}
const stopAudio = () => {
localTracks.audioTrack.close();
localTracks.audioTrack.stop();
client.unpublish(localTracks.audioTrack);
}
const leaveVideoCall = () => {
stopVideo();
stopAudio();
client.leave();
}
async function startOneToOneVideoCall() {
join()
.then(() => {
startVideo();
startAudio();
client.on('user-published', async (user, mediaType) => {
if (client._users.length > 1) {
client.leave();
alert('Please Wait Room is Full');
return;
}
await client.subscribe(user, mediaType);
if (mediaType === 'video') {
const remoteVideoTrack = user.videoTrack;
remoteVideoTrack.play('remote');
}
if (mediaType === 'audio') {
user.audioTrack.play();
}
});
});
}
// Initialization
function setProcessorInstance() {
if (!processor && localTracks.videoTrack) {
// Create a VirtualBackgroundProcessor instance
setProcessor(extension.createProcessor());
}
}
async function setBackground() {
if (localTracks.videoTrack) {
setProcessorInstance()
}
}
useEffect(() => {
startOneToOneVideoCall();
}, []);
return (
<View >
<View>
<LocalVideo />
<RemoteVideo />
<VideoControllers
actions={{
startAudio,
stopAudio,
startVideo,
stopVideo,
leaveVideoCall,
startOneToOneVideoCall,
setBackground
}}
/>
</View>
</View>
)
}
The local and remote video components are empty Views where the videos are displayed, and the VideoControllers are Buttons that manage the video call.
When I run the app the form works fine, but as soon as I submit it the app crashes with these errors.
WARN `new NativeEventEmitter()` was called with a non-null argument without the required `addListener` method.
WARN `new NativeEventEmitter()` was called with a non-null argument without the required `removeListeners` method.
LOG Running "videocall" with {"rootTag":1}
ERROR TypeError: window.addEventListener is not a function. (In 'window.addEventListener("online", function () {
_this32.networkState = EB.ONLINE;
})', 'window.addEventListener' is undefined)
VideoCallApp
Form#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.videocall&modulesOnly=false&runModule=true:121056:41
RCTView
View
RCTView
View
AppContainer#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.videocall&modulesOnly=false&runModule=true:78626:36
videocall(RootComponent)
ERROR TypeError: undefined is not an object (evaluating '_$$_REQUIRE(_dependencyMap[5], "./components/Call").Call')
Something is happening in the Call component, and I think it might be the DOM manipulation for the videos, but I can't find an example of a react-native project with the Agora SDK.
I don't want to use the UIKit because, even though it works, I can't use the virtual background, which I need for this project. Can anyone help me?
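As context, agora-rtc-sdk-ng is the web SDK and expects browser globals such as window and document, which don't exist in React Native; the native route normally goes through react-native-agora instead. A rough sketch of the native equivalent, assuming react-native-agora v3's RtcEngine API (channel, token and uid would come from rtcProps as above):
import RtcEngine from 'react-native-agora';

// rough sketch: create the native engine instead of AgoraRTC.createClient()
async function startNativeCall(appId, channel, token, uid) {
  const engine = await RtcEngine.create(appId); // native module, no window/document needed
  await engine.enableVideo();
  await engine.joinChannel(token, channel, null, uid);
  return engine;
}
Rendering then goes through react-native-agora's RtcLocalView / RtcRemoteView components rather than DOM elements, and the web-only agora-extension-virtual-background would not apply there, so the virtual background would need a different approach on mobile.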

EXPO-AV not playing sound and not throwing any errors

I am trying to load the sound which I retrieve from my own API with the expo-av createAsync function:
const PlayerWidget: React.FC = () => {
const [song, setSong] = useState(null);
const [sound, setSound] = useState<Sound | null>(null);
const [isPlaying, setIsPlaying] = useState<boolean>(true);
const [liked, setLiked] = useState<boolean>(false);
const [duration, setDuration] = useState<number | null>(null);
const [position, setPosition] = useState<number | null>(null);
const { songId } = useContext(AppContext);
const { data, error } = useQuery(SongQuery, {
variables: { _id: songId },
});
useEffect(() => {
if (data && data.song) {
setSong(data.song);
}
}, [data]);
useEffect(() => {
if (song) {
playCurrentSong();
}
}, [song]);
const playCurrentSong = async () => {
if (sound) {
await sound.unloadAsync();
}
const { sound: newSound } = await Sound.createAsync(
{ uri: song.soundUri },
{ shouldPlay: isPlaying }
);
console.log("sound" + newSound);
setSound(newSound);
};
const onPlayPausePress = async () => {
if (!sound) {
console.log("no sound");
return;
}
if (isPlaying) {
await sound.pauseAsync();
} else {
await sound.playAsync();
}
};
const onLikeSong = async () => {
try {
setLiked(true);
} catch (e) {
console.log(e);
}
};
const getProgress = () => {
if (sound === null || duration === null || position === null) {
return 0;
}
return (position / duration) * 100;
};
const onPlaybackStatusUpdate = (status) => {
setIsPlaying(status.isPlaying);
setDuration(status.durationMillis);
setPosition(status.positionMillis);
};
}
Weirdly enough, the log after the call never fires. I don't get any errors either, which makes it hard to debug where this goes wrong; the URI is valid and points to an mp3 file, and the state is set correctly. Any pointers on how I could debug this further?
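One way to narrow it down (a sketch only; the try/catch and the status callback are additions to the question's playCurrentSong, the rest mirrors it): wrap createAsync in try/catch so a silent rejection becomes visible, and pass onPlaybackStatusUpdate as the third argument so loading and playback errors get reported:
const playCurrentSong = async () => {
  if (sound) {
    await sound.unloadAsync();
  }
  try {
    const { sound: newSound, status } = await Sound.createAsync(
      { uri: song.soundUri },
      { shouldPlay: isPlaying },
      onPlaybackStatusUpdate // third argument: called with status updates and errors
    );
    console.log('sound created, status:', status);
    setSound(newSound);
  } catch (e) {
    // if createAsync rejects (bad URI, unsupported format, audio mode), it lands here
    console.log('createAsync failed', e);
  }
};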

React-native: How to change the audio speed in expo-av

I'm having trouble changing the 'rate' prop to change the speed of the audio being played.
I'm using expo-av (https://docs.expo.dev/versions/latest/sdk/av/).
Here's my code:
import {Text, View, Alert } from 'react-native'
import * as MediaLibrary from 'expo-media-library';
import { DataProvider } from 'recyclerlistview';
import {Audio} from 'expo-av';
import { play, pause, resume, playNext } from "../misc/AudioController";
export const AudioContext = createContext()
export class AudioProvider extends Component {
constructor(props) {
super(props);
this.state = {
audioFiles: [],
permissionError: false,
dataProvider: new DataProvider((r1, r2) => r1 !== r2),
playbackObj: null,
soundObj: null,
currentAudio: {},
isPlaying: false,
currentAudioIndex: null,
playbackPosition: null,
playbackDuration: null,
rate: 2.0,
};
this.totalAudioCount = 0;
}
permissionAlert = () => {
Alert.alert("Permission Required", "This app needs to read audio files", [
{ text: "I am ready", onPress: () => this.getPermission() },
{
text: "cancel",
onPress: () => this.permissionAlert(),
},
]);
};
getAudioFiles = async () => {
const { dataProvider, audioFiles } = this.state;
let media = await MediaLibrary.getAssetsAsync({
mediaType: "audio",
});
media = await MediaLibrary.getAssetsAsync({
mediaType: "audio",
first: media.totalCount,
});
this.totalAudioCount = media.totalCount;
this.setState({
...this.state,
dataProvider: dataProvider.cloneWithRows([
...audioFiles,
...media.assets,
]),
audioFiles: [...audioFiles, ...media.assets],
});
};
loadPreviousAudio = async () => {
let previousAudio = await AsyncStorageLib.getItem("previousAudio");
let currentAudio;
let currentAudioIndex;
if (previousAudio === null) {
currentAudio = this.state.audioFiles[0];
currentAudioIndex = 0;
} else {
previousAudio = JSON.parse(previousAudio);
currentAudio = previousAudio.audio;
currentAudioIndex = previousAudio.index;
}
this.setState({ ...this.state, currentAudio, currentAudioIndex });
};
getPermission = async () => {
// {
// "canAskAgain": true,
// "expires": "never",
// "granted": false,
// "status": "undetermined",
// }
const permission = await MediaLibrary.getPermissionsAsync();
if (permission.granted) {
this.getAudioFiles();
}
if (!permission.canAskAgain && !permission.granted) {
this.setState({ ...this.state, permissionError: true });
}
if (!permission.granted && permission.canAskAgain) {
const { status, canAskAgain } =
await MediaLibrary.requestPermissionsAsync();
if (status === "denied" && canAskAgain) {
this.permissionAlert();
}
if (status === "granted") {
this.getAudioFiles();
}
if (status === "denied" && !canAskAgain) {
this.setState({ ...this.state, permissionError: true });
}
}
};
onPlaybackStatusUpdate = async (playbackStatus) => {
console.log("hier");
if (playbackStatus.isLoaded && playbackStatus.isPlaying) {
this.updateState(this, {
playbackPosition: playbackStatus.positionMillis,
playbackDuration: playbackStatus.durationMillis,
});
}
if (playbackStatus.didJustFinish) {
const nextAudioIndex = this.state.currentAudioIndex + 1;
if (nextAudioIndex >= this.totalAudioCount) {
this.state.playbackObj.unloadAsync();
this.updateState(this, {
soundObj: null,
currentAudio: this.state.audioFiles[0],
isPlaying: false,
currentAudioIndex: 0,
playbackPosition: null,
playbackDuration: null,
});
}
const audio = this.state.audioFiles[nextAudioIndex];
const status = await playNext(this.state.playbackObj, audio.uri);
this.updateState(this, {
soundObj: status,
currentAudio: audio,
isPlaying: true,
currentAudioIndex: nextAudioIndex,
});
}
};
componentDidMount() {
this.getPermission();
if (this.state.playbackObj === null) {
this.setState({ ...this.state, playbackObj: new Audio.Sound(), });
}
}
updateState = (prevState, newState = {}) => {
this.setState({ ...prevState, ...newState });
};
render() {
const {
audioFiles,
dataProvider,
permissionError,
playbackObj,
soundObj,
currentAudio,
isPlaying,
currentAudioIndex,
playbackPosition,
playbackDuration,
rate,
} = this.state;
if (permissionError)
return (
<View
style={{
flex: 1,
justifyContent: "center",
alignItems: "center",
}}
>
<Text>It looks like you haven't accepted the permission</Text>
</View>
);
return (
<AudioContext.Provider
value={{
audioFiles,
dataProvider,
playbackObj,
soundObj,
currentAudio,
isPlaying,
currentAudioIndex,
totalAudioCount: this.totalAudioCount,
playbackPosition,
playbackDuration,
rate,
updateState: this.updateState,
loadPreviousAudio: this.loadPreviousAudio,
onPlaybackStatusUpdate: this.onPlaybackStatusUpdate
}}
>
{this.props.children}
</AudioContext.Provider>
);
}
}
import {Component} from 'react';
import AsyncStorageLib from '@react-native-async-storage/async-storage';
export default AudioProvider;
and here's some more:
// play audio
// Import the react-native-sound module
import { PitchCorrectionQuality,shouldCorrectPitch, rate } from "expo-av/build/AV.types";
export const play = async (playbackObj, uri,) => {
try {
return await playbackObj.loadAsync(
{uri},
{shouldPlay: true},
);
} catch (error) {
console.log('error inside play helper method', error.message)
}
};
//pause
export const pause = async playbackObj => {
try {
// playbackObj.setRateAsync(rate = 2.0, shouldCorrectPitch = true, PitchCorrectionQuality= High);
return await playbackObj.setStatusAsync({
shouldPlay: false},
);
} catch (error) {
console.log('error inside pause helper method', error.message)
}
};
//resume
export const resume = async playbackObj => {
try {
return await playbackObj.playAsync(
);
} catch (error) {
console.log('error inside pause resume method', error.message)
}
};
//select next
export const playNext = async (playbackObj, uri) => {
try {
await playbackObj.stopAsync()
await playbackObj.unloadAsync();
return await play(playbackObj, uri);
} catch (error) {
console.log('error inside playNext helper method')
}
}
I've tried including rate: 2.0 inside this.state ({ audioFiles: [], permissionError: false, etc. }), but it didn't work.
I've also tried doing await playbackObj.setRateAsync() in the 2nd code snippet.
Any suggestions?
Never mind, I found the solution. Here's my updated code:
// play audio
// Import the react-native-sound module
import { PitchCorrectionQuality,shouldCorrectPitch, rate } from "expo-av/build/AV.types";
export const play = async (playbackObj, uri,) => {
try {
await playbackObj.loadAsync(
{uri},
{shouldPlay: true},
);
return await playbackObj.setStatusAsync({ rate: 0.9749090909 });
} catch (error) {
console.log('error inside play helper method', error.message)
}
};
//pause
export const pause = async playbackObj => {
try {
return await playbackObj.setStatusAsync({
shouldPlay: false,
rate: 0.9749090909,
});
} catch (error) {
console.log('error inside pause helper method', error.message)
}
};
//resume
export const resume = async playbackObj => {
try {
return await playbackObj.playAsync(
);
} catch (error) {
console.log('error inside pause resume method', error.message)
}
};
//select next
export const playNext = async (playbackObj, uri) => {
try {
await playbackObj.stopAsync()
await playbackObj.unloadAsync();
return await play(playbackObj, uri);
} catch (error) {
console.log('error inside playNext helper method')
}
}
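As a side note (not part of the original post), expo-av also exposes setRateAsync on the playback object, which may read more clearly than passing rate through setStatusAsync; a small sketch in the same helper style, assuming a recent expo-av where Audio.PitchCorrectionQuality is available:
// change playback speed with pitch correction
import { Audio } from 'expo-av';

export const setSpeed = async (playbackObj, rate) => {
  try {
    // setRateAsync(rate, shouldCorrectPitch, pitchCorrectionQuality)
    return await playbackObj.setRateAsync(rate, true, Audio.PitchCorrectionQuality.High);
  } catch (error) {
    console.log('error inside setSpeed helper method', error.message);
  }
};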

Mobile project with expo and hapi.js

I'm struggling with an error that I'm not able to solve, and I don't know if it's happening in the front end or the back end of the app.
I'm trying to follow a tutorial where the author sets up an app that records your voice and returns it as text from the Google Speech-to-Text API.
This is the front end part. The app is failing at the axios.post: the request never goes through and keeps loading until the error log says it cannot connect to the path.
import React, { Component } from 'react'
import {
StyleSheet, Text, View, TouchableOpacity, ActivityIndicator, Platform,
} from 'react-native'
// import { Audio, Permissions, FileSystem } from 'expo'
import * as Permissions from 'expo-permissions'
import { Audio } from 'expo-av'
import * as FileSystem from 'expo-file-system';
import axios from 'axios'
const styles = StyleSheet.create({
container: {
marginTop: 40,
backgroundColor: '#fff',
alignItems: 'center',
},
button: {
backgroundColor: '#1e88e5',
paddingVertical: 20,
width: '90%',
alignItems: 'center',
borderRadius: 5,
padding: 8,
marginTop: 20,
},
text: {
color: '#fff',
}
})
/*this.recordingSettings = JSON.parse(JSON.stringify(Audio.RECORDING_OPTIONS_PRESET_HIGH_QUALITY = */let recordingOptions = {
android: {
extension: '.m4a',
outputFormat: Audio.RECORDING_OPTION_ANDROID_OUTPUT_FORMAT_MPEG_4,
audioEncoder: Audio.RECORDING_OPTION_ANDROID_AUDIO_ENCODER_AAC,
sampleRate: 44100,
numberOfChannels: 2,
bitRate: 128000,
},
ios: {
extension: '.m4a',
outputFormat: Audio.RECORDING_OPTION_IOS_OUTPUT_FORMAT_MPEG4AAC,
audioQuality: Audio.RECORDING_OPTION_IOS_AUDIO_QUALITY_HIGH,
sampleRate: 44100,
numberOfChannels: 2,
bitRate: 128000,
linearPCMBitDepth: 16,
linearPCMIsBigEndian: false,
linearPCMIsFloat: false,
},
};
export default class SpeechToTextButton extends Component {
constructor(props) {
super(props)
this.recording = null
this.state = {
isRecording: false,
//we would like to know if data fetching is in progress
isFetching: false,
//we will write the transcript result here
transcript: '',
}
}
startRecording = async () => {
// request permissions to record audio
const { status } = await Permissions.askAsync(Permissions.AUDIO_RECORDING)
// if the user doesn't allow us to do so - return as we can't do anything further :(
if (status !== 'granted') return
// when status is granted - setting up our state
this.setState({ isRecording: true })
// basic settings before we start recording,
// you can read more about each of them in expo documentation on Audio
await Audio.setAudioModeAsync({
allowsRecordingIOS: true,
interruptionModeIOS: Audio.INTERRUPTION_MODE_IOS_DO_NOT_MIX,
playsInSilentModeIOS: true,
interruptionModeAndroid: Audio.INTERRUPTION_MODE_ANDROID_DO_NOT_MIX,
playThroughEarpieceAndroid: true,
})
const recording = new Audio.Recording()
try {
// here we pass our recording options
await recording.prepareToRecordAsync(recordingOptions)
// and finally start the record
await recording.startAsync()
console.log('funcionaaa')
} catch (error) {
console.log(error)
// we will take a closer look at stopRecording function further in this article
this.stopRecording()
console.log('no funca')
}
// if recording was successful we store the result in variable,
// so we can refer to it from other functions of our component
this.recording = recording
}
stopRecording = async () => {
// set our state to false, so the UI knows that we've stopped the recording
this.setState({ isRecording: false })
try {
// stop the recording
await this.recording.stopAndUnloadAsync()
console.log('aca tiene que parar')
} catch (error) {
console.log(error)
}
}
getTranscription = async () => {
// set isFetching to true, so the UI knows about it
this.setState({ isFetching: true })
try {
// take the uri of the recorded audio from the file system
const { uri } = await FileSystem.getInfoAsync(this.recording.getURI())
// now we create formData which will be sent to our backend
const formData = new FormData()
formData.append('file', {
uri,
// as different audio types are used for android and ios - we should handle it
type: Platform.OS === 'ios' ? 'audio/x-wav' : 'audio/m4a',
name: Platform.OS === 'ios' ? `${Date.now()}.wav` : `${Date.now()}.m4a`,
})
// post the formData to our backend
const { data } = await axios.post('http://190.19.68.120/api/speech', formData, {
headers: {
'Content-Type': 'multipart/form-data',
},
})
console.log(data)
// set transcript from the data which we received from the api
this.setState({ transcript: data.transcript })
} catch (error) {
console.log('There was an error reading file', error.request)
this.stopRecording()
// we will take a closer look at resetRecording function further down
this.resetRecording()
}
// set isFetching to false so the UI can properly react on that
this.setState({ isFetching: false })
}
deleteRecordingFile = async () => {
// deleting file
try {
const info = await FileSystem.getInfoAsync(this.recording.getURI())
await FileSystem.deleteAsync(info.uri)
} catch (error) {
console.log('There was an error deleting recorded file', error)
}
}
resetRecording = () => {
this.deleteRecordingFile()
this.recording = null
}
handleOnPressOut = () => {
// first we stop the recording
this.stopRecording()
console.log('para en el pressout')
// second we interact with our backend
this.getTranscription()
}
render() {
const {
isRecording, transcript, isFetching,
} = this.state
return (
<View style={styles.container}>
<TouchableOpacity
style={styles.button}
onPressIn={this.startRecording}
onPressOut={this.handleOnPressOut}
>
{isFetching && <ActivityIndicator color="#ffffff" />}
{!isFetching &&
<Text style={styles.text}>
{isRecording ? 'Recording...' : 'Start recording'}
</Text>
}
</TouchableOpacity>
<Text>
{transcript}
</Text>
</View>
)
}
}
Here is the back end part, where I receive the request from the front end and send the audio to the Google API:
'use strict';
const Hapi = require('@hapi/hapi');
const fs = require('fs')
const axios = require('axios')
const speech = require('@google-cloud/speech');
const ffmpeg = require('fluent-ffmpeg');
const client = new speech.SpeechClient();
const init = async () => {
const server = Hapi.server({
port: 3005,
host: 'localhost'
});
server.route({
method: 'POST',
path: '/speech',
config: {
handler: async (request, h) => {
const data = request.payload;
console.log(data)
if (data.file) {
const name = data.file.hapi.filename;
const path = __dirname + "/uploads/" + name;
const encodedPath = __dirname + "/uploads/encoded_" + name;
const file = fs.createWriteStream(path);
file.on('error', (err) => console.error(err));
data.file.pipe(file);
return new Promise(resolve => {
data.file.on('end', async (err) => {
const ret = {
filename: data.name,
headers: data.file.hapi.headers
}
ffmpeg()
.input(path)
.outputOptions([
'-f s16le',
'-acodec pcm_s16le',
'-vn',
'-ac 1',
'-ar 41k',
'-map_metadata -1'
])
.save(encodedPath)
.on('end', async () => {
const savedFile = fs.readFileSync(encodedPath)
const audioBytes = savedFile.toString('base64');
const audio = {
content: audioBytes,
}
const sttConfig = {
enableAutomaticPunctuation: false,
encoding: "LINEAR16",
sampleRateHertz: 41000,
languageCode: "en-US",
model: "default"
}
const request = {
audio: audio,
config: sttConfig,
}
const [response] = await client.recognize(request);
const transcription = response.results
.map(result => result.alternatives[0].transcript)
.join('\n');
fs.unlinkSync(path)
fs.unlinkSync(encodedPath)
resolve(JSON.stringify({ ...ret, transcript: transcription }))
})
})
})
}
},
payload: {
output: 'stream',
parse: true,
}
}
})
await server.start();
console.log('Server running on %s', server.info.uri);
};
process.on('unhandledRejection', (err) => {
console.log(err);
});
init();
I thought I could try migrating the server to express.js, but under handler I have the payload key, and I don't know what to do with that in an express server.
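For what it's worth, a rough sketch of what the same upload route could look like in Express; multer is used here to parse the multipart/form-data body (playing the role of Hapi's payload: { output: 'stream', parse: true } config), and the ffmpeg + Google Speech part of the Hapi handler would carry over unchanged:
'use strict';
const express = require('express');
const multer = require('multer');

const app = express();
// multer saves each uploaded "file" field to disk and exposes it as req.file
const upload = multer({ dest: __dirname + '/uploads/' });

app.post('/speech', upload.single('file'), async (req, res) => {
  try {
    // req.file.path points to the saved audio; from here the existing
    // ffmpeg transcode + client.recognize() logic from the Hapi handler applies
    console.log('received upload at', req.file.path, 'named', req.file.originalname);
    // ... transcode, call the Speech-to-Text client, then respond:
    res.json({ filename: req.file.originalname, transcript: '...' });
  } catch (err) {
    console.error(err);
    res.status(500).json({ error: 'transcription failed' });
  }
});

app.listen(3005, () => console.log('Server running on port 3005'));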