Record Video with TensorFlow.js and React Native

I am trying to record video while using BlazePose with React Native. Wherever I add cameraRef.current.camera.recordAsync(), the video freezes and the app crashes.
From what I've found, it might need to go in handleCameraStream, but I am unsure. Here is that method:
const handleCameraStream = async (
  images: IterableIterator<tf.Tensor3D>,
  updatePreview: () => void,
  gl: ExpoWebGLRenderingContext
) => {
  const loop = async () => {
    // const video = cameraRef.current.camera.recordAsync()
    // Get the tensor and run pose detection.
    const imageTensor = images.next().value as tf.Tensor3D;

    const startTs = Date.now();
    const poses = await model!.estimatePoses(
      imageTensor,
      undefined,
      Date.now()
    );
    const latency = Date.now() - startTs;
    setFps(Math.floor(1000 / latency));
    setPoses(poses);
    tf.dispose([imageTensor]);

    if (rafId.current === 0) {
      return;
    }

    // Render camera preview manually when autorender=false.
    if (!AUTO_RENDER) {
      updatePreview();
      gl.endFrameEXP();
    }

    rafId.current = requestAnimationFrame(loop);
  };

  loop();
};
And here is my useEffect:
useEffect(() => {
  async function prepare() {
    rafId.current = null;

    // Set initial orientation.
    const curOrientation = await ScreenOrientation.getOrientationAsync();
    setOrientation(curOrientation);

    // Listens to orientation change.
    ScreenOrientation.addOrientationChangeListener((event) => {
      setOrientation(event.orientationInfo.orientation);
    });

    // Camera permission.
    await Camera.requestCameraPermissionsAsync();

    // Wait for tfjs to initialize the backend.
    await tf.ready();

    // Load model.
    const model = await posedetection.createDetector(
      posedetection.SupportedModels.BlazePose,
      {
        runtime: "tfjs",
        enableSmoothing: true,
        modelType: "full",
      }
    );
    setModel(model);

    // Ready!
    setTfReady(true);
  }

  prepare();
}, []);
And finally, the TensorCamera:
<TensorCamera
  ref={cameraRef}
  style={styles.camera}
  autorender={AUTO_RENDER}
  type={cameraType}
  // tensor related props
  resizeWidth={getOutputTensorWidth()}
  resizeHeight={getOutputTensorHeight()}
  resizeDepth={3}
  rotation={getTextureRotationAngleInDegrees()}
  onReady={handleCameraStream}
/>
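Editorial note: one hedged approach worth trying (an assumption, not a confirmed fix) is to start the recording once when the stream becomes ready rather than inside the per-frame loop, since Expo Camera's recordAsync() returns a promise that only resolves after stopRecording() is called. A minimal sketch:

const handleCameraStream = async (
  images: IterableIterator<tf.Tensor3D>,
  updatePreview: () => void,
  gl: ExpoWebGLRenderingContext
) => {
  // Start recording once; the promise resolves when stopRecording() is called later.
  cameraRef.current?.camera
    .recordAsync()
    .then((video) => console.log('Saved recording at', video.uri))
    .catch((e) => console.warn('recordAsync failed', e));

  const loop = async () => {
    // ...per-frame pose detection, exactly as above...
  };
  loop();
};

Whether recordAsync() works alongside the GL-based TensorCamera preview is not verified here; this only shows moving the call out of the animation loop.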

Related

Expo AV playback callback strange behaviour with state properties

I am starting to go nuts with this one. I am recording audio; once I stop the recording, as part of the same process I also load the audio so that it is ready to be played back when needed, and that is where I call setOnPlaybackStatusUpdate. I use the playback callback to update my currentSeconds state based on positionMillis.
The problem is the currentSeconds and recordedDuration state values that I am getting. How come their values change between the playAudio method, which triggers playback, and onPlaybackStatusUpdate, which is the callback?
When I log their values in both methods, this is what I get, even though I expect them to be the same:
In playAudio - currentSeconds: 0
In playAudio - recordedDuration: 4.5
In onPlaybackStatusUpdate - currentSeconds: 115.5
In onPlaybackStatusUpdate - recordedDuration: 0
And here is the code:
const AudioRecorder = useRef(new Audio.Recording());
const AudioPlayer = useRef(new Audio.Sound());
const timerMaxDuration = 120;
const [currentSeconds, setCurrentSeconds] = useState<number>(timerMaxDuration);
const [recordedDuration, setRecordedDuration] = useState<number>(0);

const stopRecording = async () => {
  try {
    await AudioRecorder.current.stopAndUnloadAsync();
    // To hear sound through the speaker and not the earpiece on iOS
    await Audio.setAudioModeAsync({ allowsRecordingIOS: false });
    const recordedURI = AudioRecorder.current.getURI();
    SetRecordingURI(recordedURI);
    AudioRecorder.current = new Audio.Recording();
    send('STOP');
    // Subtraction because during the recording there is a countdown from timerMaxDuration
    setRecordedDuration(+(timerMaxDuration - currentSeconds).toFixed(1));
    setCurrentSeconds(0);
    // Load the audio after recording so that it is ready to be played
    loadAudio(recordedURI);
  } catch (error) {
    console.log(error);
  }
};

const loadAudio = async (recordedUri) => {
  try {
    const playerStatus = await AudioPlayer.current.getStatusAsync();
    if (playerStatus.isLoaded === false) {
      AudioPlayer.current.setOnPlaybackStatusUpdate(onPlaybackStatusUpdate);
      await AudioPlayer.current.loadAsync(
        { uri: recordedUri },
        { progressUpdateIntervalMillis: 20 },
        true
      );
    }
  } catch (error) {
    console.log(error);
  }
};

const playAudio = async () => {
  console.log(`In playAudio - currentSeconds: ${currentSeconds}`);
  console.log(`In playAudio - recordedDuration: ${recordedDuration}`);
  try {
    const playerStatus = await AudioPlayer.current.getStatusAsync();
    if (playerStatus.isLoaded) {
      if (playerStatus.isPlaying === false) {
        AudioPlayer.current.playAsync();
        send('PLAY');
      }
    }
  } catch (error) {
    console.log(error);
  }
};

const onPlaybackStatusUpdate = (playbackStatus) => {
  if (playbackStatus.isPlaying) {
    console.log(`In onPlaybackStatusUpdate - currentSeconds: ${currentSeconds}`);
    console.log(`In onPlaybackStatusUpdate - recordedDuration: ${recordedDuration}`);
    if (currentSeconds >= recordedDuration) {
      stopAudio();
    } else {
      setCurrentSeconds(+(playbackStatus.positionMillis / 1000).toFixed(1));
    }
  }
};
OK, so there was nothing wrong with the playback callback. The thing is that the playback callback is an arrow function, which means the only value that changes inside the callback is its argument playbackStatus; the other values it closes over stay the same as they were when the function was created (a stale closure).
A workaround in React is to use useEffect in the following way, which gives access to the current state values currentSeconds and recordedDuration:
useEffect(() => {
  if (currentSeconds >= recordedDuration) {
    stopAudio();
  }
}, [currentSeconds]);

const onPlaybackStatusUpdate = (playbackStatus) => {
  if (playbackStatus.isPlaying) {
    setCurrentSeconds(+(playbackStatus.positionMillis / 1000).toFixed(1));
  }
};
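Another common workaround (an editorial addition, not part of the original answer) is to mirror the state in a ref so the callback always reads the latest value without needing a separate effect:

const recordedDurationRef = useRef(recordedDuration);

// Keep the ref in sync with the state value.
useEffect(() => {
  recordedDurationRef.current = recordedDuration;
}, [recordedDuration]);

const onPlaybackStatusUpdate = (playbackStatus) => {
  if (playbackStatus.isPlaying) {
    const seconds = +(playbackStatus.positionMillis / 1000).toFixed(1);
    if (seconds >= recordedDurationRef.current) {
      stopAudio();
    } else {
      setCurrentSeconds(seconds);
    }
  }
};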

Expo : Cannot load an AV asset from a null playback source

Hi, I was trying to use Expo-AV but keep getting the warning
[Unhandled promise rejection: Error: Cannot load an AV asset from a null playback source]
When the sound play function is first called it shows this warning and doesn't play, but when I call the function again it plays without the warning.
const [sound, setSound] = useState();
const [isPlaying, setIsPlaying] = useState(false);

async function playSound() {
  console.log("Loading Sound");
  const { sound } = await Audio.Sound.createAsync(
    { uri },
    { shouldPlay: true }
  );
  setSound(sound);
  console.log("Playing Sound");
  setIsPlaying(true);
  await sound.playAsync();
  sound._onPlaybackStatusUpdate = (status) => {
    if (status.didJustFinish) {
      setIsPlaying(false);
      console.log("Finished");
    }
  };
}

<TouchableOpacity onPress={playSound()}>
  <Text>Play</Text>
</TouchableOpacity>
Is there any way to play it properly after loading?
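Editorial aside (an observation, not part of the original question or answer): onPress={playSound()} invokes the function during render rather than on press, which may be why the first run happens before a valid source is available. Passing the function reference defers it to the actual press:

<TouchableOpacity onPress={playSound}>
  <Text>Play</Text>
</TouchableOpacity>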
This works for me; try it out.
Here I'm playing an audio file that was selected by the user with the DocumentPicker library from Expo.
const prepareToPlay = async (audioPath) => {
  const sound = new Audio.Sound();
  try {
    await sound.loadAsync({
      uri: audioPath.file,
      shouldPlay: true,
    });
    await sound.playAsync();
    // Your sound is playing!

    // Don't forget to unload the sound from memory
    // when you are done using the Sound object
    // await sound.unloadAsync();
  } catch (error) {
    // An error occurred!
    console.error('AUDIO PLAY: ', error);
  }
};
const audioToBePlayed = {
  "file": "file:///Users/crosbyroads/Library/Developer/CoreSimulator/Devices/54AE93CW-1667-491F-A9C7-B457N8BC1207/data/Containers/Data/Application/5C442DA1-4EEA-4F24-93C2-38131484EE9E/Library/Caches/ExponentExperienceData/%crosbyroads%252FPlaySounds/DocumentPicker/EE3ECE8E-DFDC-46C9-8B68-4A8E0A8BB808.wav",
  "name": "assets_audio_OS_SB_160_Dm_Cream_Stack_1.wav",
  "size": 3180496,
  "type": "audio/wav",
}
...
<Button onPress={() => prepareToPlay(audioToBePlayed)} title="Play"></Button>
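For the question's state-based version, the Expo AV docs suggest unloading the sound in a cleanup effect; a minimal sketch using the sound state from the question above:

useEffect(() => {
  return sound
    ? () => {
        // Free the native player when the component unmounts or the sound changes.
        sound.unloadAsync();
      }
    : undefined;
}, [sound]);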

Updating useState array from callback

I am building a React Native (Expo) app that scans for Bluetooth devices. The Bluetooth API exposes a callback for when devices are detected, which I use to put non-duplicate devices into an array:
const DeviceListView = () => {
  const [deviceList, setDeviceList] = useState([]);

  const startScanning = () => {
    manager.startDeviceScan(null, null, (error, device) => {
      // Add to device list if not already in list
      if (!deviceList.some(d => d.device.id == device.id)) {
        console.log(`Adding ${device.id} to list`);
        const newDevice = {
          device: device,
          // ...etc...
        };
        setDeviceList(old => [...old, newDevice]);
      }
    });
  }

  // map deviceList to components
  componentList = deviceList.map(...);
  return <View> {componentList} </View>
}
The problem is that the callback is called many many times faster than setDeviceList updates, so the duplicate checking doesn't work (if I log deviceList, it's just empty).
If I use an additional, separate regular (non-useState) array, the duplicate checking works, but the state doesn't update consistently:
const DeviceListView = () => {
  const [deviceList, setDeviceList] = useState([]);
  var deviceList2 = [];

  const startScanning = () => {
    manager.startDeviceScan(null, null, (error, device) => {
      // Add to device list if not already in list
      if (!deviceList2.some(d => d.device.id == device.id)) {
        console.log(`Adding ${device.id} to list`);
        const newDevice = {
          device: device,
          // ...etc...
        };
        deviceList2.push(newDevice);
        setDeviceList(old => [...old, newDevice]);
      }
    });
  }

  // map deviceList to components
  componentList = deviceList.map(...);
  return <View> {componentList} </View>
}
This code almost works, but the deviceList state doesn't update correctly: it shows the first couple of devices but then doesn't update again unless some other component causes a re-render.
What do I need to do to make this work as expected?
I would suggest wrapping your duplicate check within the state setter function itself, and returning the same device list if no new device has been found. This offloads race-condition handling to the underlying React implementation, which I've found to be good enough for most cases.
Thus it would look something like this:
const DeviceListView = () => {
  const [deviceList, setDeviceList] = useState([]);

  const startScanning = () => {
    manager.startDeviceScan(null, null, (error, device) => {
      // Add to device list if not already in list
      setDeviceList(old => {
        if (!old.some(d => d.device.id == device.id)) {
          console.log(`Adding ${device.id} to list`);
          const newDevice = {
            device: device,
            // ...etc...
          };
          return [...old, newDevice];
        }
        return old;
      });
    });
  }

  // map deviceList to components
  componentList = deviceList.map(...);
  return <View> {componentList} </View>
}
Since old is returned unchanged when no new unique device is found, React will also skip the next re-render, according to the docs (which is a neat optimisation :) ).
This is the preferred way to implement state updates that depend on previous state, according to the docs:
https://reactjs.org/docs/hooks-reference.html#functional-updates
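Assuming the Bluetooth API here is react-native-ble-plx, it may also be worth stopping the scan when the component unmounts; a minimal sketch:

useEffect(() => {
  // Stop the native scan when DeviceListView unmounts so it is not leaked.
  return () => {
    manager.stopDeviceScan();
  };
}, []);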
Convert your callback to a promise so that you wait until you have the complete device list. Check out the code below (PS: not tested, please change as you need).
const [deviceList, setDeviceList] = useState([]);
const [scanning, setScanning] = useState(false);

useEffect(() => {
  if (scanning) {
    setDeviceList([]);
    startScanning();
  }
}, [scanning]);

const subscription = manager.onStateChange(state => {
  if (state === "PoweredOn" && scanning === false) {
    setCanScan(true);
    subscription.remove();
  }
}, true);

const fetchScannedDevices = () => {
  return new Promise((resolve, reject) => {
    manager.startDeviceScan(null, null, (error, device) => {
      // Add to device list if not already in list
      if (!deviceList.some(d => d.device.id == device.id)) {
        console.log(`Adding ${device.id} to list`);
        const newDevice = {
          device: device,
          // ...etc...
        };
        resolve(newDevice);
      }
      if (error) {
        reject({});
      }
    });
  });
};

const startScanning = async () => {
  try {
    const newDevice = await fetchScannedDevices();
    setDeviceList(old => [...old, newDevice]);
  } catch (e) {
    //
  }
};

const handleScan = () => {
  setScanning(true);
};

// map deviceList to components
componentList = deviceList.map(() => {});

return (
  <View>
    <Button onPress={() => handleScan()}>
      Scan
    </Button>
    <View>{componentList}</View>
  </View>
);
};

wait for onSnapshot fetching data

I'm currently learning React Native (Expo).
I want to use redux and react-native-firebase.
When I subscribe to Firebase (onSnapshot) at startup of my app, it returns the data from Firebase. But since onSnapshot doesn't return a promise, I can't use it for my app-loading component.
Therefore, I also fetch the data from Firebase once to prevent the app from flickering.
The result is that at startup my app fetches the data twice.
So my question is:
How can I wait for onSnapshot to load my data from Firebase?
Thanks
const Manager = (props) => {
  //STATE
  const [init, setInit] = useState(false);

  //HOOKS
  const fetchData = useFetchData();
  useInitFirebaseSubscriptions();

  //FUNCTIONS
  async function onInit() {
    console.log('[MANAGER]: loading app...');
    await Promise.all([fetchData()]);
  }

  function onFinishedInit() {
    console.log('[MANAGER]: ...app loading successfull!');
    setInit(true);
  }

  //RETURN
  if (!init) {
    return <AppLoading startAsync={onInit} onFinish={onFinishedInit} onError={console.warn} />;
  } else {
    return props.children;
  }
};
export default Manager;

//INITIAL FETCH BEFORE RENDERING
export function useFetchData() {
  const dispatch = useDispatch();
  return async function () {
    try {
      await firestore()
        .collection('users')
        .get()
        .then((querySnapshot) => dispatch(actions.fetch(querySnapshot)));
    } catch (err) {
      console.log(err.message);
    }
  };
}

//INIT SUBSCRIPTIONS TO FIREBASE
export function useInitFirebaseSubscriptions() {
  const dispatch = useDispatch();
  useEffect(() => {
    console.log('[CONTROLLER]: subscribed to Firebase');
    const unsubscribe = firestore()
      .collection('users')
      .onSnapshot(
        (querySnapshot) => dispatch(action.fetch(querySnapshot)),
        (error) => console.log(error)
      );
    return () => {
      unsubscribe();
      console.log('[CONTROLLER]: unsubscribed from Firebase');
    };
  }, []);
}
[MANAGER]: loading app...
[MANAGER]: subscribed to Firebase
[USER_REDUCER]: fetched data
[USER_REDUCER]: fetched data
[MANAGER]: ...app loading successfull!
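Editorial sketch (not from the answer below): since the question is specifically about awaiting onSnapshot, the first emission can also be wrapped in a promise and awaited during app loading, while the existing subscription keeps handling later updates:

function waitForFirstSnapshot() {
  return new Promise((resolve, reject) => {
    const unsubscribe = firestore()
      .collection('users')
      .onSnapshot((querySnapshot) => {
        resolve(querySnapshot);
        // One-shot: stop this listener after the first result.
        unsubscribe();
      }, reject);
  });
}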
I think you can accomplish your goal by adding some "loading" state in Redux for when you are actively fetching data from Firebase. Add the state and the reducer cases specific to this data fetching/loading (see the reducer sketch after the example code below).
Example code:
export function useInitFirebaseSubscriptions() {
  const dispatch = useDispatch();
  useEffect(() => {
    console.log('[CONTROLLER]: subscribed to Firebase');
    dispatch(action.startFetch()); // <-- dispatch starting data fetch
    const unsubscribe = firestore()
      .collection('users')
      .onSnapshot(
        (querySnapshot) => {
          dispatch(action.fetch(querySnapshot));
          dispatch(action.completedFetch()); // <-- done fetching
        },
        (error) => {
          console.log(error);
          dispatch(action.completedFetch()); // <-- done fetching
        },
      );
    return () => {
      unsubscribe();
      console.log('[CONTROLLER]: unsubscribed from Firebase');
    };
  }, []);
}
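A minimal reducer sketch for that loading flag (the action type strings, the users field, and the isFetchingData name are assumptions chosen to match the actions and selector used in this answer):

const initialState = { isFetchingData: true, users: [] };

export function reducer(state = initialState, action) {
  switch (action.type) {
    case 'START_FETCH':
      return { ...state, isFetchingData: true };
    case 'COMPLETED_FETCH':
      return { ...state, isFetchingData: false };
    case 'FETCH':
      // Store whatever shape your action.fetch(querySnapshot) payload uses.
      return { ...state, users: action.payload };
    default:
      return state;
  }
}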
Select the loading state from the Redux store and conditionally render the loading UI; otherwise render the passed children.
const Manager = (props) => {
  const isFetchingData = useSelector(state => state.isFetchingData);

  if (isFetchingData) {
    return <AppLoadingIndicator />;
  }

  return props.children; // *
};
* Generally you may add some conditional rendering here depending on whether data was actually fetched and is just empty, or whether there was an error, etc.; basically, surface a bit of result status.

Making predictions on live video feed using React Native and Tensorflow.js

I have set up my React Native app, done all the installations and configurations of unimodules, and the packages are working as expected. No problems with dependencies, etc.
Now I want to use a TensorFlow model that I've trained with Teachable Machine by Google, but I can't figure out how to use it with the camera, because I'd like to process the frames in real time, just as the TensorFlow React Native API docs describe.
This is code I found online; I will adapt it to my model, but the problem is it only runs the model when the user takes a picture. I want the camera to run the model in real time, just like face detection or a barcode scanner.
Main.js
import React, {useRef, useEffect, useState} from 'react';
import {View, StyleSheet, Dimensions, Platform} from 'react-native';
import {
  getModel,
  convertBase64ToTensor,
  startPrediction,
} from '../../helpers/tensor-helper';
import {Camera} from 'expo-camera';
import * as tf from '@tensorflow/tfjs';
import '@tensorflow/tfjs-react-native';
import {
  cameraWithTensors,
  bundleResourceIO,
} from '@tensorflow/tfjs-react-native';

const TensorCamera = cameraWithTensors(Camera);

const Main = () => {
  const [model, setModel] = useState();
  const [prediction, setPredictions] = useState();
  const cameraRef = useRef(null);

  let requestAnimationFrameId = 0;
  let frameCount = 0;
  let makePredictionsEveryNFrame = 1;

  const modelJson = require('../../model/model.json');
  const modelWeights = require('../../model/weights.bin');

  const getModel = async () => {
    try {
      await tf.ready();
      const model = await tf.loadLayersModel(
        bundleResourceIO(modelJson, modelWeights),
      );
      return model;
    } catch (error) {
      console.log('Could not load model', error);
    }
  };

  useEffect(() => {
    setModel(getModel());
  }, []);

  useEffect(() => {
    return () => {
      cancelAnimationFrame(requestAnimationFrameId);
    };
  }, [requestAnimationFrameId]);

  const handleCameraStream = tensors => {
    if (!tensors) {
      console.log('Image not found!');
    }
    const loop = async () => {
      if (frameCount % makePredictionsEveryNFrame === 0) {
        const imageTensor = tensors.next().value;
        if (model) {
          const results = await startPrediction(model, imageTensor);
          setPredictions(results);
          console.log(`prediction: ${JSON.stringify(prediction)}`);
        }
        tf.dispose(tensors);
      }
      frameCount += 1;
      frameCount = frameCount % makePredictionsEveryNFrame;
      requestAnimationFrameId = requestAnimationFrame(loop);
    };
    console.log(`prediction: ${JSON.stringify(prediction)}`);
    loop();
    console.log(`prediction: ${JSON.stringify(prediction)}`);
  };

  let textureDims;
  if (Platform.OS === 'ios') {
    textureDims = {
      height: 1920,
      width: 1080,
    };
  } else {
    textureDims = {
      height: 1200,
      width: 1600,
    };
  }

  return (
    <View style={styles.container}>
      <TensorCamera
        ref={cameraRef}
        // Standard Camera props
        style={styles.camera}
        type={Camera.Constants.Type.back}
        flashMode={Camera.Constants.FlashMode.off}
        // Tensor related props
        cameraTextureHeight={textureDims.height}
        cameraTextureWidth={textureDims.width}
        resizeHeight={50}
        resizeWidth={50}
        resizeDepth={3}
        onReady={tensors => handleCameraStream(tensors)}
        autorender={true}
      />
    </View>
  );
};

export default Main;
tensor-helper.js:
import * as tf from '@tensorflow/tfjs';
import {bundleResourceIO, decodeJpeg} from '@tensorflow/tfjs-react-native';
import * as tfc from '@tensorflow/tfjs-core';
import {Base64Binary} from '../utils/utils';

const BITMAP_DIMENSION = 224;
const modelJson = require('../model/model.json');
const modelWeights = require('../model/weights.bin');

// 0: channel from JPEG-encoded image
// 1: gray scale
// 3: RGB image
const TENSORFLOW_CHANNEL = 3;

export const getModel = async () => {
  try {
    await tf.ready();
    const model = await tf.loadLayersModel(
      bundleResourceIO(modelJson, modelWeights),
    );
    return model;
  } catch (error) {
    console.log('Could not load model', error);
  }
};

export const convertBase64ToTensor = async base64 => {
  try {
    const uIntArray = Base64Binary.decode(base64);
    // decode a JPEG-encoded image to a 3D Tensor of dtype
    const decodedImage = decodeJpeg(uIntArray, 3);
    // reshape the Tensor into a 4D array
    return decodedImage.reshape([
      1,
      BITMAP_DIMENSION,
      BITMAP_DIMENSION,
      TENSORFLOW_CHANNEL,
    ]);
  } catch (error) {
    console.log('Could not convert base64 string to tensor', error);
  }
};

export const startPrediction = async (model, tensor) => {
  try {
    // predict against the model
    const output = await model.predict(tensor);
    // return typed array
    return tfc.tensor().dataSync();
  } catch (error) {
    console.log('Error predicting from tensor image', error);
  }
};
I edited the files and got this as output:
LOG prediction: undefined
LOG prediction: undefined
WARN Possible Unhandled Promise Rejection (id: 1):
Error: When using targetShape.depth=3, targetShape.width must be a multiple of 4. Alternatively do not call detectGLCapabilities()
fromTexture#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:267911:24
nextFrameGenerator$#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:268598:67
tryCatch#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26537:23
invoke#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26710:32
loop$#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:126503:43
tryCatch#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26537:23
invoke#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26710:32
tryCatch#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26537:23
invoke#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26610:30
http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26640:19
tryCallTwo#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:31390:9
doResolve#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:31554:25
Promise#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:31413:14
callInvokeWithMethodAndArg#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26639:33
enqueue#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26644:157
async#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26661:69
loop#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:126494:42
handleCameraStream#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:126535:11
onReady#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:126572:34
onGLContextCreate$#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:268641:37
tryCatch#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26537:23
invoke#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:26710:32
__callImmediates#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:3317:35
http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:3096:34
__guard#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:3300:15
flushedQueue#http://localhost:8081/index.bundle?platform=android&dev=true&minify=false&app=com.AppName&modulesOnly=false&runModule=true:3095:21
flushedQueue#[native code]
invokeCallbackAndReturnFlushedQueue#[native code]
OK, so I did this a while back (last year), so I might have forgotten something, but you can refer to the code here; it uses Expo and makes predictions on a live video feed. Just pardon the really bad code (I write better code now).
Anyway, this is a simple update of what you need to do, which mainly concerns handleCameraStream(). You will need two different useEffect hooks: one for initially loading the model, and one for cancelling the animation frame, which you will need when making predictions continuously.
Set the model into state; then you can access it via model from any part of the file. I did the same for the predictions.
I have also added the ability to make predictions every N frames via makePredictionsEveryNFrames; setting it to 1 passes the tensors from TensorCamera to the prediction function on every single frame. After making predictions you will also want to dispose of the tensors using tf.dispose(). The function loop() needs to run indefinitely to make predictions on incoming frames continuously.
const Main = () => {
  const [model, setModel] = useState();
  const [predictions, setPredictions] = useState();

  let requestAnimationFrameId = 0;
  let frameCount = 0;
  let makePredictionsEveryNFrames = 1;

  useEffect(() => {
    // getModel() is async, so resolve the promise before setting state.
    getModel().then(setModel);
  }, []);

  useEffect(() => {
    return () => {
      cancelAnimationFrame(requestAnimationFrameId);
    };
  }, [requestAnimationFrameId]);

  const handleCameraStream = (tensors) => {
    if (!tensors) {
      console.log("Image not found!");
    }
    const loop = async () => {
      if (frameCount % makePredictionsEveryNFrames === 0) {
        const imageTensor = tensors.next().value;
        if (model) {
          const results = await startPrediction(model, imageTensor);
          setPredictions(results);
        }
        tf.dispose(tensors);
      }
      frameCount += 1;
      frameCount = frameCount % makePredictionsEveryNFrames;
      requestAnimationFrameId = requestAnimationFrame(loop);
    };
    loop();
  };
}
I updated getModel() to return the model once it is loaded; this way we can set it in state.
export const getModel = async () => {
  try {
    await tf.ready();
    const model = await tf.loadLayersModel(
      bundleResourceIO(modelJson, modelWeights)
    );
    return model;
  } catch (error) {
    console.log("Could not load model", error);
  }
};
So you would then just need to access the predictions and render them.
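A minimal rendering sketch (an editorial assumption: a Teachable Machine image model typically outputs one probability per class, CLASS_NAMES is a hypothetical placeholder for your exported labels, and Text is assumed to be imported from react-native):

// Hypothetical labels; replace with the classes from your Teachable Machine export.
const CLASS_NAMES = ['class_1', 'class_2'];

const renderPredictions = () => {
  if (!predictions) {
    return null;
  }
  // predictions is the typed array returned by startPrediction().
  return Array.from(predictions).map((probability, i) => (
    <Text key={i}>
      {`${CLASS_NAMES[i] || `class ${i}`}: ${(probability * 100).toFixed(1)}%`}
    </Text>
  ));
};

The result of renderPredictions() could then be placed next to the TensorCamera inside the container View.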
Edit 1:
Looking back at the code, there are some issues with the startPrediction function: you were not actually returning the predictions from the model, and you need to make predictions on a single batch of images at a time.
export const startPrediction = async (model, tensor) => {
  try {
    // predict against the model
    const output = await model.predict(tensor, {batchSize: 1});
    // return typed array
    return output.dataSync();
  } catch (error) {
    console.log('Error predicting from tensor image', error);
  }
};
Edit 2:
Looking at the model input shape here, the expected input shape is (batch_size, 224, 224, 3), but you are passing in an image of (batch_size, 50, 50, 3). So try updating the resizeWidth and resizeHeight parameters to 224.
<TensorCamera
  ref={cameraRef}
  // Standard Camera props
  style={styles.camera}
  type={Camera.Constants.Type.back}
  flashMode={Camera.Constants.FlashMode.off}
  // Tensor related props
  cameraTextureHeight={textureDims.height}
  cameraTextureWidth={textureDims.width}
  resizeHeight={224}
  resizeWidth={224}
  resizeDepth={3}
  onReady={tensors => handleCameraStream(tensors)}
  autorender={true}
/>
In addition, you will need to convert the 3D tensor to a 4D tensor before passing it to the model for predictions, also known as expanding one of the dimensions. Update the handleCameraStream function as well: the tensor has shape (224, 224, 3), and after expanding the first dimension it becomes (1, 224, 224, 3).
const handleCameraStream = (tensors) => {
  if (!tensors) {
    console.log("Image not found!");
  }
  const loop = async () => {
    if (frameCount % makePredictionsEveryNFrames === 0) {
      const imageTensor = tensors.next().value;
      if (model) {
        // Expand the 3D tensor (224, 224, 3) into a batch of one: (1, 224, 224, 3).
        const imageTensorReshaped = imageTensor.expandDims(0);
        const results = await startPrediction(model, imageTensorReshaped);
        setPredictions(results);
        tf.dispose(imageTensorReshaped);
      }
      // Dispose of the frame tensor once we are done with it.
      tf.dispose(imageTensor);
    }
    frameCount += 1;
    frameCount = frameCount % makePredictionsEveryNFrames;
    requestAnimationFrameId = requestAnimationFrame(loop);
  };
  loop();
};