tfjs-react-native from tensor to jpeg - react-native

There is decodeJpeg in @tensorflow/tfjs-react-native but no encodeJpeg. How can I write a tensor to a local JPEG file, then?
I tried to look at the code and "invert" the function, and I ended up writing:
import * as tf from '@tensorflow/tfjs';
import * as FileSystem from 'expo-file-system';
import * as jpeg from 'jpeg-js';
export const encoderJpeg = async (tensor, name) => {
// add alpha channel if missing
const shape = [...tensor.shape]
shape.pop()
shape.push(4)
const tensorWithAlpha = tf.concat([tensor, tensor], [-1]).slice([0], shape)
const array = new Uint8Array(tensorWithAlpha.dataSync())
const rawImageData = {
data: array.buffer,
width: shape[1],
height: shape[0],
};
const jpegImageData = jpeg.encode(rawImageData, 50);
const imgBase64 = tf.util.decodeString(jpegImageData.data, "base64")
const uri = FileSystem.documentDirectory + name;
await FileSystem.writeAsStringAsync(uri, imgBase64, {
encoding: FileSystem.EncodingType.Base64,
});
return uri
}
but when I show the images with an <Image /> I see that they are all plain green.

This is my final util for doing this:
import * as tf from '@tensorflow/tfjs';
import * as FileSystem from 'expo-file-system';
import * as jpeg from 'jpeg-js';
import { Buffer } from 'buffer'; // Buffer is not a global in bare React Native, so import it explicitly
export const encodeJpeg = async (tensor) => {
const height = tensor.shape[0]
const width = tensor.shape[1]
const data = Buffer.from(
// concat with an extra alpha channel and slice up to 4 channels to handle 3 and 4 channels tensors
tf.concat([tensor, tf.ones([height, width, 1]).mul(255)], [-1])
.slice([0], [height, width, 4])
.dataSync(),
)
const rawImageData = {data, width, height};
const jpegImageData = jpeg.encode(rawImageData, 100);
const imgBase64 = tf.util.decodeString(jpegImageData.data, "base64")
const salt = `${Date.now()}-${Math.floor(Math.random() * 10000)}`
const uri = FileSystem.documentDirectory + `tensor-${salt}.jpg`;
await FileSystem.writeAsStringAsync(uri, imgBase64, {
encoding: FileSystem.EncodingType.Base64,
});
return {uri, width, height}
}

You can use imgBase64 directly in your Image component as follows:
<Image source={{uri: 'data:image/jpeg;base64,' + imgBase64}} />
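For reference, a minimal usage sketch of the util above: it decodes an existing JPEG into a tensor with decodeJpeg, runs it through encodeJpeg, and displays the result. The Preview component, the sourceUri prop and the './encodeJpeg' import path are illustrative assumptions, not part of the original answer.
import React, { useEffect, useState } from 'react';
import { Image } from 'react-native';
import * as tf from '@tensorflow/tfjs';
import * as FileSystem from 'expo-file-system';
import { decodeJpeg } from '@tensorflow/tfjs-react-native';
import { encodeJpeg } from './encodeJpeg'; // hypothetical path to the util above
const Preview = ({ sourceUri }) => {
  const [img, setImg] = useState(null);
  useEffect(() => {
    (async () => {
      await tf.ready();
      // read the source file as base64 and decode it into a [height, width, 3] uint8 tensor
      const b64 = await FileSystem.readAsStringAsync(sourceUri, { encoding: FileSystem.EncodingType.Base64 });
      const tensor = decodeJpeg(new Uint8Array(tf.util.encodeString(b64, 'base64').buffer));
      // re-encode the tensor to a JPEG file and keep the returned uri/size for rendering
      setImg(await encodeJpeg(tensor));
      tf.dispose(tensor);
    })();
  }, [sourceUri]);
  return img ? <Image source={{ uri: img.uri }} style={{ width: img.width, height: img.height }} /> : null;
};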

Related

shading between two boundary boxes

I have a model prediction which detects the end box of each verse of the Quran. I need to shade the area between the boxes, as shown in the pictures: when an end box hits the edge, the shading starts again from the other edge, and the two drawn shadows share the same id. I know it seems complicated, but you will save my day if you figure it out. Here is some of my code for reference:
"use client";
import LABELS from "@app-datasets/coco/classes.json";
import {
Box,
Button,
ButtonGroup,
Center,
chakra,
Container,
Heading,
Icon,
Text,
useBoolean,
VisuallyHiddenInput,
VStack,
} from "#app-providers/chakra-ui";
import * as tf from "@tensorflow/tfjs";
import "@tensorflow/tfjs-backend-webgl";
import { useEffect, useRef, useState } from "react";
import { FaTimes } from "react-icons/fa";
const ZOO_MODEL = [{ name: "yolov5", child: ["yolov5n", "yolov5s"] }];
function Home() {
// LET THE USER CHOOSE THE MINIMUM SCORE TO SHOW
const [model, setModel] = useState(null);
const [aniId, setAniId] = useState(null);
const [modelName, setModelName] = useState(ZOO_MODEL[0]);
const [loading, setLoading] = useState(0);
const imageRef = useRef(null);
const videoRef = useRef(null);
const canvasRef = useRef(null);
const inputImageRef = useRef(null);
const [singleImage, setSingleImage] = useBoolean();
const [liveWebcam, setliveWebcam] = useBoolean();
useEffect(() => {
tf.loadGraphModel(`/model/${modelName.name}/${modelName.child[1]}/model.json`, {
onProgress: (fractions) => {
setLoading(fractions);
},
}).then(async (mod) => {
// warming up the model before using real data
const dummy = tf.ones(mod.inputs[0].shape);
const res = await mod.executeAsync(dummy);
// clear memory
tf.dispose(res);
tf.dispose(dummy);
// save to state
setModel(mod);
});
}, [modelName]);
// helper for drawing into the canvas (IoU threshold 0.5)
const renderPrediction = (boxesData, scoresData, classesData) => {
const ctx = canvasRef.current.getContext("2d");
// clean canvas
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
const font = "16px sans-serif";
ctx.font = font;
ctx.textBaseline = "top";
// LET THE USER CHOOSE THE minval SCORE TO SHOW
const minval = 0.5;
for (let i = 0; i < scoresData.length; ++i) {
if (scoresData[i] < minval) continue;
const klass = LABELS[classesData[i]];
const score = (scoresData[i] * 100).toFixed(1);
let [x1, y1, x2, y2] = boxesData.slice(i * 4, (i + 1) * 4);
x1 *= canvasRef.current.width;
x2 *= canvasRef.current.width;
y1 *= canvasRef.current.height;
y2 *= canvasRef.current.height;
const width = x2 - x1;
const height = y2 - y1;
// draw the bounding box
ctx.strokeStyle = "#C53030";
ctx.lineWidth = 2;
ctx.strokeRect(x1, y1, width, height);
// fill the area between the boxes
const label = klass + " - " + score + "%";
const textWidth = ctx.measureText(label).width;
const textHeight = parseInt(font, 10); // base 10
// draw the label background
ctx.fillStyle = "#C53030";
ctx.fillRect(x1 - 1, y1 - (textHeight + 4), textWidth + 6, textHeight + 4);
// draw the label text
ctx.fillStyle = "#FFFFFF";
ctx.fillText(label, x1 + 2, y1 - (textHeight + 2));
}
};
I was expecting the area to be shaded with the method above.
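One possible approach, sketched with the plain canvas API: keep the previous end box and fill a translucent rectangle between it and the current one, splitting the fill into two rectangles when the next box wraps to the other edge. The fillBetweenBoxes helper, the colour and the same-line test below are assumptions layered on top of renderPrediction, not code from the question.
// Hypothetical helper: boxes are in canvas pixels, e.g. { x1, y1, x2, y2 }.
const fillBetweenBoxes = (ctx, prevBox, nextBox) => {
  ctx.save();
  ctx.fillStyle = "rgba(197, 48, 48, 0.25)"; // translucent shade, same hue as the stroke
  // crude same-line test: the vertical offset is smaller than the box height
  const sameLine = Math.abs(prevBox.y1 - nextBox.y1) < prevBox.y2 - prevBox.y1;
  if (sameLine) {
    // both end boxes sit on the same line: shade the horizontal gap between them
    const [left, right] = prevBox.x1 <= nextBox.x1 ? [prevBox, nextBox] : [nextBox, prevBox];
    ctx.fillRect(left.x2, prevBox.y1, right.x1 - left.x2, prevBox.y2 - prevBox.y1);
  } else {
    // the next box wrapped to the other edge: draw two rectangles that share one logical id
    ctx.fillRect(prevBox.x2, prevBox.y1, ctx.canvas.width - prevBox.x2, prevBox.y2 - prevBox.y1);
    ctx.fillRect(0, nextBox.y1, nextBox.x1, nextBox.y2 - nextBox.y1);
  }
  ctx.restore();
};
Inside renderPrediction you would collect the boxes that pass the minval filter, sort them in reading order, and call fillBetweenBoxes for each consecutive pair before drawing the strokes and labels; whether the wrap-around rectangles should run toward the left or the right edge depends on the reading direction, which I can't tell from the screenshots.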

Argument must be a Tensor or TensorLike, but got 'Tensor'

I'm trying to integrate a ML model into a React Native application. The first step was to transform my image into a Tensor. I followed this tutorial but I'm getting the following error:
Argument 'images' passed to 'resizeNearestNeighbor' must be a Tensor or TensorLike, but got 'Tensor'
import * as tf from '@tensorflow/tfjs'
import { bundleResourceIO, decodeJpeg, fetch } from '@tensorflow/tfjs-react-native'
import * as fs from 'expo-file-system'
const App = () => {
const transformImageToTensor = async (uri) => {
const img64 = await fs.readAsStringAsync(uri, { encoding: fs.EncodingType.Base64 })
const imgBuffer = tf.util.encodeString(img64, 'base64').buffer
const raw = new Uint8Array(imgBuffer)
let imgTensor = decodeJpeg(raw)
const scalar = tf.scalar(255)
//resize the image
imgTensor = tf.image.resizeNearestNeighbor(imgTensor, [300, 300])
//normalize; if a normalization layer is in the model, this step can be skipped
const tensorScaled = imgTensor.div(scalar)
//final shape of the tensor
const img = tf.reshape(tensorScaled, [1, 300, 300, 3])
return img
}
useEffect(()=> { transformImageToTensor('/Users/roxana/Desktop/rndigits/project/model/example.jpeg')
})
}
I found a mention that maybe @tensorflow/tfjs and @tensorflow/tfjs-react-native have different implementations of the Tensor object, but I'm not sure how to code around it. No other tutorial out there worked.
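For context, this error usually appears when two copies of @tensorflow/tfjs end up in node_modules: the tensor is an instance of one copy's Tensor class while tf.image.resizeNearestNeighbor comes from the other. A sketch under that assumption: use the chained tensor methods so every op comes from the same instance that created the tensor, and check (for example with npm ls @tensorflow/tfjs-core) that only one tfjs-core version is installed, pinning the version that @tensorflow/tfjs-react-native expects.
// Sketch only: the same transform as in the question, written with chained ops.
import * as tf from '@tensorflow/tfjs';
import { decodeJpeg } from '@tensorflow/tfjs-react-native';
import * as fs from 'expo-file-system';
export const transformImageToTensor = async (uri) => {
  const img64 = await fs.readAsStringAsync(uri, { encoding: fs.EncodingType.Base64 });
  const raw = new Uint8Array(tf.util.encodeString(img64, 'base64').buffer);
  const imgTensor = decodeJpeg(raw);
  // chained ops instead of tf.image.resizeNearestNeighbor(imgTensor, ...),
  // so the resize is bound to the same tfjs instance that created the tensor
  const img = imgTensor
    .resizeNearestNeighbor([300, 300])
    .div(255)                  // skip if the model already has a normalization layer
    .reshape([1, 300, 300, 3]);
  imgTensor.dispose();
  return img;
};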

Loading an .obj file with Expo-three?

I am trying to insert a .obj file into a React Native app built using Expo.
Most of the working examples I've found rely on building spheres or cubes within the rendering; I haven't found a good example of successfully rendering a local file, specifically an .obj.
I'm using the expo-three documentation, which describes rendering .obj files but has no working examples.
This is what I have so far, which is not producing any rendered object. I want to know whether I am on the right track, and what I am missing to get the object to render.
Below is the current file code.
import { GLView } from 'expo-gl';
import { Renderer, TextureLoader } from 'expo-three';
import * as React from 'react';
import { OBJLoader } from 'three/examples/jsm/loaders/OBJLoader';
import {
AmbientLight,
Fog,
GridHelper,
PerspectiveCamera,
PointLight,
Scene,
SpotLight,
} from 'three';
import { Asset } from 'expo-asset';
import { MTLLoader } from 'three/examples/jsm/loaders/MTLLoader';
export default function ThreeDPhytosaur() {
return (
<GLView
style={{ flex: 1 }}
onContextCreate={async (gl) => {
const { drawingBufferWidth: width, drawingBufferHeight: height } = gl;
const sceneColor = 0x6ad6f0;
const renderer = new Renderer({ gl });
renderer.setSize(width, height);
renderer.setClearColor(sceneColor);
const camera = new PerspectiveCamera(70, width / height, 0.01, 1000);
camera.position.set(2, 5, 5);
const scene = new Scene();
scene.fog = new Fog(sceneColor, 1, 10000);
scene.add(new GridHelper(10, 10));
const ambientLight = new AmbientLight(0x101010);
scene.add(ambientLight);
const pointLight = new PointLight(0xffffff, 2, 1000, 1);
pointLight.position.set(0, 200, 200);
scene.add(pointLight);
const spotLight = new SpotLight(0xffffff, 0.5);
spotLight.position.set(0, 500, 100);
spotLight.lookAt(scene.position);
scene.add(spotLight);
const asset = Asset.fromModule(model['phytosaur']);
await asset.downloadAsync();
const objectLoader = new OBJLoader();
const object = await objectLoader.loadAsync(asset.uri);
object.scale.set(0.025, 0.025, 0.025);
scene.add(object);
camera.lookAt(object.position);
let timeout;
const render = () => {
timeout = requestAnimationFrame(render);
renderer.render(scene, camera);
gl.endFrameEXP();
};
render();
}}
/>
);
}
const model = {
'phytosaur': require('../assets/phytosaur.obj'),
};
Thanks very much!
This is the code that I got to render the .obj file. I changed the structure of the original file based on some other examples I found, but it might help someone else!
import { OBJLoader } from 'three/examples/jsm/loaders/OBJLoader.js';
import { Asset } from 'expo-asset';
import { Renderer } from 'expo-three';
import { GLView } from 'expo-gl';
import * as React from 'react';
import {
AmbientLight,
Fog,
PerspectiveCamera,
PointLight,
Scene,
SpotLight,
MathUtils,
} from 'three';
export default function ThreeDTwo() {
let timeout;
React.useEffect(() => {
// Clear the animation loop when the component unmounts
return () => clearTimeout(timeout);
}, []);
return (
<GLView
style={{ flex: 1 }}
onContextCreate={async (gl) => {
const { drawingBufferWidth: width, drawingBufferHeight: height } = gl;
const sceneColor = 0x668096;
// Create a WebGLRenderer without a DOM element
const renderer = new Renderer({ gl });
renderer.setSize(width, height);
renderer.setClearColor(0x668096);
const camera = new PerspectiveCamera(70, width / height, 0.01, 1000);
camera.position.set(2, 5, 5);
const scene = new Scene();
scene.fog = new Fog(sceneColor, 1, 10000);
const ambientLight = new AmbientLight(0x101010);
scene.add(ambientLight);
const pointLight = new PointLight(0xffffff, 2, 1000, 1);
pointLight.position.set(0, 200, 200);
scene.add(pointLight);
const spotLight = new SpotLight(0xffffff, 0.5);
spotLight.position.set(0, 500, 100);
spotLight.lookAt(scene.position);
scene.add(spotLight);
const asset = Asset.fromModule(require("../assets/phytosaur_without_mtl.obj"));
await asset.downloadAsync();
// instantiate a loader
const loader = new OBJLoader();
// load a resource
loader.load(
// resource URL
asset.localUri,
// called when resource is loaded
function ( object ) {
object.scale.set(0.065, 0.065, 0.065)
scene.add( object );
camera.lookAt(object.position)
//rotate my obj file
function rotateObject(object, degreeX=0, degreeY=0, degreeZ=0) {
object.rotateX(MathUtils.degToRad(degreeX));
object.rotateY(MathUtils.degToRad(degreeY));
object.rotateZ(MathUtils.degToRad(degreeZ));
}
// usage:
rotateObject(object, 0, 0, 70);
//animate rotation
function update() {
object.rotation.x += 0.015
}
const render = () => {
timeout = requestAnimationFrame(render);
update();
renderer.render(scene, camera);
gl.endFrameEXP();
};
render();
},
// called while loading is in progress
function ( xhr ) {
console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
},
// called when loading has errors
function ( error ) {
console.log( error );
}
);
}}
/>
);
}

Tensor to JPEG: Improve quality

I'm converting a tensor returned from a TensorFlow camera into a JPEG. I've been able to do so, but the JPEG image quality is terrible. This is for a mobile React Native app developed with Expo. The library I'm using to convert the tensor to an image is jpeg-js.
This is the code I'm using to transform the tensor into an image:
const handleCameraStream = async (imageAsTensors) => {
const loop = async () => {
const tensor = await imageAsTensors.next().value;
console.log(tensor)
const [height, width] = tensor.shape
const data = new Buffer(
// concat with an extra alpha channel and slice up to 4 channels to handle 3 and 4 channels tensors
tf.concat([tensor, tf.ones([height, width, 1]).mul(255)], [-1])
.slice([0], [height, width, 4])
.dataSync(),
)
const rawImageData = {data, width, height};
const jpegImageData = jpeg.encode(rawImageData, 200);
const imgBase64 = tf.util.decodeString(jpegImageData.data, "base64")
const salt = `${Date.now()}-${Math.floor(Math.random() * 10000)}`
const uri = FileSystem.documentDirectory + `tensor-${salt}.jpg`;
await FileSystem.writeAsStringAsync(uri, imgBase64, {
encoding: FileSystem.EncodingType.Base64,
});
setUri(uri)
// return {uri, width, height}
};
// requestAnimationFrameId = requestAnimationFrame(loop);
!uri ? setTimeout(() => loop(), 2000) : null;
}
The picture in the top half of the image is the camera stream. The picture in the bottom half of the image below is the transformed tensor.
I tried your code with increased resize height and width and was able to get an image with good quality. If that doesn't work, maybe you can post the rest of your code and I'll try to help.
Example Image
<TensorCamera
type={Camera.Constants.Type.front}
resizeHeight={cameraHeight}
resizeWidth={cameraWidth}
resizeDepth={3}
onReady={handleCameraStream}
autorender={true}
/>
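A hedged follow-up on the suggestion above: as far as I know jpeg-js clamps the quality argument to the 0-100 range, so jpeg.encode(rawImageData, 200) behaves like quality 100, and the knob that actually changes the output is the tensor resolution, i.e. the resizeHeight/resizeWidth passed to TensorCamera. The numbers below are illustrative assumptions, not values from the answer:
// resizeHeight/resizeWidth enlarged (illustrative values) so the tensor has more pixels to encode
<TensorCamera
  type={Camera.Constants.Type.front}
  resizeHeight={640}
  resizeWidth={480}
  resizeDepth={3}
  onReady={handleCameraStream}
  autorender={true}
/>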

Problem with script to change number of Flatlist columns depending on rotation/size

I'm working on some code to calculate numColumns for a FlatList. The intention is 3 columns on a landscape tablet, 2 on a portrait tablet, and 1 on a portrait phone.
Here's my code:
const [width, setWidth] = useState(Dimensions.get('window').width);
const [imageWidth, setImageWidth] = useState(100);
const [imageHeight, setImageHeight] = useState(100);
const [columns, setColumns] = useState(3);
useEffect(() => {
function handleChange() {
setWidth(Dimensions.get('window').width);
}
Dimensions.addEventListener("change", handleChange);
return () => Dimensions.removeEventListener("change", handleChange);
}, [width]);
useEffect(() => {
if (width > 1100) {
setColumns(3);
} else if (width <= 1099 && width > 600) {
setColumns(2);
} else {
setColumns(1);
}
setImageWidth((width - (64 * columns) + 15) / columns);
setImageHeight(((width - (64 * columns) + 15) / columns) * .6);
}, [width]);
imageWidth and imageHeight are passed to the render component of the flatlist to size an image.
It seems to work fine when I load it in landscape mode, but if I rotate to portrait, I get this:
Then, if I go back to landscape, it stays as 2 columns?
Any idea how I can fix this?
You're not tracking the height of the device; you need it to calculate the orientation of the device (isLandscape).
Logically it flows as follows:
Is the device landscape? (setColumns(3))
Is the device wide and portrait? (setColumns(2))
Otherwise (setColumns(1))
From there you can pass that into the second useEffect (or extract it into, say, a useColumns hook). This should set the height/width based on the orientation of the device.
I also recommend setting the height/width as percentages rather than exact pixels (100%, 50%, 33.3%) - see the sketch after the code below.
// width and height come from the useScreenData() hook below, so no separate width state is needed
const [imageWidth, setImageWidth] = useState(100);
const [imageHeight, setImageHeight] = useState(100);
const [columns, setColumns] = useState(3);
/**
* orientation
*
* return {
* width,
* height
* }
*/
const useScreenData = () => {
const [screenData, setScreenData] = useState(Dimensions.get("screen"))
useEffect(() => {
const onChange = (result) => {
setScreenData(result.screen)
}
Dimensions.addEventListener("change", onChange)
return () => Dimensions.removeEventListener("change", onChange)
}, [])
return {
...screenData,
}
}
const { width, height } = useScreenData()
const isLandscape = width > height
useEffect(() => {
if (isLandscape && width > 1100) {
// handle landscape
setColumns(3)
} else if (!isLandscape && (width <= 1099 && width > 600)) {
setColumns(2)
} else {
setColumns(1)
}
setImageWidth((width - (64 * columns) + 15) / columns);
setImageHeight(((width - (64 * columns) + 15) / columns) * .6);
}, [width, isLandscape]);
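To illustrate the percentage idea, a sketch of the rendering side (the renderItem shape, the items data and the style values are assumptions, not code from the question): with numColumns driven by the hook, each item can take 100 / columns percent of the row, so no pixel math is needed, and changing the FlatList key forces a re-mount when the column count changes.
// Hypothetical render item: width as a percentage of the row instead of computed pixels.
// aspectRatio 10/6 keeps the original 0.6 height-to-width ratio.
const renderItem = ({ item }) => (
  <View style={{ width: `${100 / columns}%`, padding: 8 }}>
    <Image
      source={{ uri: item.uri }}
      style={{ width: '100%', aspectRatio: 10 / 6 }}
      resizeMode="cover"
    />
  </View>
);
// The key prop forces a re-mount because FlatList does not support changing numColumns on the fly.
<FlatList
  key={columns}
  data={items}
  numColumns={columns}
  renderItem={renderItem}
  keyExtractor={(item, index) => String(index)}
/>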
1. Tablet or phone
You can use the react-native-device-info package along with the Dimensions API. Check the isTablet() method and apply different styles according to the result.
If you are an Expo user, you can use the userInterfaceIdiom value from expo-constants to detect whether the device is a tablet (see the expo-constants reference).
import DeviceInfo from 'react-native-device-info';
let isTablet = DeviceInfo.isTablet();
2. Portrait mode or landscape mode
Then you can detect portrait or landscape mode with:
const isPortrait = () => {
const dim = Dimensions.get('screen');
return dim.height >= dim.width;
};
3. Code:
import React, { useState, useEffect } from "react";
import { View, Text, Dimensions } from "react-native";
import DeviceInfo from "react-native-device-info";
const App = () => {
const isLandscapeFunction = () => {
const dim = Dimensions.get("screen");
return dim.width >= dim.height;
};
const [width, setWidth] = useState(Dimensions.get("window").width);
const [imageWidth, setImageWidth] = useState(100);
const [imageHeight, setImageHeight] = useState(100);
const [columns, setColumns] = useState(3);
let isTabletPhone = DeviceInfo.isTablet();
useEffect(() => {
function handleChange(result) {
setWidth(result.screen.width);
const isLandscape = isLandscapeFunction();
if (isLandscape && isTabletPhone) {
setColumns(3);
} else if (!isLandscape && isTabletPhone) {
setColumns(2);
} else {
setColumns(1);
}
}
Dimensions.addEventListener("change", handleChange);
return () => Dimensions.removeEventListener("change", handleChange);
});
useEffect(() => {
console.log(columns);
setImageWidth((width - 64 * columns + 15) / columns);
setImageHeight(((width - 64 * columns + 15) / columns) * 0.6);
}, [columns, width]);
return (
<View style={{ justifyContent: "center", alignItems: "center", flex: 1 }}>
<Text>imageWidth {imageWidth}</Text>
<Text>imageHeight {imageHeight}</Text>
</View>
);
};
export default App;