shading between two bounding boxes - tensorflow

I have a model prediction which detects the end box of each verse of the Quran. I need to shade the area between them as shown in the pictures: when an end box hits the edge, the shading starts again from the other edge, and the two drawn shadows share the same id. I know it seems complicated, but you will save my day if you figure it out. Here is some of my code for reference:
"use client";
import LABELS from "@app-datasets/coco/classes.json";
import {
Box,
Button,
ButtonGroup,
Center,
chakra,
Container,
Heading,
Icon,
Text,
useBoolean,
VisuallyHiddenInput,
VStack,
} from "#app-providers/chakra-ui";
import * as tf from "@tensorflow/tfjs";
import "@tensorflow/tfjs-backend-webgl";
import { useEffect, useRef, useState } from "react";
import { FaTimes } from "react-icons/fa";
const ZOO_MODEL = [{ name: "yolov5", child: ["yolov5n", "yolov5s"] }];
function Home() {
// LET THE USER CHOOSE THE MINIMUM SCORE TO SHOW
const [model, setModel] = useState(null);
const [aniId, setAniId] = useState(null);
const [modelName, setModelName] = useState(ZOO_MODEL[0]);
const [loading, setLoading] = useState(0);
const imageRef = useRef(null);
const videoRef = useRef(null);
const canvasRef = useRef(null);
const inputImageRef = useRef(null);
const [singleImage, setSingleImage] = useBoolean();
const [liveWebcam, setliveWebcam] = useBoolean();
useEffect(() => {
tf.loadGraphModel(`/model/${modelName.name}/${modelName.child[1]}/model.json`, {
onProgress: (fractions) => {
setLoading(fractions);
},
}).then(async (mod) => {
// warming up the model before using real data
const dummy = tf.ones(mod.inputs[0].shape);
const res = await mod.executeAsync(dummy);
// clear memory
tf.dispose(res);
tf.dispose(dummy);
// save to state
setModel(mod);
});
}, [modelName]);
// helper for drawing predictions into the canvas (IoU threshold 0.5)
const renderPrediction = (boxesData, scoresData, classesData) => {
const ctx = canvasRef.current.getContext("2d");
// clean canvas
ctx.clearRect(0, 0, ctx.canvas.width, ctx.canvas.height);
const font = "16px sans-serif";
ctx.font = font;
ctx.textBaseline = "top";
// LET THE USER CHOOSE THE MINIMUM SCORE TO SHOW
const minval = 0.5;
for (let i = 0; i < scoresData.length; ++i) {
if (scoresData[i] < minval) continue;
const klass = LABELS[classesData[i]];
const score = (scoresData[i] * 100).toFixed(1);
let [x1, y1, x2, y2] = boxesData.slice(i * 4, (i + 1) * 4);
x1 *= canvasRef.current.width;
x2 *= canvasRef.current.width;
y1 *= canvasRef.current.height;
y2 *= canvasRef.current.height;
const width = x2 - x1;
const height = y2 - y1;
// draw the bounding box
ctx.strokeStyle = "#C53030";
ctx.lineWidth = 2;
ctx.strokeRect(x1, y1, width, height);
// TODO: fill the area between the boxes here (this is the part that is missing)
const label = klass + " - " + score + "%";
const textWidth = ctx.measureText(label).width;
const textHeight = parseInt(font, 10); // base 10
// draw the label background
ctx.fillStyle = "#C53030";
ctx.fillRect(x1 - 1, y1 - (textHeight + 4), textWidth + 6, textHeight + 4);
// draw the label text
ctx.fillStyle = "#FFFFFF";
ctx.fillText(label, x1 + 2, y1 - (textHeight + 2));
}
};
I was expecting to get the shaded area with the method above.
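One way to approach it (a minimal sketch under stated assumptions, not a confirmed solution): treat each pair of consecutive end boxes as one span; fill a single rectangle when both boxes sit on the same text line, and split the fill into two rectangles carrying the same id when the span wraps around the edge. It assumes the boxes are already scaled to canvas pixels and sorted in reading order; for right-to-left Quranic text you would swap the roles of the left and right edges.
```
// Hypothetical helper: shade the span between two consecutive end boxes.
// `prev` and `curr` are {x1, y1, x2, y2} in canvas pixels, in reading order.
const shadeBetween = (ctx, prev, curr, id) => {
  ctx.fillStyle = "rgba(197, 48, 48, 0.25)"; // translucent shade
  const lineTolerance = (prev.y2 - prev.y1) / 2;
  if (Math.abs(curr.y1 - prev.y1) < lineTolerance) {
    // same line: one rectangle from the end of prev to the start of curr
    ctx.fillRect(prev.x2, prev.y1, curr.x1 - prev.x2, prev.y2 - prev.y1);
  } else {
    // wrapped line: two rectangles, both tagged with the same id
    ctx.fillRect(prev.x2, prev.y1, ctx.canvas.width - prev.x2, prev.y2 - prev.y1);
    ctx.fillRect(0, curr.y1, curr.x1, curr.y2 - curr.y1);
  }
};
```
Inside renderPrediction you could collect the boxes that pass the score threshold, sort them, and call shadeBetween for each consecutive pair.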

Related

Why does additional space appear on the right side of the PDF?

I am using dom-to-image and jsPDF to generate an A5 PDF. Additional space appears on every page of the PDF, but canvasImageWidth and pdfWidth are the same, so the content should fit the PDF width exactly. How do I remove those spaces?
const margin = 0;
const htmlWidth = 419.52755906 - (margin * 2); // a5 size in pt
const ratio = document.getElementById('article-body').offsetWidth / htmlWidth;
const htmlHeight = document.getElementById('article-body').offsetHeight / ratio;
let pdfWidth = 419.52755906; // a5 size in pt
let pdfHeight = 595.27559055; // a5 size in pt
const totalPDFPages = Math.ceil(htmlHeight / pdfHeight) - 1;
const data = this.document.getElementById('article-body');
const canvasImageWidth = htmlWidth;
const canvasImageHeight = htmlHeight;
console.log("canvasImageWidth : " + canvasImageWidth );
console.log("pdfWidth: " + pdfWidth);
domtoimage.toJpeg(data, { quality: 0.95, bgcolor: "#ffffff" }).then (function (dataUrl) {
let pdf = new jsPDF('p', 'pt', [pdfWidth, pdfHeight]);
pdf.addImage(dataUrl, 'png', margin, margin, canvasImageWidth, canvasImageHeight);
for (let i = 1; i <= totalPDFPages; i++) {
pdf.addPage([pdfWidth, pdfHeight], 'p');
pdf.addImage(dataUrl, 'png', margin, - (pdfHeight * i) + margin, canvasImageWidth, canvasImageHeight);
}
pdf.save("<?php echo $this->item->alias; ?>" + '.pdf');
})
.catch(function (error) {
console.error('oops, something went wrong!', error);
});
[screenshot of the generated PDF]
Log:
canvasImageWidth: 419.52755906
pdfWidth: 419.52755906
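A couple of things worth checking (assumptions, not a confirmed fix): dom-to-image renders the node at its on-screen pixel size, so if the element's offsetWidth differs from the width passed to addImage, the JPEG is scaled and can leave a gutter; also, the data URL is a JPEG while addImage is told 'png'. A sketch that derives the image height from the node's own aspect ratio and fills the PDF width exactly:
```
// Hypothetical sketch: scale the rendered image to the full PDF width,
// deriving the height from the element's own aspect ratio.
const node = document.getElementById('article-body');
domtoimage.toJpeg(node, { quality: 0.95, bgcolor: '#ffffff' }).then(function (dataUrl) {
  const pdf = new jsPDF('p', 'pt', [pdfWidth, pdfHeight]);
  const imgHeight = (node.offsetHeight / node.offsetWidth) * pdfWidth;
  pdf.addImage(dataUrl, 'JPEG', 0, 0, pdfWidth, imgHeight); // format tag matches the JPEG data
  pdf.save('article.pdf');
});
```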

tfjs-react-native from tensor to jpeg

There is decodeJpeg in @tensorflow/tfjs-react-native, but no encodeJpeg. How do I write a tensor into a local JPEG file, then?
I tried to look at the code and "invert" the function, and I ended up writing:
import * as tf from '@tensorflow/tfjs';
import * as FileSystem from 'expo-file-system';
import * as jpeg from 'jpeg-js';
export const encoderJpeg = async (tensor, name) => {
// add alpha channel if missing
const shape = [...tensor.shape]
shape.pop()
shape.push(4)
const tensorWithAlpha = tf.concat([tensor, tensor], [-1]).slice([0], shape)
const array = new Uint8Array(tensorWithAlpha.dataSync())
const rawImageData = {
data: array.buffer,
width: shape[1],
height: shape[0],
};
const jpegImageData = jpeg.encode(rawImageData, 50);
const imgBase64 = tf.util.decodeString(jpegImageData.data, "base64")
const uri = FileSystem.documentDirectory + name;
await FileSystem.writeAsStringAsync(uri, imgBase64, {
encoding: FileSystem.EncodingType.Base64,
});
return uri
}
but when I show the images with an <Image /> I see that they are all plain green.
This is my final util for doing this (the fix concatenates a constant, fully opaque alpha plane instead of duplicating the tensor's own channels):
import * as tf from '@tensorflow/tfjs';
import * as FileSystem from 'expo-file-system';
import * as jpeg from 'jpeg-js';
export const encodeJpeg = async (tensor) => {
const height = tensor.shape[0]
const width = tensor.shape[1]
const data = Buffer.from(
// concat an extra alpha channel and slice to 4 channels, to handle both 3- and 4-channel tensors
tf.concat([tensor, tf.ones([height, width, 1]).mul(255)], [-1])
.slice([0], [height, width, 4])
.dataSync(),
)
const rawImageData = {data, width, height};
const jpegImageData = jpeg.encode(rawImageData, 100);
const imgBase64 = tf.util.decodeString(jpegImageData.data, "base64")
const salt = `${Date.now()}-${Math.floor(Math.random() * 10000)}`
const uri = FileSystem.documentDirectory + `tensor-${salt}.jpg`;
await FileSystem.writeAsStringAsync(uri, imgBase64, {
encoding: FileSystem.EncodingType.Base64,
});
return {uri, width, height}
}
You can also use imgBase64 directly in your Image component as follows:
<Image source={{uri: 'data:image/jpeg;base64,' + imgBase64}} />

Problem with script to change number of Flatlist columns depending on rotation/size

I'm working on some code to calculate numColumns for a FlatList. The intention is 3 columns on a landscape tablet, 2 on a portrait tablet, and 1 on a portrait phone.
Here's my code:
const [width, setWidth] = useState(Dimensions.get('window').width);
const [imageWidth, setImageWidth] = useState(100);
const [imageHeight, setImageHeight] = useState(100);
const [columns, setColumns] = useState(3);
useEffect(() => {
function handleChange() {
setWidth(Dimensions.get('window').width);
}
Dimensions.addEventListener("change", handleChange);
return () => Dimensions.removeEventListener("change", handleChange);
}, [width]);
useEffect(() => {
if (width > 1100) {
setColumns(3);
} else if (width <= 1099 && width > 600) {
setColumns(2);
} else {
setColumns(1);
}
setImageWidth((width - (64 * columns) + 15) / columns);
setImageHeight(((width - (64 * columns) + 15) / columns) * .6);
}, [width]);
imageWidth and imageHeight are passed to the FlatList's render component to size an image.
It seems to work fine when I load it in landscape mode, but if I rotate to portrait, I get this:
Then, if I go back to landscape, it stays at 2 columns?
Any idea how I can fix this?
You're not tracking the height of the device; you need it to calculate the orientation of the device (isLandscape).
Logically it flows as follows:
is the device landscape? (set columns to 3)
is the device wide and portrait? (set columns to 2)
otherwise (set columns to 1)
From there you can pass that into the second useEffect (say, a useColumnsHook), which can set the height/width based on the orientation of the device.
I also recommend setting the height/width based on percentages rather than exact pixels for devices (100%, 50%, 33.3%).
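For example, a sketch of percentage-based cell sizing (the style names here are hypothetical):
```
// Hypothetical: size each cell as a fraction of the row width instead of pixels
const cellStyle = {
  width: `${(100 / columns).toFixed(2)}%`,
  aspectRatio: 5 / 3, // keeps height proportional without pixel math
};
```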
// note: width and height now come from useScreenData() below, so the separate width state is not needed
const [imageWidth, setImageWidth] = useState(100);
const [imageHeight, setImageHeight] = useState(100);
const [columns, setColumns] = useState(3);
/**
* orientation
*
* return {
* width,
* height
* }
*/
const useScreenData = () => {
const [screenData, setScreenData] = useState(Dimensions.get("screen"))
useEffect(() => {
const onChange = (result) => {
setScreenData(result.screen)
}
Dimensions.addEventListener("change", onChange)
return () => Dimensions.removeEventListener("change", onChange)
}, [])
return {
...screenData,
}
}
const { width, height } = useScreenData()
const isLandscape = width > height
useEffect(() => {
if (isLandscape && width > 1100) {
// handle landscape
setColumns(3)
} else if (!isLandscape && (width <= 1099 && width > 600)) {
setColumns(2)
} else {
setColumns(1)
}
setImageWidth((width - (64 * columns) + 15) / columns);
setImageHeight(((width - (64 * columns) + 15) / columns) * .6);
}, [width, isLandscape]);
1: Tablet or Phone
You can use the react-native-device-info package along with the Dimensions API: check the isTablet() method and apply different styles according to the result.
If you are an Expo user, you have to use the expo-constants userInterfaceIdiom value to detect a tablet (Reference).
import DeviceInfo from 'react-native-device-info';
let isTablet = DeviceInfo.isTablet();
2: Portrait mode or Landscape mode
Then you can detect portrait or landscape mode with:
const isPortrait = () => {
const dim = Dimensions.get('screen');
return dim.height >= dim.width;
};
3: Code:
import React, { useState, useEffect } from "react";
import { View, Text, Dimensions } from "react-native";
import DeviceInfo from "react-native-device-info";
const App = () => {
const isLandscapeFunction = () => {
const dim = Dimensions.get("screen");
return dim.width >= dim.height;
};
const [width, setWidth] = useState(Dimensions.get("window").width);
const [imageWidth, setImageWidth] = useState(100);
const [imageHeight, setImageHeight] = useState(100);
const [columns, setColumns] = useState(3);
let isTabletPhone = DeviceInfo.isTablet();
useEffect(() => {
function handleChange(result) {
setWidth(result.screen.width);
const isLandscape = isLandscapeFunction();
if (isLandscape && isTabletPhone) {
setColumns(3);
} else if (!isLandscape && isTabletPhone) {
setColumns(2);
} else {
setColumns(1);
}
}
Dimensions.addEventListener("change", handleChange);
return () => Dimensions.removeEventListener("change", handleChange);
});
useEffect(() => {
console.log(columns);
setImageWidth((width - 64 * columns + 15) / columns);
setImageHeight(((width - 64 * columns + 15) / columns) * 0.6);
}, [columns, width]);
return (
<View style={{ justifyContent: "center", alignItems: "center", flex: 1 }}>
<Text>imageWidth {imageWidth}</Text>
<Text>imageHeight {imageHeight}</Text>
</View>
);
};
export default App;

could not train a model by calculating and applying gradients

I'm trying to train a simple sequential model. I want to decompose the fit algorithm into a "calculate gradients" phase and an "apply gradients" phase, in order to use it for reinforcement learning.
This idea is taken from the cart-pole example.
Anyway, I cannot get a good result, even though the problem (defined in func()) is comparatively easy to solve with a model trained by the fit method. The best I can achieve is something like this:
I must be missing something. Could anyone point out what's wrong?
Here you can find the GitHub source with the net output visualization:
import {layers, sequential, Sequential} from "@tensorflow/tfjs-layers";
import {
tensor2d,
Tensor,
losses,
variableGrads,
tidy,
train,
NamedTensorMap,
stack,
mean,
concat
} from "#tensorflow/tfjs";
import {ActivationIdentifier} from "#tensorflow/tfjs-layers/src/keras_format/activation_config";
import {NamedTensor} from "#tensorflow/tfjs-core/dist/tensor_types";
import {InitializerIdentifier} from "#tensorflow/tfjs-layers/src/initializers";
import {addHeatmap} from "./vis/heatmap";
const func = (...x) => {
const y1 = x[0] * x[1] * 0.9 + (1 - x[0]) * (1 - x[1]) * 0.9;
return tensor2d([y1], [1, 1])
}
const activation: ActivationIdentifier = "tanh"
const kernelInitializer: InitializerIdentifier = null
const model: Sequential = sequential();
const inputLayer = layers.dense({
units: 2,
inputShape: [2],
kernelInitializer,
});
const hiddenLayer1 = layers.dense({
units: 16,
activation: activation,
//kernelInitializer,
useBias: true
});
const outputLayer = layers.dense({
units: 1,
activation: "sigmoid",
kernelInitializer,
useBias: true
});
const dim = 10; // error sampling density
model.add(inputLayer);
model.add(hiddenLayer1);
model.add(outputLayer);
const optimizer = train.adam(0.1);
const calculateGradient = () => {
return tidy(() => {
const vGrads = variableGrads(() => tidy(() => {
const x1 = Math.random();
const x2 = Math.random();
const labels = func(x1, x2)
const input = tensor2d([x1, x2], [1, 2])
return losses.meanSquaredError(
labels,
model.predict(input) as Tensor
).asScalar();
}));
return vGrads.grads;
})
}
const createBatch = (n: number) => {
return tidy(() => {
const gradientsArrays = {}
for (let i = 0; i < n; i++) {
const gradient = calculateGradient();
Object.keys(gradient).forEach((entry) => {
gradientsArrays[entry] ? gradientsArrays[entry].push(gradient[entry]) : gradientsArrays[entry] = [gradient[entry]]
})
}
const gradientsMeans = {}
Object.keys(gradientsArrays).forEach(key => {
gradientsMeans[key] = mean(stack(gradientsArrays[key], 0))
})
return gradientsMeans;
})
}
const epoch = (iterations: number) => {
for (let i = 0; i < iterations; i++) {
let batch = createBatch(16);
optimizer.applyGradients(batch)
}
}
const calculateDesiredOutputs = () => {
const desiredOutputs = [];
for (let y = 0; y < 1; y += 1 / dim) {
for (let x = 0; x < 1; x += 1 / dim) {
desiredOutputs.push({x, y, value: func(x, y).dataSync()[0]});
}
}
return desiredOutputs;
}
const calculateNetOutputs = () => {
const netOutputs = [];
for (let y = 0; y < 1; y += 1 / dim) {
for (let x = 0; x < 1; x += 1 / dim) {
const value = (<any>model.predict(tensor2d([x, y], [1, 2]))).dataSync()[0];
netOutputs.push({x, y, value});
}
}
return netOutputs
}
const calculateError = (a: { value: number }[], b: { value: number }[]) => {
let error = 0;
for (let i = 0; i < a.length; i++) {
let e = a[i].value - b[i].value;
error += e * e
}
return Math.sqrt(error) / (dim * dim);
}
const run = async () => {
const desiredOutputs = calculateDesiredOutputs();
const desiredOutputsHeatmap = addHeatmap({dim});
desiredOutputsHeatmap.update(desiredOutputs)
const netOutputHeatmap = addHeatmap({dim});
let i = 0;
while (i < 256) {
epoch(20);
let netOutputs = calculateNetOutputs();
console.log("epoch: ", i)
console.log(calculateError(desiredOutputs, netOutputs))
netOutputHeatmap.update(netOutputs);
await new Promise((r) => setTimeout(() => r(), 100));
i++;
}
}
run();
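One detail that stands out (an observation, not a verified fix): in createBatch, mean(stack(gradientsArrays[key], 0)) is called without an axis argument, and tf.mean with no axis reduces over all dimensions, so each averaged gradient collapses to a scalar before it reaches optimizer.applyGradients. Averaging over the stacked batch axis only keeps each gradient the same shape as its variable:
```
// Reduce over the batch axis (0) only, preserving each variable's gradient shape
const gradientsMeans = {};
Object.keys(gradientsArrays).forEach(key => {
    gradientsMeans[key] = mean(stack(gradientsArrays[key], 0), 0);
});
```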

OrbitControls are not working in react-native

I am trying to implement OrbitControls in my React Native application. I have loaded a 3D model with the GraphicsView of expo-graphics. The model loads perfectly; now I need to rotate it with a screen drag. For this I added OrbitControls, but it is not working properly: the model doesn't rotate with a screen drag. Please help with what I need to do to rotate the 3D model in my React Native app.
Here is my model class.
```
import React from 'react';
import ExpoTHREE, { THREE } from 'expo-three';
import { GraphicsView } from 'expo-graphics';
import OrbitControls from 'three-orbitcontrols';
class Model extends React.Component {
componentDidMount() {
THREE.suppressExpoWarnings();
}
// When our context is built we can start coding 3D things.
onContextCreate = async ({ gl, pixelRatio, width, height }) => {
// Create a 3D renderer
this.renderer = new ExpoTHREE.Renderer({
gl,
pixelRatio,
width,
height,
});
// We will add all of our meshes to this scene.
this.scene = new THREE.Scene();
this.scene.background = new THREE.Color(0x3d392f);
//this.scene.fog = new THREE.Fog( 0xa0a0a0, 10, 50 );
this.camera = new THREE.PerspectiveCamera(12, width/height, 1, 1000);
this.camera.position.set(3, 3, 3);
this.camera.lookAt(0, 0, 0);
this.controls = new OrbitControls(this.camera, this.renderer.domElement);
this.controls.rotateSpeed = 2.0;
this.controls.zoomSpeed = 1.2;
this.controls.panSpeed = 0.8;
this.controls.noPan = false;
this.controls.staticMoving = false;
this.controls.dynamicDampingFactor = 0.3;
this.controls.keys = [ 65, 83, 68 ];
this.controls.addEventListener( 'change', this.onRender );
//clock = new THREE.Clock();
//this.scene.add(new THREE.AmbientLight(0xffffff));
var hemiLight = new THREE.HemisphereLight( 0xffffff, 0x444444 );
hemiLight.position.set( 0, 20, 0 );
this.scene.add( hemiLight );
var dirLight = new THREE.DirectionalLight( 0xffffff );
dirLight.position.set( - 3, 10, - 10 );
dirLight.castShadow = true;
dirLight.shadow.camera.top = 2;
dirLight.shadow.camera.bottom = - 2;
dirLight.shadow.camera.left = - 2;
dirLight.shadow.camera.right = 2;
dirLight.shadow.camera.near = 0.1;
dirLight.shadow.camera.far = 40;
this.scene.add( dirLight );
await this.loadModel();
};
loadModel = async () => {
const obj = {
"f.obj": require('../assets/models/f.obj')
}
const model = await ExpoTHREE.loadAsync(
obj['f.obj'],
null,
obj
);
// this ensures the model will be small enough to be viewed properly
ExpoTHREE.utils.scaleLongestSideToSize(model, 1);
this.scene.add(model)
};
// When the phone rotates, or the view changes size, this method will be called.
onResize = ({ x, y, scale, width, height }) => {
// Let's stop the function if we haven't setup our scene yet
if (!this.renderer) {
return;
}
this.camera.aspect = width / height;
this.camera.updateProjectionMatrix();
this.renderer.setPixelRatio(scale);
this.renderer.setSize(width, height);
};
// Called every frame.
onRender = delta => {
// Finally render the scene with the Camera
this.renderer.render(this.scene, this.camera);
};
render() {
return (
<GraphicsView
onContextCreate={this.onContextCreate}
onRender={this.onRender}
onResize={this.onResize}
/>
);
}
}
export default Model;
```
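A note on why this likely fails (hedged, based on how three-orbitcontrols works): OrbitControls attaches mouse/touch listeners to a DOM element, and inside an Expo GL context there is no DOM, so this.renderer.domElement is undefined and the control never receives input. One alternative is to drive the camera yourself from React Native gestures. A minimal sketch with PanResponder (angles, speeds, and radius are assumptions, not values from the original code):
```
import { PanResponder } from 'react-native';

// Hypothetical sketch: orbit the camera around the origin from drag gestures.
// Call setupOrbit() once in onContextCreate, then wrap <GraphicsView /> in
// <View style={{ flex: 1 }} {...this.panResponder.panHandlers}>.
setupOrbit = () => {
  this.theta = 0;         // horizontal orbit angle
  this.phi = Math.PI / 4; // vertical orbit angle
  this.radius = 5;        // camera distance, assumed
  this.panResponder = PanResponder.create({
    onStartShouldSetPanResponder: () => true,
    onPanResponderGrant: () => {
      // remember the angles at the start of the gesture; dx/dy are cumulative
      this.startTheta = this.theta;
      this.startPhi = this.phi;
    },
    onPanResponderMove: (evt, gestureState) => {
      this.theta = this.startTheta - gestureState.dx * 0.005;
      this.phi = Math.min(Math.PI - 0.1,
        Math.max(0.1, this.startPhi - gestureState.dy * 0.005)); // clamp at the poles
      this.camera.position.set(
        this.radius * Math.sin(this.phi) * Math.sin(this.theta),
        this.radius * Math.cos(this.phi),
        this.radius * Math.sin(this.phi) * Math.cos(this.theta),
      );
      this.camera.lookAt(0, 0, 0);
    },
  });
};
```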