I am using tensorflow-models/mobilenet in my React Native application.
The application works well when there is a network connection, but when the device is offline the model fails to load on iOS.
async function predictMobilenet() {
  try {
    setTextshow(' LOADING...');
    const start = new Date().getTime();

    // Wait for the TensorFlow.js backend, then load the MobileNet model
    console.log('Load model...');
    await tf.ready();
    console.log('tf ready, loading model');
    const model = await mobilenet.load();

    // Read the selected image as base64 and decode it into a tensor
    const imgB64 = await FileSystem.readAsStringAsync(selectedImage.localUri, {
      encoding: FileSystem.EncodingType.Base64,
    });
    const imgBuffer = tf.util.encodeString(imgB64, 'base64').buffer;
    const raw = new Uint8Array(imgBuffer);
    const imageTensor = decodeJpeg(raw);

    // Classify and report the result plus elapsed time
    const prediction = await model.classify(imageTensor);
    const end = new Date().getTime();
    setTextshow(JSON.stringify(prediction));
    setTime(end - start);
  } catch (e) {
    setTextshow(e.toString());
    console.log(e);
  }
}
The line const model = await mobilenet.load(); is the one that does not work on iOS when the device is offline.
Error on iOS:
error: network request failed.
On Android, the same application works fine.
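By default mobilenet.load() downloads the model weights from TensorFlow Hub over the network, which would explain why it only works online. A possible workaround, sketched below under the assumption that you export the MobileNet model files (model.json plus weight shards) and bundle them with the app, is to load them through bundleResourceIO from @tensorflow/tfjs-react-native and pass the resulting IOHandler to mobilenet.load via its modelUrl option. The asset paths are placeholders, not part of the original code.

// Sketch only: assumes the MobileNet model files are bundled with the app;
// the require() paths below are placeholders.
import * as tf from '@tensorflow/tfjs';
import * as mobilenet from '@tensorflow-models/mobilenet';
import { bundleResourceIO } from '@tensorflow/tfjs-react-native';

const modelJson = require('./assets/mobilenet/model.json');
const modelWeights = require('./assets/mobilenet/group1-shard1of1.bin');

async function loadOfflineMobilenet() {
  await tf.ready();
  // mobilenet.load accepts an io.IOHandler through modelUrl, so the weights
  // never have to be fetched over the network.
  return mobilenet.load({
    version: 2,
    alpha: 1.0,
    modelUrl: bundleResourceIO(modelJson, modelWeights),
  });
}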
I'm setting the value of a global variable from a fetched value and using it all over the app.
When I first run the app using Expo I get the error TypeError: undefined is not an object (evaluating 'global.value'), but when I hit CMD+S on any page of the app, the app gets reloaded and everything works fine.
Here's the code of global.js:
var language = new Object();
var url = new Object();
module.exports = { language, url }
and here's how I set the values in App.js:
async checkServiceLevel(){
  const response = await fetch(Config.backendAPI + `/service.php`);
  const data = await response.json();
  if (data['servicelevel'] === "up") {
    global.url['url'] = Config.backendAPI;
  } else {
    const new_response = await fetch('https://backendrl0112.fra1.digitaloceanspaces.com/backendrl.json');
    const new_data = await new_response.json();
    global.url['url'] = new_data['url'];
  }
}

async getLanguage(langue){
  const response = await fetch(global.url['url'] + `/getLanguageJson.php`);
  const data = await response.json();
  if (langue == null) {
    global.language['language'] = data['Languages']['en'];
  } else {
    global.language['language'] = data['Languages'][langue];
  }
  await AsyncStorage.setItem('jsonLanguage', JSON.stringify(data['Languages']));
}
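The error suggests that components read global.url and global.language before checkServiceLevel and getLanguage have finished, and the CMD+S reload only succeeds because the globals were already filled during the first (failed) render. A minimal sketch of one way to sequence this, assuming a hypothetical isReady flag and placeholder component names, would be:

// Sketch only: the isReady flag and <MainApp /> placeholder are assumptions,
// not part of the original App.js.
export default class App extends React.Component {
  state = { isReady: false };

  async componentDidMount() {
    // Populate the globals before anything tries to read them.
    await this.checkServiceLevel();
    await this.getLanguage(null);
    this.setState({ isReady: true });
  }

  render() {
    // Render nothing (or a splash screen) until the fetched values exist.
    if (!this.state.isReady) return null;
    return <MainApp />; // stands in for whatever the real root component is
  }
}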
I was using ffmpeg.min.js through <script src='https://unpkg.com/@ffmpeg/ffmpeg@0.9.6/dist/ffmpeg.min.js'></script>
async function mergeVideo(video, audio) {
  let { createFFmpeg, fetchFile } = FFmpeg;
  let ffmpeg = createFFmpeg();
  await ffmpeg.load();
  ffmpeg.FS('writeFile', 'video.mp4', await fetchFile(video));
  ffmpeg.FS('writeFile', 'audio.mp4', await fetchFile(audio));
  await ffmpeg.run('-i', 'video.mp4', '-i', 'audio.mp4', '-c', 'copy', 'output.mp4');
  let data = ffmpeg.FS('readFile', 'output.mp4');
  return new Uint8Array(data.buffer);
}

function saveVideo(fileName, byte) {
  var blob = new Blob([byte], { type: "video/mp4" });
  var link = document.createElement('a');
  link.href = window.URL.createObjectURL(blob);
  link.download = fileName;
  link.click();
}

mergeVideo("/videoplayback.webm", "/videoplayback2.webm").then(r => {
  saveVideo("mergedVideo.mp4", r);
});
I was using the above code on a website, but I am not sure how to do the same thing in React Native.
I tried FFmpegKit, but its API is different.
Can someone show similar code for React Native?
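Since ffmpeg.wasm targets the browser, the React Native equivalent usually goes through ffmpeg-kit-react-native instead. A rough sketch of the same merge, assuming the input files already exist on the device and that the paths shown are placeholders, might look like this:

// Sketch only: file paths are placeholders; point them at wherever the
// video and audio files actually live on the device.
import { FFmpegKit, ReturnCode } from 'ffmpeg-kit-react-native';
import RNFS from 'react-native-fs';

async function mergeVideo(videoPath, audioPath) {
  const outputPath = `${RNFS.DocumentDirectoryPath}/output.mp4`;

  // Same arguments as the ffmpeg.wasm version, passed as one command string.
  const session = await FFmpegKit.execute(
    `-i ${videoPath} -i ${audioPath} -c copy ${outputPath}`
  );

  const returnCode = await session.getReturnCode();
  if (!ReturnCode.isSuccess(returnCode)) {
    throw new Error('ffmpeg merge failed');
  }
  // The merged file stays on the device; there is no Blob/download step as on the web.
  return outputPath;
}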
I am attempting to write a script using the puppeteer library for Node, and I'm running it against the microtask platform rapidworkers.com.
I have run into a challenge while writing the part of the script that solves the captcha on the rapidworkers login page: when I try to run my script with node index.js, I get the error: Cannot find module './get-username'
Here is my code (copied from https://jsoverson.medium.com/bypassing-captchas-with-headless-chrome-93f294518337):
const puppeteer = require('puppeteer');
const request = require('request-promise-native');
const poll = require('promise-poller').default;

const siteDetails = {
  sitekey: '6LeEP20UAAAAAPYQsP7kIhE7XUT_LsDYjzO46uG3',
  pageurl: 'https://rapidworkers.com/Login'
};

const getUsername = require('./get-username');
const getPassword = require('./get-password');
const apiKey = require('./api-key');

const chromeOptions = {
  executablePath: '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome',
  headless: false,
  slowMo: 10,
  defaultViewport: null
};

(async function main() {
  const browser = await puppeteer.launch(chromeOptions);
  const page = await browser.newPage();
  await page.goto('https://rapidworkers.com/Login');
  const requestId = await initiateCaptchaRequest(apiKey);
  await page.type('#username', getUsername());
  const password = getPassword();
  await page.type('#password', password);
  const response = await pollForRequestResults(apiKey, requestId);
  await page.evaluate(`document.getElementById("g-recaptcha-response").innerHTML="${response}";`);
  page.click('#register-form button[type=submit]');
})();

async function initiateCaptchaRequest(apiKey) {
  const formData = {
    method: 'userrecaptcha',
    googlekey: siteDetails.sitekey,
    key: apiKey,
    pageurl: siteDetails.pageurl,
    json: 1
  };
  const response = await request.post('http://2captcha.com/in.php', { form: formData });
  return JSON.parse(response).request;
}

async function pollForRequestResults(key, id, retries = 30, interval = 1500, delay = 15000) {
  await timeout(delay);
  return poll({
    taskFn: requestCaptchaResults(key, id),
    interval,
    retries
  });
}

function requestCaptchaResults(apiKey, requestId) {
  const url = `http://2captcha.com/res.php?key=${apiKey}&action=get&id=${requestId}&json=1`;
  return async function() {
    return new Promise(async function(resolve, reject) {
      const rawResponse = await request.get(url);
      const resp = JSON.parse(rawResponse);
      if (resp.status === 0) return reject(resp.request);
      resolve(resp.request);
    });
  };
}

const timeout = millis => new Promise(resolve => setTimeout(resolve, millis));
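The Cannot find module './get-username' error simply means the three local helper modules the blog post requires (./get-username, ./get-password, ./api-key) do not exist in your project; they were the author's own files, not npm packages. A minimal sketch of what they could contain, with placeholder values you would replace with your own credentials, is:

// get-username.js -- placeholder; export a function returning your username
module.exports = () => 'your-rapidworkers-username';

// get-password.js -- placeholder; export a function returning your password
module.exports = () => 'your-rapidworkers-password';

// api-key.js -- placeholder; export your 2captcha API key as a string
module.exports = 'your-2captcha-api-key';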
I need a way to upload an image to Firebase.
I tried to use the react-native-fetch-blob library,
but I think there is something wrong with how I installed it.
No need to use react-native-fetch-blob. Here is how I do it in my project.
Install both react-native-firebase and react-native-image-picker, following the installation steps from their documentation.
Then implement two small functions: one to pick the image and one to upload it to Firebase. Here is the sample code.
// 1. Import the required libraries
import firebase from 'react-native-firebase';
import ImagePicker from 'react-native-image-picker';

// 2. Create a function to pick the image
const pickImage = () => {
  return new Promise((resolve, reject) => {
    // pickerOptions is assumed to be defined elsewhere in your code
    ImagePicker.showImagePicker(pickerOptions, response => {
      if (response.didCancel) return; // user cancelled, nothing to resolve
      if (response.error) {
        const message = `An error occurred: ${response.error}`;
        reject(new Error(message));
        return;
      }
      const { path: uri } = response;
      resolve(uri);
    });
  });
};

// 3. Create a function to upload to Firebase Storage
const uploadImage = (fileName, uri) => {
  return firebase
    .storage()
    .ref(`uploads/${fileName}`)
    .putFile(uri);
};
Then simply call both functions as needed; here is a sample that picks an image and immediately uploads it.
const pickImageAndUpload = async () => {
  const uri = await pickImage();
  const fileName = 'someImage.jpg';
  const { state, downloadURL } = await uploadImage(fileName, uri);
};
async function uploadImageAsync(itemImage, passedParameter, ItemName, ItemDesc, ItemPrice, ItemWeight) {
  // Fetch the local image URI and turn it into a blob for upload
  const response = await fetch(itemImage);
  const blob = await response.blob();
  console.log("blob of the image is", blob);

  var storageRef = firebase.storage().ref();
  var file = blob;
  var metadata = {
    contentType: 'image/jpeg',
  };
  const timeStamp = Date.now();
  var uploadTask = storageRef
    .child('CategoryDescription' + "/" + `${passedParameter}` + "/" + `${ItemName}`)
    .put(file, metadata);
  return uploadTask;
}

// For image picking
pickImage = async () => {
  const { CAMERA, CAMERA_ROLL } = Permissions;
  const permissions = {
    [CAMERA]: await Permissions.askAsync(CAMERA),
    [CAMERA_ROLL]: await Permissions.askAsync(CAMERA_ROLL),
  };
  if (permissions[CAMERA].status === 'granted' && permissions[CAMERA_ROLL].status === 'granted') {
    let result = await ImagePicker.launchImageLibraryAsync({
      allowsEditing: false,
      aspect: [4, 3],
      quality: 0.5,
    });
    if (!result.cancelled) {
      this.setState({ itemImage: result.uri });
    }
  }
};
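The uploadTask created above is a Firebase UploadTask, so if you need progress information or the final download URL you can observe it; a brief sketch of one way to do that (the handlers are illustrative, not part of the original code) could be:

// Sketch only: observe the upload created by uploadImageAsync.
uploadTask.on('state_changed',
  snapshot => {
    // progress as a percentage of bytes transferred
    const progress = (snapshot.bytesTransferred / snapshot.totalBytes) * 100;
    console.log('Upload is ' + progress + '% done');
  },
  error => console.log('Upload failed:', error),
  async () => {
    // upload finished; fetch the public download URL
    const downloadURL = await uploadTask.snapshot.ref.getDownloadURL();
    console.log('File available at', downloadURL);
  }
);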
Hello, I am creating an Express service that prints to a POS printer on an HTTP request. The problem is that my solution does not print images on the first print, but it does on the second print and onwards.
I am using https://github.com/song940/node-escpos
Here is my server.js
var print = require('./print');
var express = require('express');
var cors = require('cors');
var bodyParser = require('body-parser');

var app = express();

var corsOptions = {
  origin: '*',
  optionsSuccessStatus: 200 // some legacy browsers (IE11, various SmartTVs) choke on 204
};

app.get('/print', cors(corsOptions), (req, res) => {
  print.demoPrint();
  return res.send(req.body);
});

app.listen(3000, () => {
  console.log('Express server is started');
});
Here is my print.js
const escpos = require('escpos');
const fs = require('fs');

const device = new escpos.USB(0x1504, 0x003d);
const options = { encoding: "GB18030" };
const printer = new escpos.Printer(device);
const printSettings = JSON.parse(fs.readFileSync('printConfig.json', 'utf8'));

exports.demoPrint = () => {
  device.open(function() {
    printSettings.forEach((currentLine) => {
      if (currentLine.printType === "text") {
        console.log("PRINTING TEXT");
        printer
          .font(currentLine.font)
          .align(currentLine.align)
          .style('bu')
          .size(currentLine.size, currentLine.size)
          .text(currentLine.text);
      }
      else if (currentLine.printType === "image") {
        escpos.Image.load(currentLine.path, (image) => {
          console.log("PRINTING IMAGE");
          printer
            .align(currentLine.align)
            .size(currentLine.size, currentLine.size)
            .image(image);
        });
      }
      else if (currentLine.printType === "barcode") {
        console.log("PRINTING BARCODE");
        printer
          .align(currentLine.align)
          .barcode(currentLine.text, 'CODE39', {
            height: 64,
            font: 'B'
          });
      }
    });

    printer
      .text('\n')
      .text('\n')
      .cut()
      .close();
  });
};
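For reference, print.js assumes a printConfig.json next to it containing an array of line descriptors. An assumed example matching the fields the code reads (printType, font, align, size, text, path) might look like:

[
  { "printType": "text", "font": "a", "align": "ct", "size": 1, "text": "My Shop" },
  { "printType": "image", "align": "ct", "size": 1, "path": "./logo.png" },
  { "printType": "barcode", "align": "ct", "text": "123456789" }
]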
Since this hasn't been answered yet, I will provide the solution that worked for me. The problem was that the images only started loading the first time I submitted a request; by the second request they were already loaded and printed successfully.
I solved the problem by adding a check that does not allow printing until the images have loaded.
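One way to implement that check, sketched here as an assumption rather than the exact code I used, is to wrap escpos.Image.load in a Promise and wait for every image referenced in printConfig.json to load before opening the device:

// Sketch only: preload all images referenced in printConfig.json, then print.
const loadImage = (path) =>
  new Promise((resolve) => escpos.Image.load(path, (image) => resolve(image)));

exports.demoPrint = async () => {
  // Resolve every image up front so the print loop never runs ahead of them.
  const images = {};
  for (const line of printSettings) {
    if (line.printType === "image") {
      images[line.path] = await loadImage(line.path);
    }
  }

  device.open(function() {
    printSettings.forEach((currentLine) => {
      if (currentLine.printType === "image") {
        printer
          .align(currentLine.align)
          .size(currentLine.size, currentLine.size)
          .image(images[currentLine.path]);
      }
      // ... the text and barcode branches stay the same as before
    });

    printer.text('\n').text('\n').cut().close();
  });
};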