Multiple fetch requests not working in React Native - react-native

I'm building a network scanner in React Native.
The problem is that my API requests work, but not inside the for loop:
a loop of about 30 iterations is fine, but with more than 30 iterations
nothing works.
For example, if the IP to find is 192.168.1.79
and I set num_two to 60 and num_one to 1, it works fine.
But if I set num_two to 50 and num_one to 1, it no longer works.
My search function:
searchInNetwork(net) {
    return new Promise(async (resolve, reject) => {
        for (let num_one = 1; num_one < 255; num_one++) {
            let reseau = net + String(num_one);
            for (let num_two = 50; num_two < 255; num_two++) {
                let url = 'http://' + reseau + '.' + String(num_two) + ':3000/connect';
                await ApiService.getRequest(url, 100)
                    .then((data) => {
                        console.log("SUCCESS");
                        ApiService.url = 'http://' + reseau + '.' + String(num_two) + ':3000';
                        ApiService.connected = true;
                        resolve();
                    })
                    .catch((error) => {
                        if (url === "http://192.168.1.79:3000/connect")
                            console.log(error);
                    })
            }
        }
    });
}
My ApiService class:
class ApiService {
    static url = 'http://0.0.0.0:3000';
    static connected = false;

    static getUrl() {
        return (this.url);
    }

    static getRequest(route, timeout) {
        return new Promise(async (resolve, reject) => {
            fetch(route)
                .then((response) => response.json())
                .then((responseJson) => {
                    ApiService.connected = true;
                    resolve(responseJson);
                })
                .catch((error) => {
                    ApiService.connected = false;
                    reject(error);
                });
            if (timeout) {
                const error = new Error("Timeout net");
                setTimeout(reject, timeout, error);
            }
        });
    }
}

module.exports = ApiService;
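One thing worth checking, offered as a hedged sketch rather than a confirmed fix: the setTimeout in getRequest is never cleared and the underlying fetch is never aborted, so every probed address leaves a request running in the background, and those can pile up as the loops scan more hosts. A variant that cancels both once the request settles could look like this (assuming your React Native version supports AbortController with fetch):

// Sketch: same contract as getRequest, but the timer is cleared and the
// in-flight request is aborted as soon as the promise settles.
static getRequest(route, timeout) {
    const controller = new AbortController();
    const timer = setTimeout(() => controller.abort(), timeout);
    return fetch(route, { signal: controller.signal })
        .then((response) => response.json())
        .then((responseJson) => {
            ApiService.connected = true;
            return responseJson;
        })
        .catch((error) => {
            ApiService.connected = false;
            throw error;
        })
        .finally(() => clearTimeout(timer));
}

With that shape, searchInNetwork can simply await the returned promise and break out of both loops on the first success, instead of resolving while the scan keeps running.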

Related

Vue method: make a for loop wait for a function to complete

In this Vue component, I have a method containing a for loop that calls another method. The second method makes a request to the app server. I need the first method to wait for the second one before continuing the for loop. I've tried several async/await options but don't understand how to implement it.
methods: {
    selectFiles(files) {
        this.progressInfos = [];
        this.selectedFiles = files;
    },
    uploadFiles() {
        this.message = "";
        //var result = 0;
        for (let i = 0; i < this.selectedFiles.length; i++) {
            console.log(i)
            //result = await this.upload(i, this.selectedFiles[i]);
            this.upload(i, this.selectedFiles[i]);
        }
    },
    upload(idx, file) {
        this.progressInfos[idx] = { percentage: 0, fileName: file.name };
        //console.log("FinDocuNum:" + financialDocument.finDocId)
        FinancialDocumentDataService.upload(1, file, (event) => {
            this.progressInfos[idx].percentage = Math.round(100 * event.loaded / event.total);
        }).then((response) => {
            let prevMessage = this.message ? this.message + "\n" : "";
            this.message = prevMessage + response.status;
            return 1;
        }).catch(() => {
            this.progressInfos[idx].percentage = 0;
            this.message = "Could not upload the file:" + file.name;
            return 0;
        });
    }
}
The upload function must be async and return a promise like this:
async upload(file) {
    return new Promise((resolve, reject) => {
        axios({url: url, data: file, method: 'POST'})
            .then(resp => {
                resolve(resp)
            })
            .catch(err => {
                reject(err)
            })
    })
},
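Then the calling loop has to await each call, so iteration i + 1 only starts once upload i has settled. For the question's upload(idx, file), that means returning the FinancialDocumentDataService.upload(...) promise and making uploadFiles async; a minimal sketch of the calling side:

// Sketch: uploadFiles awaits each upload in turn (upload must return its promise).
async uploadFiles() {
    this.message = "";
    for (let i = 0; i < this.selectedFiles.length; i++) {
        try {
            await this.upload(i, this.selectedFiles[i]);
        } catch (e) {
            // a failed upload should not stop the remaining files
            console.error(e);
        }
    }
},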

stream s3 to dynamodb with fast-csv : not all data inserted

When a CSV file is uploaded to my S3 bucket, my Lambda is triggered to insert the data into DynamoDB.
I need a stream because the file is too large to be downloaded as a full object.
const batchWrite = async (clientDynamoDB, itemsToProcess) => {
    const ri = {};
    ri[TABLE_DYNAMO] = itemsToProcess.map((itm) => toPutRequest(itm));
    const params = { RequestItems: ri };
    await clientDynamoDB.batchWriteItem(params).promise();
};
function runStreamPromiseAsync(stream, clientDynamoDB) {
    return new Promise((resolve, reject) => {
        const sizeChunk = 25;
        let itemsToProcess = [];
        stream
            .pipe(fastCsv.parse({headers: Object.keys(schemaGeData), trim: true}))
            .on("data", (row) => {
                stream.pause();
                itemsToProcess.push(row);
                if (itemsToProcess.length === sizeChunk) {
                    batchWrite(clientDynamoDB, itemsToProcess).finally(() => {
                        stream.resume();
                    });
                    itemsToProcess = [];
                }
            })
            .on("error", (err) => {
                console.log(err);
                reject("Error");
            })
            .on("end", () => {
                stream.pause();
                console.log("end");
                batchWrite(clientDynamoDB, itemsToProcess).finally(() => {
                    resolve("OK");
                });
            });
    });
}
module.exports.main = async (event, context, callback) => {
    context.callbackWaitsForEmptyEventLoop = false;
    const AWS = require('aws-sdk');
    const https = require('https'); // needed below for the keep-alive agent
    const s3 = new AWS.S3();
    const object = event.Records[0].s3;
    const bucket = object.bucket.name;
    const file = object.object.key;
    const agent = new https.Agent({
        keepAlive: true
    });
    const client = new AWS.DynamoDB({
        httpOptions: {
            agent
        }
    });
    try {
        // get the CSV data as a stream
        const stream = s3
            .getObject({
                Bucket: bucket,
                Key: file
            })
            .createReadStream()
            .on('error', (e) => {
                console.log(e);
            });
        await runStreamPromiseAsync(stream, client);
    } catch (e) {
        console.log(e);
    }
};
When my file has 1000 lines everything is inserted, but when it has 5000 lines my function inserts only around 3000 of them, and that number is random... sometimes more, sometimes less.
So I'd like to understand what I am missing here.
I also read this article, but to be honest, even if you pause the second stream, the first one is still running. So if someone has any ideas on how to do this, it would be greatly appreciated!
Thanks
I found out why it was not fully processed: the callback of batchWriteItem can return UnprocessedItems. So I changed the batchWrite function, and also runStreamPromiseAsync a little, because not all the items from itemsToProcess may have been processed.
Anyway, here is the full code:
const batchWrite = (client, itemsToProcess) => {
    const ri = {};
    ri[TABLE_DYNAMO] = itemsToProcess.map((itm) => toPutRequest(itm));
    const items = { RequestItems: ri };
    const processItemsCallback = function(err, data) {
        return new Promise((resolve, reject) => {
            if (!data || data.length === 0) {
                return resolve();
            }
            if (err) {
                return reject(err);
            }
            let params = {};
            params.RequestItems = data.UnprocessedItems;
            return client.batchWriteItem(params, processItemsCallback);
        });
    };
    return client.batchWriteItem(items, processItemsCallback);
};
function runStreamPromiseAsync(stream, clientDynamoDB) {
    return new Promise((resolve, reject) => {
        const sizeChunk = 25;
        let itemsToProcess = [];
        let arrayPromise = [];
        stream
            .pipe(fastCsv.parse({headers: Object.keys(schemaGeData), trim: true}))
            .on("error", (err) => {
                console.log(err);
                reject("Error");
            })
            .on('data', data => {
                itemsToProcess.push(data);
                if (itemsToProcess.length === sizeChunk) {
                    arrayPromise.push(batchWrite(clientDynamoDB, itemsToProcess));
                    itemsToProcess = [];
                }
            })
            .on('end', () => {
                if (itemsToProcess.length !== 0) {
                    arrayPromise.push(batchWrite(clientDynamoDB, itemsToProcess));
                }
                resolve(Promise.all(arrayPromise).catch(e => {
                    reject(e)
                }));
            });
    });
}
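For comparison, the same retry idea can also be written without nesting callbacks inside a Promise constructor, by looping on UnprocessedItems with the SDK's .promise() interface. This is only a sketch reusing the TABLE_DYNAMO and toPutRequest helpers above; a real version would normally add exponential backoff between retries:

// Sketch: keep resubmitting whatever DynamoDB reports as unprocessed.
const batchWriteWithRetry = async (client, itemsToProcess) => {
    let requestItems = {};
    requestItems[TABLE_DYNAMO] = itemsToProcess.map((itm) => toPutRequest(itm));
    while (requestItems && Object.keys(requestItems).length > 0) {
        const data = await client.batchWriteItem({ RequestItems: requestItems }).promise();
        requestItems = data.UnprocessedItems; // empty object once everything is accepted
    }
};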

Lambda Edge Origin Request 502

I am getting this error when my Lambda@Edge function is invoked on the origin request of a CloudFront distribution. I am trying to change the meta tags of a single-page application built with React:
Validation error: The Lambda function returned an invalid entry in the headers object, the header must have a value field, header: cache-control has one or more entries which doesn't have a value field.
And this is my Lambda@Edge function:
const path = require('path');
const https = require('https');
const zlib = require('zlib');

let originalUri;

const downloadContent = (url, callback) => {
    https.get(url, (res) => {
        let response;
        let body = '';
        if (res.headers['content-encoding'] === 'gzip') {
            response = res.pipe(zlib.createGunzip());
        } else {
            response = res;
        }
        response.on('data', (chunk) => {
            body += chunk;
        });
        response.on('end', () => {
            callback(true, body, res.headers);
        });
    }).on('error', (e) => callback(false, e));
};

const fetchMetaData = (url, callback) => {
    downloadContent(url, (isOk, result, headers) => {
        if (!isOk) {
            console.log('Error fetching meta data:', result);
            callback(false);
        } else {
            const metaData = JSON.parse(result);
            let metaTags = '';
            console.log('metaData whate:', metaData);
            if (metaData) {
                if (metaData.title) {
                    metaTags += '<title>' + metaData.title + '</title>';
                    metaTags += '<meta property="og:title" content="' + metaData.title + '" />';
                }
                if (metaData.description) {
                    metaTags += '<meta name="description" content="' + metaData.description + '" />';
                    metaTags += '<meta property="og:description" content="' + metaData.description + '" />';
                }
                if (metaData.images) {
                    for (let i = 0; i < metaData.images.length; i++) {
                        const image = metaData.images[i];
                        metaTags += '<meta property="og:image" content="' + image + '" />';
                    }
                }
            }
            metaTags += '<meta property="og:url" content="' + originalUri + '" />';
            metaTags += '<meta property="og:type" content="website" />';
            callback(true, metaTags, headers);
        }
    });
};

const fetchIndexHtmlAndCreateCloudFrontResponse = (url, metaTags, metaHeaders, callback) => {
    downloadContent(url, (isOk, result, headers) => {
        if (!isOk) {
            callback(false);
        } else {
            var finalBody = result.replace(/(<title>title<\/title>)/gi, metaTags);
            const responseHeaders = {
                'content-type': [{key: 'Content-Type', value: 'text/html'}],
                'content-encoding': [{key: 'Content-Encoding', value: 'gzip'}],
                'accept-ranges': [{key: 'Accept-Ranges', value: 'bytes'}]
            };
            let eTag = '';
            if (metaHeaders) {
                const metaEtag = metaHeaders['etag'];
                if (metaEtag) {
                    eTag = metaEtag.replace(/"/g, '');
                }
            }
            if (headers) {
                const lastModified = headers['last-modified'];
                const cacheControl = headers['cache-control'];
                const contentETag = headers['etag'];
                if (lastModified) {
                    responseHeaders['last-modified'] = [{key: 'Last-Modified', value: lastModified}]
                }
                if (lastModified) {
                    responseHeaders['cache-control'] = [{key: 'Cache-Control', value: cacheControl}]
                }
                if (contentETag) {
                    eTag += contentETag.replace(/"/g, '');
                }
            }
            if (eTag !== '') {
                responseHeaders['etag'] = [{key: 'ETag', value: eTag}]
            }
            const newResponse = {
                status: '200',
                statusDescription: 'OK',
                headers: responseHeaders,
                body: finalBody,
            };
            callback(true, newResponse);
        }
    });
};

exports.handler = (event, context, callback) => {
    const { request, response, config } = event.Records[0].cf;
    originalUri = request.uri;
    const parsedPath = path.parse(originalUri);
    if (parsedPath.ext === '' && parsedPath.dir === '/collections') {
        console.log('parsedPath:', parsedPath.base);
        request.uri = '/index.html';
        let metaUrl = 'https://a2mfja.execute-api.eu-west-1.amazonaws.com/testenv/metatags';
        fetchMetaData(metaUrl, (isOk, metaTags, metaHeaders) => {
            if (!isOk) {
                return callback(null, request); // Return same request so CloudFront can process as usual.
            } else {
                const contentUrl = 'https://d3lyhnumbmrole.cloudfront.net/index.html';
                fetchIndexHtmlAndCreateCloudFrontResponse(contentUrl, metaTags, metaHeaders, (isOk, newResponse) => {
                    if (!isOk) {
                        return callback(null, request);
                    } else {
                        //newResponse.headers = request.headers;
                        return callback(null, newResponse);
                    }
                });
            }
        });
    } else {
        return callback(null, request);
    }
};
The error was here: cacheControl was undefined in the headers, but the Cache-Control entry was still added because the guard checked lastModified instead of cacheControl:
if (lastModified) {
    responseHeaders['cache-control'] = [{key: 'Cache-Control', value: cacheControl}]
}
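A corrected guard would presumably check the value it is about to use, so CloudFront never receives a header entry without a value field:

// Only attach Cache-Control when the origin actually returned one.
if (cacheControl) {
    responseHeaders['cache-control'] = [{key: 'Cache-Control', value: cacheControl}];
}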

vuejs Pagination GET Url Params

I am using https://github.com/gilbitron/laravel-vue-pagination and it works fine. I have included the pagination in the page as below:
<pagination :data="posts" @pagination-change-page="getResults"></pagination>
and the method
getResults(page = 1) {
    axios.get('api/post?page=' + page)
        .then(response => {
            this.posts = response.data;
        });
},
Now when I search by category, the pagination links show api/findByCategoy?category=5&page=1
selectCatgory(e) {
    axios.get('api/findByCategoy?category=' + e.target.value)
        .then((data) => {
            this.posts = data.data
        })
        .catch(() => {
        })
},
I have included the GET params in the URL. How do I change the path in getResults?
public function searchByCategory() {
    if ($search = \Request::get('category')) {
        $posts = Post::where('category_id', $search)->orderBy('created_at', 'desc')->paginate(20);
        $querystringArray = Input::only(['category']);
        $posts->appends($querystringArray);
    } else {
        $posts = Post::latest()->paginate(10);
    }
    return $posts;
}
axios.get('api/findByCategoy?category=' + e.target.value + '&page=1')
https://en.wikipedia.org/wiki/Query_string
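In other words, the category has to be carried along in every page request. A sketch of one way to do that on the Vue side, assuming a selectedCategory data property (a name introduced here for illustration, not from the original code):

// Sketch: remember the chosen category and reuse it whenever a page is requested.
selectCatgory(e) {
    this.selectedCategory = e.target.value; // assumed data property
    this.getResults(1);
},
getResults(page = 1) {
    const url = this.selectedCategory
        ? 'api/findByCategoy?category=' + this.selectedCategory + '&page=' + page
        : 'api/post?page=' + page;
    axios.get(url).then(response => {
        this.posts = response.data;
    });
},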

Continual Sqlite Corruption when Using express.js on repl.it Platform

I'm frequently getting corruption of my SQLite3 database when connecting with Express on the repl.it platform. I'm using a data-access class -> repository pattern.
snippet from app.js:
const AppDAO = require('./data/dao');
...
const dao = new AppDAO('./db.sqliteMaptack');
const mapTierRepo = new MapTierRepository(dao);
mapTierRepo.createTable();
The dao class:
const sqlite3 = require('sqlite3')
const Promise = require('bluebird')

class AppDAO {
    constructor(dbFilePath) {
        this.db = new sqlite3.Database(dbFilePath, (err) => {
            if (err) {
                console.log('Could not connect to database', err);
            } else {
                console.log('Connected to database');
            }
        })
    }

    run(sql, params = []) {
        return new Promise((resolve, reject) => {
            this.db.run(sql, params, function (err) {
                if (err) {
                    console.log('Error running sql ' + sql);
                    console.log(err);
                    reject(err);
                } else {
                    resolve({ id: this.lastID });
                }
            })
        })
    }

    get(sql, params = []) {
        return new Promise((resolve, reject) => {
            this.db.get(sql, params, (err, result) => {
                if (err) {
                    console.log('Error running sql: ' + sql);
                    console.log(err);
                    reject(err);
                } else {
                    resolve(result);
                }
            })
        })
    }

    all(sql, params = []) {
        return new Promise((resolve, reject) => {
            this.db.all(sql, params, (err, rows) => {
                if (err) {
                    console.log('Error running sql: ' + sql);
                    console.log(err);
                    reject(err)
                } else {
                    resolve(rows);
                }
            })
        })
    }
}

module.exports = AppDAO;
The repository:
class MapTierRepository {
    constructor(dao) {
        this.dao = dao;
    }

    createTable() {
        const sql =
            'CREATE TABLE IF NOT EXISTS MapTier (id INTEGER PRIMARY KEY AUTOINCREMENT,name TEXT)';
        return this.dao.run(sql);
    }

    create(name) {
        return this.dao.run(
            'INSERT INTO MapTier (name) VALUES (?)',
            [name]);
    }

    update(mapTier) {
        const { id, name } = mapTier
        return this.dao.run(
            'UPDATE MapTier SET name = ? WHERE id = ?',
            [name, id]
        );
    }

    delete(id) {
        return this.dao.run(
            'DELETE FROM MapTier WHERE id = ?',
            [id]
        );
    }

    getById(id) {
        return this.dao.get(
            'SELECT * FROM MapTier WHERE id = ?',
            [id]);
    }

    getAll() {
        return this.dao.all('SELECT * FROM MapTier');
    }
}

module.exports = MapTierRepository;
All of the repository methods work. However, when the repl.it instance crashes, it's likely to corrupt the database. Any thoughts?
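Not a confirmed diagnosis, but since the corruption shows up around crashes, one common mitigation is to close the handle cleanly when the container is stopped, so a restart is less likely to interrupt a write. A sketch against this AppDAO, using the standard sqlite3 close() API:

// Sketch: close the SQLite handle on shutdown signals before exiting.
const dao = new AppDAO('./db.sqliteMaptack');

const shutdown = () => {
    dao.db.close((err) => {
        if (err) console.log('Error closing database', err);
        process.exit(0);
    });
};

process.on('SIGINT', shutdown);
process.on('SIGTERM', shutdown);

Whether repl.it actually delivers those signals before killing the container is an assumption here; if the file keeps getting corrupted regardless, a hosted database is the more robust route.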