How to continue downloading multiple files (AWS server images) in the background in React Native iOS - react-native

I created an app where the user can download multiple images in one click using react-native-fs, which works perfectly on Android. But on iOS, when the app becomes inactive the download stops and the user has to start the download again.
async.eachSeries(DownloadData, async function (tourData, finish) {
  console.log("# resumable : 655612", tourData);
  var fileURL = tourData.path;
  var fileExtension = "/" + tourData.name + "Image" + p + ".png";
  p = p + 1;
  const downloadDest = RNFS.DocumentDirectoryPath + fileExtension;
  var jobId = -1;
  const ret = RNFS.downloadFile({
    fromUrl: encodeURI(fileURL),
    toFile: downloadDest,
    connectionTimeout: 1000 * 10,
    readTimeout: 1000 * 10,
    background: true,      // continue the download when the app is backgrounded (iOS)
    discretionary: true,
    progressDivider: 1,
    resumable: (res) => {
      console.log("# resumable", res);
    },
    begin: (res) => {
      console.log(res);
    },
    progress: (data) => {
      console.log(data);
    },
  });
  jobId = ret.jobId;
  if (await RNFS.isResumable(jobId)) {
    console.log("# resumable : # resumable : # resumable :", jobId);
    RNFS.resumeDownload(jobId);
  }
  ret.promise.then((res) => {
    finish();
  }).catch(err => {
    finish();
  });
}, function (err) {
  if (!err) {
    callback(true);
  } else {
    callback(false);
  }
});

Running downloads in the background on iOS requires a few extra settings; check this section: https://github.com/itinance/react-native-fs#background-downloads-tutorial-ios
They also mention that iOS only gives you about 30 seconds after handleEventsForBackgroundURLSession:
BE AWARE! iOS will give about 30 sec. to run your code after handleEventsForBackgroundURLSession is called and until completionHandler is triggered, so don't do anything that might take a long time (like unzipping); you will be able to do it after the user re-launches the app, otherwise iOS will terminate your app.
I hope this helps.
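If you also want to pick up downloads that iOS paused while the app was inactive, one option is to re-check the jobs when the app returns to the foreground. This is only a minimal sketch, reusing RNFS.isResumable/resumeDownload from the code above together with React Native's AppState API; pendingJobIds is a hypothetical array you would maintain yourself:

  import { AppState } from 'react-native';
  import RNFS from 'react-native-fs';

  // Hypothetical bookkeeping: push every jobId returned by RNFS.downloadFile() into this array.
  const pendingJobIds = [];

  AppState.addEventListener('change', async (state) => {
    if (state !== 'active') return;
    for (const jobId of pendingJobIds) {
      // Resume any download that was paused while the app was in the background.
      if (await RNFS.isResumable(jobId)) {
        RNFS.resumeDownload(jobId);
      }
    }
  });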

Related

React Native: create a custom folder and save photos using react-native-image-crop-picker (Android)

I'm using react-native-image-crop-picker. I want to save photos taken from the camera to the gallery or to a custom folder. Normally, photos taken with react-native-image-crop-picker are saved in the cache folder. I want to change that to a custom folder and display the images in the gallery with auto-generated names like p1.jpg for the first photo, p2.jpg for the second, and so on. Is there any way to implement this?
const takePhotoFromCamera = () => {
  ImagePicker.openCamera({
    width: 300,
    height: 400,
    cropping: true,
    includeBase64: true,
    freeStyleCropEnabled: true,
    compressImageQuality: 0,
    enableRotationGesture: true,
  }).then(image => {
    console.log(image);
  });
  console.warn('Take photo');
};
react-native-image-crop-picker is what I used.
You can achieve that by using rn-fetch-blob's writeFile method.
Below are sample functions that achieve this by combining react-native-image-crop-picker and rn-fetch-blob:
import {PermissionsAndroid} from 'react-native';
import ImagePicker from 'react-native-image-crop-picker';
import RNFetchBlob from 'rn-fetch-blob';

const imageCropper = () => {
  ImagePicker.openCamera({
    cropping: true,
    mediaType: 'photo',
    includeBase64: true,
  }).then((imageData) => {
    const base64Data = imageData.data;
    const fileName = getUniqueFileName('jpg');
    writeFileToStorage(base64Data, fileName);
  });
};

const getUniqueFileName = (fileExt) => {
  // It is better to name the file with the current timestamp to get a unique name
  var d = new Date();
  var year = d.getFullYear();
  var month = d.getMonth() + 1;
  var date = d.getDate();
  var hour = d.getHours();
  var minute = d.getMinutes();
  var fileName = 'IMG' + year + month + date + hour + minute + '.' + fileExt;
  return fileName;
};

const writeFileToStorage = async (base64Data, fileName) => {
  try {
    const result = await PermissionsAndroid.request(
      PermissionsAndroid.PERMISSIONS.WRITE_EXTERNAL_STORAGE,
    );
    if (result === PermissionsAndroid.RESULTS.GRANTED) {
      const dirs = RNFetchBlob.fs.dirs;
      const folderPath = dirs.SDCardDir + '/Custom Folder/';
      const fullPath = folderPath + fileName;
      // Create the folder first (ignore the error if it already exists), then write the file.
      await RNFetchBlob.fs.mkdir(folderPath).catch(() => {});
      const res = await RNFetchBlob.fs.writeFile(fullPath, base64Data, 'base64');
      console.log('file saved :', res);
    } else {
      console.log('Permission Not Granted');
    }
  } catch (err) {
    console.log(err);
  }
};
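Note that a file written to a custom folder may not show up in gallery apps until Android's media scanner indexes it. If the saved images don't appear, a small sketch (assuming rn-fetch-blob's fs.scanFile is available in your version, and reusing fullPath from writeFileToStorage above) is:

  // Ask the Android media scanner to index the new file so gallery apps can list it.
  RNFetchBlob.fs.scanFile([{ path: fullPath, mime: 'image/jpeg' }])
    .then(() => console.log('media scan complete'))
    .catch((err) => console.log('media scan failed', err));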

Discord.JS Purge.js command issue

OK, so my bot got rebuilt with somewhat different code.
I'm using a somewhat simplified fs-based command and event handler. My command works as intended.
But I want to add the number of purged messages to the fields of the RichEmbed, and it keeps erroring out.
Here is my purge.js file:
const Discord = require('discord.js')
module.exports = {
  name: 'purge',
  description: 'Purge up to 99 messages.',
  execute(message, args) {
    console.log("purging messages")
    const embed = new Discord.RichEmbed()
      .setTitle("Success")
      .setColor(0x00AE86)
      .setFooter("Guardian", "https://raw.githubusercontent.com/phantomdev-github/Resources/master/Discord%20Bots/Guardian/src/avatar.png")
      .setThumbnail("https://raw.githubusercontent.com/phantomdev-github/Resources/master/Discord%20Bots/Guardian/src/avatar.png")
      .setTimestamp()
      .setURL("https://github.com/phantomdev-github/Resources/tree/master/Discord%20Bots/Guardian")
      .addField("Bot Messages Purged", "missing code here", false)
      .addField("User Pins Purged", "missing code here", false)
      .addField("User Messages Purged", "missing code here", false)
      .addField("Total Messages Purged", "missing code here", false)
    message.channel.send({ embed });
    const amount = parseInt(args[0]) + 1;
    if (isNaN(amount)) {
      return message.reply('that doesn\'t seem to be a valid number.');
    } else if (amount <= 1 || amount > 100) {
      return message.reply('you need to input a number between 1 and 99.');
    }
    message.channel.bulkDelete(amount, true).catch(err => {
      console.error(err);
      message.channel.send('there was an error trying to prune messages in this channel!');
    });
  },
};
If it helps, this is my index.js:
const fs = require('fs');
const Discord = require('discord.js');
const client = new Discord.Client();
const { token } = require('./token.json');
client.commands = new Discord.Collection();
const commandFiles = fs.readdirSync('./commands').filter(file => file.endsWith('.js'));
for (const file of commandFiles) {
  const command = require(`./commands/${file}`);
  client.commands.set(command.name, command);
  console.log(file, command)
}
fs.readdir('./events/', (err, files) => {
  if (err) return console.error(err);
  files.forEach(file => {
    if (!file.endsWith('.js')) return;
    const eventFunction = require(`./events/${file}`);
    console.log(eventFunction)
    eventFunction.execute(client)
  });
});
client.login(token);
And this is my message.js:
const { prefix } = require('./prefix.json');
module.exports = {
  name: 'message',
  description: '',
  execute: function (client) {
    client.on('message', message => {
      if (!message.content.startsWith(prefix) || message.author.bot) return;
      const args = message.content.slice(prefix.length).split(/ +/);
      const command = args.shift().toLowerCase();
      if (!client.commands.has(command)) return;
      try {
        client.commands.get(command).execute(message, args);
      } catch (error) {
        console.error(error);
        message.reply('there was an error trying to execute that command!');
      }
    })
  }
};
Basically I'm trying to figure out what to put into the "missing code here" sections. Also, a way to lock it to people with Administrator permissions only would be useful; I attempted that, but it failed to work with the embed.
If I understand you right, you want to know how to get the counts of the purged pins, bot messages and user messages. For this you need to build and send your embed after you have deleted the messages.
purge.js
const Discord = require('discord.js')
module.exports = {
  name: 'purge',
  description: 'Purge up to 99 messages.',
  execute(message, args) {
    console.log("purging messages")
    const amount = parseInt(args[0]) + 1;
    if (isNaN(amount)) {
      return message.reply('that doesn\'t seem to be a valid number.');
    } else if (amount <= 1 || amount > 100) {
      return message.reply('you need to input a number between 1 and 99.');
    }
    message.channel.bulkDelete(amount, true).then(deletedMessages => {
      // Filter the deleted messages with .filter()
      var botMessages = deletedMessages.filter(m => m.author.bot);
      var userPins = deletedMessages.filter(m => m.pinned);
      var userMessages = deletedMessages.filter(m => !m.author.bot);
      const embed = new Discord.RichEmbed()
        .setTitle("Success")
        .setColor(0x00AE86)
        .setFooter("Guardian", "https://raw.githubusercontent.com/phantomdev-github/Resources/master/Discord%20Bots/Guardian/src/avatar.png")
        .setThumbnail("https://raw.githubusercontent.com/phantomdev-github/Resources/master/Discord%20Bots/Guardian/src/avatar.png")
        .setTimestamp()
        .setURL("https://github.com/phantomdev-github/Resources/tree/master/Discord%20Bots/Guardian")
        .addField("Bot Messages Purged", botMessages.size, false)
        .addField("User Pins Purged", userPins.size, false)
        .addField("User Messages Purged", userMessages.size, false)
        .addField("Total Messages Purged", deletedMessages.size, false);
      message.channel.send(embed);
    }).catch(err => {
      console.error(err);
      message.channel.send('there was an error trying to prune messages in this channel!');
    });
  },
};
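You also asked about locking the command to administrators. A minimal sketch (assuming discord.js v11, which the RichEmbed usage suggests) is to bail out at the top of execute(), before building the embed:

  // Only allow guild members with the Administrator permission to run the purge command.
  if (!message.member || !message.member.hasPermission('ADMINISTRATOR')) {
    return message.reply('you need the Administrator permission to use this command.');
  }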

Downloading remote image to local device is not listed in the Photo Library

I am developing a React Native application. Now, I am trying to download a remote image to the local device.
This is my code:
downloadFile = () => {
  var date = new Date();
  var url = 'https://static.standard.co.uk/s3fs-public/thumbnails/image/2016/05/22/11/davidbeckham.jpg?w968';
  var ext = this.extention(url);
  ext = "." + ext[0];
  const { config, fs } = RNFetchBlob;
  let PictureDir = fs.dirs.PictureDir;
  let options = {
    fileCache: true,
    addAndroidDownloads: {
      useDownloadManager: true,
      notification: true,
      path: PictureDir + "/image_" + Math.floor(date.getTime() + date.getSeconds() / 2) + ext,
      description: 'Image'
    }
  };
  config(options).fetch('GET', url).then((res) => {
    Alert.alert("Success Downloaded");
  });
}

extention = (filename) => {
  return (/[.]/.exec(filename)) ? /[^.]+$/.exec(filename) : undefined;
}
I am following this tutorial, https://medium.com/#derrybernicahyady/simple-download-file-react-native-2a4db7d51597?fbclid=IwAR1XR75fivHPtE8AfpXKUuFdaLIOehii4ahI4u0lMVgu7ee62yVmnqDnd04. When I run my code, it says the download was successful. But when I checked my photo library, the image is not there. What is wrong with my code?
Use the CameraRoll.saveToCameraRoll method for this instead.
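For reference, a minimal sketch of that approach (assuming the CameraRoll module, e.g. from @react-native-community/cameraroll, and reusing rn-fetch-blob from the question; on iOS you also need the NSPhotoLibraryAddUsageDescription key in Info.plist):

  import { Alert } from 'react-native';
  import CameraRoll from '@react-native-community/cameraroll';
  import RNFetchBlob from 'rn-fetch-blob';

  const downloadToPhotoLibrary = (url) => {
    // Download to a cached file first, then copy it into the photo library.
    RNFetchBlob.config({ fileCache: true, appendExt: 'jpg' })
      .fetch('GET', url)
      .then((res) => CameraRoll.saveToCameraRoll('file://' + res.path(), 'photo'))
      .then(() => Alert.alert('Saved to Photo Library'))
      .catch((err) => console.log(err));
  };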

Ionic 4 Image upload using Angular HTTP

I use Ionic 4 and Angular 7 with PHP as the back-end.
I am trying to upload files (images/videos/PDFs/audio). Is there a general way to send them?
I tried to send an image using the camera plugin; it returns the URI, and the image displays in the app using an img tag.
But I can't get the file itself to send it using FormData.
openCamera() {
  const options: CameraOptions = {
    quality: 100,
    destinationType: this.camera.DestinationType.FILE_URI,
    encodingType: this.camera.EncodingType.JPEG,
    mediaType: this.camera.MediaType.PICTURE,
    sourceType: this.camera.PictureSourceType.PHOTOLIBRARY
  };
  this.camera.getPicture(options).then((imageData) => {
    this.imageData = imageData;
    this.image = (<any>window).Ionic.WebView.convertFileSrc(imageData);
    // this.image works fine in an img tag
    this.sendMsg(this.image);
  }, (err) => {
    // Handle error
    alert('error ' + JSON.stringify(err));
  });
}

sendMsg(file?) {
  const data = new FormData();
  data.set('group_id', this.groupId);
  data.set('text', this.msg);
  if (file) {
    data.set('file', this.image);
    data.set('text', '');
  }
  this.messeges.push(data);
  this._messengerService.postMsg(data).subscribe(
    res => {
      console.log('res ', res);
      if (res.success === true) {
        console.log('data added ', res);
      }
    }
  );
}
I want to use the URI to get the actual file.
The Ionic Native camera plugin returns the image data as base64. Since you need to send it as FormData, you have to convert the base64 string to a Blob externally.
dataURItoBlob(dataURI) {
  // convert base64/URLEncoded data component to raw binary data held in a string
  var byteString;
  if (dataURI.split(',')[0].indexOf('base64') >= 0)
    byteString = atob(dataURI.split(',')[1]);
  else
    byteString = unescape(dataURI.split(',')[1]);
  // separate out the mime component
  var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
  // write the bytes of the string to a typed array
  var ia = new Uint8Array(byteString.length);
  for (var i = 0; i < byteString.length; i++) {
    ia[i] = byteString.charCodeAt(i);
  }
  return new Blob([ia], { type: mimeString });
}
and
profileUpdate(options) {
  // Note: imageData arrives here as a base64 string, so the camera options should use
  // destinationType: this.camera.DestinationType.DATA_URL.
  this.camera.getPicture(options).then((imageData) => {
    let base64Image = 'data:image/jpg;base64,' + imageData;
    let data = this.dataURItoBlob(base64Image);
    let formData = new FormData();
    formData.append('profile', data, "filename.jpg");
    // here you pass the formData to your API
  });
}
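To actually send that FormData to the PHP back-end, here is a minimal sketch using Angular's HttpClient; this.http is assumed to be an injected HttpClient, and the URL is a placeholder for your own endpoint:

  uploadProfile(formData) {
    // POST the multipart form data built in profileUpdate() to your API.
    this.http.post('https://example.com/api/upload', formData).subscribe(
      (res) => console.log('upload response', res),
      (err) => console.error('upload failed', err)
    );
  }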

Modify Kurento group call example to support only audio

I need to modify the Kurento group call example from Link
to send only audio if one participant has no camera. Right now audio is only received when a camera is used; when only a microphone is available I receive a DeviceMediaError.
I managed to filter whether a camera device is connected or not and then send only audio, but this doesn't work. Maybe the participant should have an audio tag instead of a video tag?
EDIT: It only works in Firefox, not in Chrome. Any ideas?
In the file https://github.com/Kurento/kurento-tutorial-java/blob/master/kurento-group-call/src/main/java/org/kurento/tutorial/groupcall/UserSession.java, change the following line to:
sender.getOutgoingWebRtcPeer().connect(incoming, MediaType.AUDIO);
and set the offer media constraints to video: false in the browser JS file.
Updated code:
let constraints = {
  audio: true,
  video: false
};
let localParticipant = new Participant(sessionId);
participants[sessionId] = localParticipant;
localVideo = document.getElementById('local_video');
let video = localVideo;
let options = {
  localVideo: video,
  mediaConstraints: constraints,
  onicecandidate: localParticipant.onIceCandidate.bind(localParticipant),
  configuration: { iceServers: [
    {"url": "stun:74.125.200.127:19302"},
  ] }
};
localParticipant.rtcPeer = new kurentoUtils.WebRtcPeer.WebRtcPeerSendonly(options, function(error) {
  if (error) {
    return console.error(error);
  }
  localVideoCurrentId = sessionId;
  localVideo = document.getElementById('local_video');
  localVideo.src = localParticipant.rtcPeer.localVideo.src;
  localVideo.muted = true;
  this.generateOffer(localParticipant.offerToReceiveVideo.bind(localParticipant));
});
server.js code
function join(socket, room, callback) {
  let userSession = userRegister.getById(socket.id);
  userSession.setRoomName(room.name);
  room.pipeline.create('WebRtcEndpoint', {mediaProfile: 'WEBM_AUDIO_ONLY'}, (error, outgoingMedia) => {
    if (error) {
      console.error('no participant in room');
      if (Object.keys(room.participants).length === 0) {
        room.pipeline.release();
      }
      return callback(error);
    }
    // else
    outgoingMedia.setMaxAudioRecvBandwidth(100);
    // ... rest of the original join() is unchanged
Add the mediaProfile parameter on the server side when joining the room.
function getEndpointForUser(userSession, sender, callback) {
  if (userSession.id === sender.id) {
    return callback(null, userSession.outgoingMedia);
  }
  let incoming = userSession.incomingMedia[sender.id];
  if (incoming == null) {
    console.log(`user : ${userSession.id} create endpoint to receive video from : ${sender.id}`);
    getRoom(userSession.roomName, (error, room) => {
      if (error) {
        return callback(error);
      }
      room.pipeline.create('WebRtcEndpoint', {mediaProfile: 'WEBM_AUDIO_ONLY'}, (error, incomingMedia) => {
        if (error) {
          if (Object.keys(room.participants).length === 0) {
            room.pipeline.release();
          }
          return callback(error);
        }
        console.log(`user: ${userSession.id} successfully create pipeline`);
        incomingMedia.setMaxAudioRecvBandwidth(0);
        incomingMedia.getMaxAudioRecvBandwidth(0);
        // ... rest of the original getEndpointForUser() is unchanged
Add the mediaProfile parameter when accepting the call.
Hope this helps.