AzureMap error: atlas.min.js:55 Uncaught (in promise) TypeError: Cannot read properties of undefined (reading 'defaultStyle')

I am getting this Azure Maps error. The map itself renders fine, so I am not sure why the error is thrown. Can you help me fix it?
Here is my code for the AzureMap component:
const AzureMap = ({ azureMapKey, selectedSite }) => {
  const mapNodeRef = useRef();
  const isNavExpanded = useSelector(commonSelectors.getIsNavigationExpanded);

  const initializeMap = () => {
    const mapNode = mapNodeRef && mapNodeRef.current;
    const longitude =
      !isNullEmptyOrUndefined(selectedSite?.longitude) &&
      parseFloat(selectedSite?.longitude);
    const latitude =
      !isNullEmptyOrUndefined(selectedSite?.latitude) &&
      parseFloat(selectedSite?.latitude);
    if (!mapNode || !selectedSite) {
      return false;
    }
    const map = new atlas.Map(mapNode, {
      center: [longitude, latitude],
      view: 'auto',
      zoom: 10,
      authOptions: {
        authType: 'subscriptionKey',
        subscriptionKey: azureMapKey
      }
    });
    map.setStyle({
      autoResize: true,
      showLogo: false,
      showFeedbackLink: false
    });
    map.events.add('ready', function() {
      // Create an HTML marker and add it to the map.
      map.markers.add(
        new atlas.HtmlMarker({
          htmlContent: atlas.getImageTemplate('pin'),
          color: '#1471DA',
          anchor: 'top-left',
          pixelOffset: [-5, -30],
          position: [longitude, latitude]
        })
      );
    });
  };

  useEffect(() => {
    initializeMap();
  }, [selectedSite?.siteId]);

  useEffect(() => {
    setTimeout(initializeMap, 200);
  }, [isNavExpanded]);

  return (
    <div
      ref={mapNodeRef}
      className="bve-map-container"
      data-testid="azure-map"
    />
  );
};

export default AzureMap;
This is the error I am seeing.
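An observation from the code above (a hedged guess, not a confirmed diagnosis): initializeMap runs again on every navigation toggle without disposing the previous atlas.Map instance, and map.setStyle is called immediately after construction, before the map has finished loading its style. A minimal sketch of a variation that avoids both, assuming the guards and options stay exactly as above:

// Hypothetical variation: keep the created map in a ref so it can be
// disposed before re-initialization, and defer setStyle until 'ready'.
const mapInstanceRef = useRef(null);

const initializeMap = () => {
  // ...same mapNode/longitude/latitude guards as above...
  if (mapInstanceRef.current) {
    mapInstanceRef.current.dispose(); // release the previous atlas.Map
  }
  const map = new atlas.Map(mapNode, { /* same options as above */ });
  map.events.add('ready', () => {
    map.setStyle({ autoResize: true, showLogo: false, showFeedbackLink: false });
    // ...add the HtmlMarker here, as above...
  });
  mapInstanceRef.current = map;
};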

Related

Vue JS Google Places API - Map not showing

The map is not showing on the right even after following the instructions step by step; the Google API map itself does not render.
(The API key is already in the script tag in my code, but I placed a placeholder here instead.)
Here is the video reference:
https://youtu.be/ID-_D0zJlSM
<div class="ten wide column segment ui m-0" ref="map"></div>
methods: {
  locatorButtonPressed() {
    navigator.geolocation.getCurrentPosition(
      position => {
        this.lat = position.coords.latitude;
        this.lng = position.coords.longitude;
      },
      error => {
        console.log("Error getting location");
      }
    );
  },
  findCloseByButtonPressed() {
    const URL = `https://cors-anywhere.herokuapp.com/https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=${this.lat},${this.lng}&type=${this.type}&radius=${this.radius * 1000}&key=[KEY HERE]`;
    axios
      .get(URL)
      .then(response => {
        this.places = response.data.results;
        this.addLocationsToGoogleMaps();
      })
      .catch(error => {
        console.log(error.message);
      });
  },
  addLocationsToGoogleMaps() {
    var map = new google.maps.Map(this.$refs['map'], {
      zoom: 15,
      center: new google.maps.LatLng(this.lat, this.lng),
      mapTypeId: google.maps.MapTypeId.ROADMAP
    });
    var infowindow = new google.maps.InfoWindow();
    this.places.forEach(place => {
      const lat = place.geometry.location.lat;
      const lng = place.geometry.location.lng;
      let marker = new google.maps.Marker({
        position: new google.maps.LatLng(lat, lng),
        map: map
      });
      google.maps.event.addListener(marker, "click", () => {
        infowindow.setContent(
          `<div class="ui header">${place.name}</div><p>${place.vicinity}</p>`
        );
        infowindow.open(map, marker);
      });
    });
  }
}
}
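(As an aside, note the URL above: in the original snippet the multiline template literal put raw newlines into the request URL, and the stray backticks around [KEY HERE] were a syntax error. A hedged alternative that stays readable is to build the query with URLSearchParams; the [KEY HERE] placeholder is kept from the question:)

// Sketch: assemble the query string with URLSearchParams so no stray
// whitespace ends up in the URL; values are coerced to strings.
const params = new URLSearchParams({
  location: `${this.lat},${this.lng}`,
  type: this.type,
  radius: this.radius * 1000,
  key: '[KEY HERE]', // placeholder, as in the question
});
const URL = `https://cors-anywhere.herokuapp.com/https://maps.googleapis.com/maps/api/place/nearbysearch/json?${params}`;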
If you want to implement a map, I prefer to use this:
click here

React Native Location Tracker

I'm implementing this code and it is showing an error: "undefined is not an object (evaluating 'navigator.geolocation.clearWatch')"
watchLocation = () => {
  const { coordinate } = this.state;
  this.watchID = navigator.geolocation.watchPosition(
    position => {
      const { latitude, longitude } = position.coords;
      const newCoordinate = {
        latitude,
        longitude,
      };
      if (Platform.OS === 'android') {
        if (this.marker) {
          // 500 is the duration (ms) to animate the marker
          this.marker._component.animateMarkerToCoordinate(newCoordinate, 500);
        }
      } else {
        coordinate.timing(newCoordinate).start();
      }
      this.setState({
        latitude,
        longitude,
      });
    },
    error => console.log(error),
    {
      enableHighAccuracy: true,
      timeout: 20000,
      maximumAge: 1000,
      distanceFilter: 30,
    }
  );
};
Can anyone let me know how to solve this? I am following this tutorial:
Link
First install:
npm i @react-native-community/geolocation
Then:
import Geolocation from '@react-native-community/geolocation';

watchLocation = () => {
  const { coordinate } = this.state;
  this.watchID = Geolocation.watchPosition(
    // Your code
  );
}
For reference, check out https://www.npmjs.com/package/@react-native-community/geolocation
I've gone through the blog you are following; according to that, you'll also need to make this change:
componentWillUnmount() {
  Geolocation.clearWatch(this.watchID);
}
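Putting both pieces together, a minimal sketch of the full watch lifecycle with the community module (assuming the component keeps this.watchID as in the question):

import Geolocation from '@react-native-community/geolocation';

componentDidMount() {
  // start watching; the returned id is needed to stop the watch later
  this.watchID = Geolocation.watchPosition(
    position => console.log(position.coords),
    error => console.log(error),
    { enableHighAccuracy: true, distanceFilter: 30 },
  );
}

componentWillUnmount() {
  // stop watching when the component goes away
  if (this.watchID != null) {
    Geolocation.clearWatch(this.watchID);
  }
}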

Mobile project with expo and hapi.js

I'm struggling with an error I can't find a solution for, and I don't know whether it's happening in the front end or the back end of the app.
I'm following a tutorial where the author sets up an app that records your voice and returns it as text via the Google Speech-to-Text API.
This is the front-end part. The app fails in the axios.post: the request does not go through and keeps loading until the error log says it cannot connect to the path.
import React, { Component } from 'react'
import {
  StyleSheet, Text, View, TouchableOpacity, ActivityIndicator, Platform,
} from 'react-native'
// import { Audio, Permissions, FileSystem } from 'expo'
import * as Permissions from 'expo-permissions'
import { Audio } from 'expo-av'
import * as FileSystem from 'expo-file-system';
import axios from 'axios'

const styles = StyleSheet.create({
  container: {
    marginTop: 40,
    backgroundColor: '#fff',
    alignItems: 'center',
  },
  button: {
    backgroundColor: '#1e88e5',
    paddingVertical: 20,
    width: '90%',
    alignItems: 'center',
    borderRadius: 5,
    padding: 8,
    marginTop: 20,
  },
  text: {
    color: '#fff',
  }
})

// this.recordingSettings = JSON.parse(JSON.stringify(Audio.RECORDING_OPTIONS_PRESET_HIGH_QUALITY))
let recordingOptions = {
  android: {
    extension: '.m4a',
    outputFormat: Audio.RECORDING_OPTION_ANDROID_OUTPUT_FORMAT_MPEG_4,
    audioEncoder: Audio.RECORDING_OPTION_ANDROID_AUDIO_ENCODER_AAC,
    sampleRate: 44100,
    numberOfChannels: 2,
    bitRate: 128000,
  },
  ios: {
    extension: '.m4a',
    outputFormat: Audio.RECORDING_OPTION_IOS_OUTPUT_FORMAT_MPEG4AAC,
    audioQuality: Audio.RECORDING_OPTION_IOS_AUDIO_QUALITY_HIGH,
    sampleRate: 44100,
    numberOfChannels: 2,
    bitRate: 128000,
    linearPCMBitDepth: 16,
    linearPCMIsBigEndian: false,
    linearPCMIsFloat: false,
  },
};

export default class SpeechToTextButton extends Component {
  constructor(props) {
    super(props)
    this.recording = null
    this.state = {
      isRecording: false,
      // we would like to know if data fetching is in progress
      isFetching: false,
      // we will write the transcript result here
      transcript: '',
    }
  }

  startRecording = async () => {
    // request permissions to record audio
    const { status } = await Permissions.askAsync(Permissions.AUDIO_RECORDING)
    // if the user doesn't allow us to do so - return as we can't do anything further :(
    if (status !== 'granted') return
    // when status is granted - setting up our state
    this.setState({ isRecording: true })
    // basic settings before we start recording,
    // you can read more about each of them in expo documentation on Audio
    await Audio.setAudioModeAsync({
      allowsRecordingIOS: true,
      interruptionModeIOS: Audio.INTERRUPTION_MODE_IOS_DO_NOT_MIX,
      playsInSilentModeIOS: true,
      interruptionModeAndroid: Audio.INTERRUPTION_MODE_ANDROID_DO_NOT_MIX,
      playThroughEarpieceAndroid: true,
    })
    const recording = new Audio.Recording()
    try {
      // here we pass our recording options
      await recording.prepareToRecordAsync(recordingOptions)
      // and finally start the record
      await recording.startAsync()
      console.log('funcionaaa')
    } catch (error) {
      console.log(error)
      // we will take a closer look at stopRecording function further in this article
      this.stopRecording()
      console.log('no funca')
    }
    // if recording was successful we store the result in variable,
    // so we can refer to it from other functions of our component
    this.recording = recording
  }

  stopRecording = async () => {
    // set our state to false, so the UI knows that we've stopped the recording
    this.setState({ isRecording: false })
    try {
      // stop the recording
      await this.recording.stopAndUnloadAsync()
      console.log('aca tiene que parar')
    } catch (error) {
      console.log(error)
    }
  }

  getTranscription = async () => {
    // set isFetching to true, so the UI knows about it
    this.setState({ isFetching: true })
    try {
      // take the uri of the recorded audio from the file system
      const { uri } = await FileSystem.getInfoAsync(this.recording.getURI())
      // now we create formData which will be sent to our backend
      const formData = new FormData()
      formData.append('file', {
        uri,
        // as different audio types are used for android and ios - we should handle it
        type: Platform.OS === 'ios' ? 'audio/x-wav' : 'audio/m4a',
        name: Platform.OS === 'ios' ? `${Date.now()}.wav` : `${Date.now()}.m4a`,
      })
      // post the formData to our backend
      const { data } = await axios.post('http://190.19.68.120/api/speech', formData, {
        headers: {
          'Content-Type': 'multipart/form-data',
        },
      })
      console.log(data)
      // set transcript from the data which we received from the api
      this.setState({ transcript: data.transcript })
    } catch (error) {
      console.log('There was an error reading file', error.request)
      this.stopRecording()
      // we will take a closer look at resetRecording function further down
      this.resetRecording()
    }
    // set isFetching to false so the UI can properly react on that
    this.setState({ isFetching: false })
  }

  deleteRecordingFile = async () => {
    // deleting file
    try {
      const info = await FileSystem.getInfoAsync(this.recording.getURI())
      await FileSystem.deleteAsync(info.uri)
    } catch (error) {
      console.log('There was an error deleting recorded file', error)
    }
  }

  resetRecording = () => {
    this.deleteRecordingFile()
    this.recording = null
  }

  handleOnPressOut = () => {
    // first we stop the recording
    this.stopRecording()
    console.log('para en el pressout')
    // second we interact with our backend
    this.getTranscription()
  }

  render() {
    const {
      isRecording, transcript, isFetching,
    } = this.state
    return (
      <View style={styles.container}>
        <TouchableOpacity
          style={styles.button}
          onPressIn={this.startRecording}
          onPressOut={this.handleOnPressOut}
        >
          {isFetching && <ActivityIndicator color="#ffffff" />}
          {!isFetching &&
            <Text style={styles.text}>
              {isRecording ? 'Recording...' : 'Start recording'}
            </Text>
          }
        </TouchableOpacity>
        <Text>
          {transcript}
        </Text>
      </View>
    )
  }
}
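(A hedged side note on the axios.post above: when sending FormData from React Native, manually setting the Content-Type header can omit the multipart boundary, which some servers reject; leaving the header out and letting the client fill it in is often safer:)

// Sketch: let axios/React Native set the multipart Content-Type
// (including the boundary) automatically for FormData bodies.
const { data } = await axios.post('http://190.19.68.120/api/speech', formData);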
Here is the back-end part, where I receive the request from the front end and send the audio to the Google API:
'use strict';

const Hapi = require('@hapi/hapi');
const fs = require('fs')
const axios = require('axios')
const speech = require('@google-cloud/speech');
const ffmpeg = require('fluent-ffmpeg');

const client = new speech.SpeechClient();

const init = async () => {
  const server = Hapi.server({
    port: 3005,
    host: 'localhost'
  });

  server.route({
    method: 'POST',
    path: '/speech',
    config: {
      handler: async (request, h) => {
        const data = request.payload;
        console.log(data)
        if (data.file) {
          const name = data.file.hapi.filename;
          const path = __dirname + "/uploads/" + name;
          const encodedPath = __dirname + "/uploads/encoded_" + name;
          const file = fs.createWriteStream(path);
          file.on('error', (err) => console.error(err));
          data.file.pipe(file);
          return new Promise(resolve => {
            data.file.on('end', async (err) => {
              const ret = {
                filename: data.name,
                headers: data.file.hapi.headers
              }
              ffmpeg()
                .input(path)
                .outputOptions([
                  '-f s16le',
                  '-acodec pcm_s16le',
                  '-vn',
                  '-ac 1',
                  '-ar 41k',
                  '-map_metadata -1'
                ])
                .save(encodedPath)
                .on('end', async () => {
                  const savedFile = fs.readFileSync(encodedPath)
                  const audioBytes = savedFile.toString('base64');
                  const audio = {
                    content: audioBytes,
                  }
                  const sttConfig = {
                    enableAutomaticPunctuation: false,
                    encoding: "LINEAR16",
                    sampleRateHertz: 41000,
                    languageCode: "en-US",
                    model: "default"
                  }
                  const request = {
                    audio: audio,
                    config: sttConfig,
                  }
                  const [response] = await client.recognize(request);
                  const transcription = response.results
                    .map(result => result.alternatives[0].transcript)
                    .join('\n');
                  fs.unlinkSync(path)
                  fs.unlinkSync(encodedPath)
                  resolve(JSON.stringify({ ...ret, transcript: transcription }))
                })
            })
          })
        }
      },
      payload: {
        output: 'stream',
        parse: true,
      }
    }
  })

  await server.start();
  console.log('Server running on %s', server.info.uri);
};

process.on('unhandledRejection', (err) => {
  console.log(err);
});

init();
I thought I could try migrating the server to Express.js, but the route config has that payload key under handler, and I don't know what the equivalent is in an Express server.
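Two hedged observations, not verified fixes. First, the client posts to http://190.19.68.120/api/speech while this server binds to localhost:3005 with route /speech; unless a proxy rewrites that, the host/port/path mismatch alone would produce the "cannot connect" behavior. Second, on the Express question: the hapi payload: { output: 'stream', parse: true } config roughly corresponds to multipart parsing middleware in Express, e.g. multer (a sketch under that assumption; route and field names match the question):

const express = require('express');
const multer = require('multer');

const app = express();
// store uploads on disk, like the hapi handler's /uploads directory
const upload = multer({ dest: __dirname + '/uploads' });

// upload.single('file') parses the multipart field named "file",
// the same field name the React Native client appends to FormData
app.post('/speech', upload.single('file'), async (req, res) => {
  // req.file.path is the uploaded file on disk; the ffmpeg +
  // Speech-to-Text pipeline from the hapi handler could run here
  res.json({ filename: req.file.originalname });
});

app.listen(3005, () => console.log('Server running on http://localhost:3005'));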

WebRTC in react-native (hooks), redux - Unhandled Promise Rejections

I'm developing a react-native application, which uses webRTC.
I really liked the minimal version I found here (kudos to baconcheese113!) and I decided to refactor it to create my own React component.
I have set up a backend (DynamoDB, Appsync) and a redux store that allows me to:
dispatch an action sendCreateUserControlMsg, which down the line calls the Appsync endpoint to create a new ControlUserMsg
subscribe to a ControlUserMsg, set the flag triggerWebrtcData and save webrtcData in the Redux state
The following component (which for now calls itself) sometimes works, but mostly doesn't. I feel the problem is related to JS Promises, but I do not fully understand how I should design the component to avoid race conditions.
import React, { useState, useEffect } from 'react';
import { View, SafeAreaView, Button, StyleSheet } from 'react-native';
import { RTCPeerConnection, RTCView, mediaDevices } from 'react-native-webrtc';
import { sendCreateUserControlMsg } from '../redux/actions/UserControlMsgActions';
import controlMsgActions from './../model/control_msg_actions';
import webrtcActionTypes from './../model/webrtc_action_types';
import { useDispatch, useSelector } from "react-redux";
import * as triggersMatch from '../redux/actions/TriggersMatchActions';

var IS_LOCAL_USER = true // manual flag I temporarily set
var localUserID = '00';
var localUser = 'localUser'
var remoteUserID = '01';
var remoteUser = 'remoteUser'

if (IS_LOCAL_USER) {
  var matchedUserId = remoteUserID
  var user_id = localUserID;
  var user = localUser
} else {
  var matchedUserId = localUserID
  var user_id = remoteUserID;
  var user = remoteUser
}

export default function App() {
  const dispatch = useDispatch();
  var triggersMatchBool = useSelector(state => state.triggers_match)
  var webrtcData = useSelector(state => state.webrtc_description.webrtcData)

  const [localStream, setLocalStream] = useState();
  const [remoteStream, setRemoteStream] = useState();
  const [cachedLocalPC, setCachedLocalPC] = useState();
  const [cachedRemotePC, setCachedRemotePC] = useState();

  const sendICE = (candidate, isLocal) => {
    const type = isLocal
      ? webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_LOCAL"]
      : webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_REMOTE"]
    var payload = JSON.stringify({
      type,
      candidate
    })
    console.log(`Sending ICE to ${matchedUserId}`)
    dispatch(sendCreateUserControlMsg(matchedUserId, user_id, user, payload, controlMsgActions["WEBRTC_DATA"]));
  }

  const sendOffer = (offer) => {
    const type = webrtcActionTypes["OFFER"]
    var payload = JSON.stringify({
      type,
      offer
    })
    console.log(`Sending Offer to ${matchedUserId}`)
    dispatch(sendCreateUserControlMsg(matchedUserId, user_id, user, payload, controlMsgActions["WEBRTC_DATA"]));
  }

  const sendAnswer = (answer) => {
    const type = webrtcActionTypes["ANSWER"]
    var payload = JSON.stringify({
      type,
      answer
    })
    console.log(`Sending answer to ${matchedUserId}`)
    dispatch(sendCreateUserControlMsg(matchedUserId, user_id, user, payload, controlMsgActions["WEBRTC_DATA"]));
  }

  const [isMuted, setIsMuted] = useState(false);

  // START triggers
  async function triggerMatchWatcher() {
    if (triggersMatchBool.triggerWebrtcData) {
      dispatch(triggersMatch.endTriggerWebrtcData());
      switch (webrtcData.type) {
        case webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_LOCAL"]:
          try {
            setCachedRemotePC(cachedRemotePC.addIceCandidate(webrtcData.candidate))
          } catch (error) {
            console.warn('ICE not added')
          }
          break;
        case webrtcActionTypes["NEW_ICE_CANDIDATE_FROM_REMOTE"]:
          try {
            setCachedLocalPC(cachedLocalPC.addIceCandidate(webrtcData.candidate))
          } catch (error) {
            console.warn('ICE not added')
          }
          break;
        case webrtcActionTypes["OFFER"]:
          console.log('remotePC, setRemoteDescription');
          try {
            await cachedRemotePC.setRemoteDescription(webrtcData.offer);
            console.log('RemotePC, createAnswer');
            const answer = await cachedRemotePC.createAnswer();
            setCachedRemotePC(cachedRemotePC)
            sendAnswer(answer);
          } catch (error) {
            console.warn(`setRemoteDescription failed ${error}`);
          }
        case webrtcActionTypes["ANSWER"]:
          try {
            console.log(`Answer from remotePC: ${webrtcData.answer.sdp}`);
            console.log('remotePC, setLocalDescription');
            await cachedRemotePC.setLocalDescription(webrtcData.answer);
            setCachedRemotePC(cachedRemotePC)
            console.log('localPC, setRemoteDescription');
            await cachedLocalPC.setRemoteDescription(cachedRemotePC.localDescription);
            setCachedLocalPC(cachedLocalPC)
          } catch (error) {
            console.warn(`setLocalDescription failed ${error}`);
          }
      }
    }
  }

  useEffect(() => {
    triggerMatchWatcher()
  });

  const startLocalStream = async () => {
    // isFront will determine if the initial camera should face user or environment
    const isFront = true;
    const devices = await mediaDevices.enumerateDevices();
    const facing = isFront ? 'front' : 'environment';
    const videoSourceId = devices.find(device => device.kind === 'videoinput' && device.facing === facing);
    const facingMode = isFront ? 'user' : 'environment';
    const constraints = {
      audio: true,
      video: {
        mandatory: {
          minWidth: 500, // Provide your own width, height and frame rate here
          minHeight: 300,
          minFrameRate: 30,
        },
        facingMode,
        optional: videoSourceId ? [{ sourceId: videoSourceId }] : [],
      },
    };
    const newStream = await mediaDevices.getUserMedia(constraints);
    setLocalStream(newStream);
  };

  const startCall = async () => {
    const configuration = { iceServers: [{ url: 'stun:stun.l.google.com:19302' }] };
    const localPC = new RTCPeerConnection(configuration);
    const remotePC = new RTCPeerConnection(configuration);
    localPC.onicecandidate = e => {
      try {
        console.log('localPC icecandidate:', e.candidate);
        if (e.candidate) {
          sendICE(e.candidate, true)
        }
      } catch (err) {
        console.error(`Error adding remotePC iceCandidate: ${err}`);
      }
    };
    remotePC.onicecandidate = e => {
      try {
        console.log('remotePC icecandidate:', e.candidate);
        if (e.candidate) {
          sendICE(e.candidate, false)
        }
      } catch (err) {
        console.error(`Error adding localPC iceCandidate: ${err}`);
      }
    };
    remotePC.onaddstream = e => {
      console.log('remotePC tracking with ', e);
      if (e.stream && remoteStream !== e.stream) {
        console.log('RemotePC received the stream', e.stream);
        setRemoteStream(e.stream);
      }
    };
    localPC.addStream(localStream);
    // Not sure whether onnegotiationneeded is needed
    // localPC.onnegotiationneeded = async () => {
    //   try {
    //     const offer = await localPC.createOffer();
    //     console.log('Offer from localPC, setLocalDescription');
    //     await localPC.setLocalDescription(offer);
    //     sendOffer(localPC.localDescription)
    //   } catch (err) {
    //     console.error(err);
    //   }
    // };
    try {
      const offer = await localPC.createOffer();
      console.log('Offer from localPC, setLocalDescription');
      await localPC.setLocalDescription(offer);
      sendOffer(localPC.localDescription)
    } catch (err) {
      console.error(err);
    }
    setCachedLocalPC(localPC);
    setCachedRemotePC(remotePC);
  };

  const switchCamera = () => {
    localStream.getVideoTracks().forEach(track => track._switchCamera());
  };

  const closeStreams = () => {
    if (cachedLocalPC) {
      cachedLocalPC.removeStream(localStream);
      cachedLocalPC.close();
    }
    if (cachedRemotePC) {
      cachedRemotePC.removeStream(localStream);
      cachedRemotePC.close();
    }
    setLocalStream();
    setRemoteStream();
    setCachedRemotePC();
    setCachedLocalPC();
  };

  return (
    <SafeAreaView style={styles.container}>
      {!localStream && <Button title="Click to start stream" onPress={startLocalStream} />}
      {localStream && <Button title="Click to start call" onPress={startCall} disabled={!!remoteStream} />}
      {localStream && (
        <View style={styles.toggleButtons}>
          <Button title="Switch camera" onPress={switchCamera} />
        </View>
      )}
      <View style={styles.rtcview}>
        {localStream && <RTCView style={styles.rtc} streamURL={localStream.toURL()} />}
      </View>
      <View style={styles.rtcview}>
        {remoteStream && <RTCView style={styles.rtc} streamURL={remoteStream.toURL()} />}
      </View>
      <Button title="Click to stop call" onPress={closeStreams} disabled={!remoteStream} />
    </SafeAreaView>
  );
}

const styles = StyleSheet.create({
  // omitted
});
The most common errors I receive are:
Error: Failed to add ICE candidate
Possible Unhandled Promise Rejection
and
setLocalDescription failed TypeError: Cannot read property 'sdp' of undefined
If I console.log the values I can see they are JS Promises, but since they are not functions I cannot call .then() on them.
How can I call the addIceCandidate or setLocalDescription methods without running into Unhandled Promise Rejection errors?
What are the best practices to work with WebRTC in react-native?
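A hedged reading of the errors above (a sketch, not a verified fix): addIceCandidate, setRemoteDescription, and setLocalDescription all return promises, but in the two ICE cases the promise itself is passed to setCachedRemotePC/setCachedLocalPC instead of being awaited, so any rejection goes unhandled. Also, the OFFER case has no break, so an offer message falls through into the ANSWER branch where webrtcData.answer is undefined, which matches the "Cannot read property 'sdp' of undefined" error. A minimal variation of one ICE case under those assumptions:

// Sketch: await the promise inside try/catch so rejections are caught,
// and don't overwrite the cached RTCPeerConnection with a promise.
case webrtcActionTypes['NEW_ICE_CANDIDATE_FROM_LOCAL']:
  try {
    await cachedRemotePC.addIceCandidate(webrtcData.candidate);
  } catch (error) {
    console.warn('ICE not added', error);
  }
  break;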

How to use vuex with Konva for onDragEnd option

I am using Konva together with Vuex.
This is the code in my '.vue' file for defining the image.
There are two options, onDragEnd and onTransform, in "const yo".
The this.msg and this.msg2 handlers for the two options are defined in methods.
Thus, I can use the two options in real time.
/gallery.vue
.
.
created() {
  const image = new window.Image();
  image.src = this.imageUpload.url;
  image.onload = () => {
    const yo = {
      image: image,
      name: "yoyo",
      draggable: true,
      scaleX: this.imageUpload.positions.scaleX,
      scaleY: this.imageUpload.positions.scaleY,
      x: this.imageUpload.positions._lastPosX,
      y: this.imageUpload.positions._lastPosY,
      onDragEnd: this.msg,
      onTransform: this.msg2
    };
    this.images.push(yo);
  };
},
methods: {
  msg(e) {
    this.savePositions(e.target.attrs);
  },
  msg2(e) {
    this.savePositions(e.currentTarget.attrs);
  },
But I want to move the code inside created() into the Vuex store, so everything is controlled from one file.
Therefore, I rewrote it in the Vuex store as shown below.
When I call this action from 'gallery.vue', everything works well except the two option handlers, this.msg and this.msg2.
I guessed the problem comes from the e argument and tried editing it in various ways,
but the functions don't work; it says this.msg and this.msg2 are not functions.
How can I call these functions correctly?
Thank you so much for reading.
/store.js
.
.
const actions = {
  bringImage({ commit }) {
    axios
      .get(`http://localhost:4000/work`)
      .then(payload => {
        commit('pushWorks', payload);
      })
      .then(() => {
        const image = new window.Image();
        image.src = state.url;
        image.onload = () => {
          // set image only when it is loaded
          const yo = {
            image: image,
            name: state.title,
            draggable: true,
            scaleX: state.positions.scaleX,
            scaleY: state.positions.scaleY,
            x: state.positions._lastPosX,
            y: state.positions._lastPosY,
            onDragEnd: this.msg,
            onTransform: this.msg2
          };
          state.images.push(yo);
        };
      });
  },
  msg({ commit }, e) {
    commit('savePositions', e.target.attrs);
  },
  msg2({ commit }, e) {
    commit('savePositions', e.currentTarget.attrs);
  }
}
You don't have 'this' in Vuex actions, so dispatch your msg/msg2 actions instead, passing the e argument as the payload:
bringImage({
  commit,
  dispatch
}) {
  axios
    .get(`http://localhost:4000/work`)
    .then(payload => {
      commit('pushWorks', payload)
    })
    .then(() => {
      const image = new window.Image()
      image.src = state.url
      image.onload = () => {
        // set image only when it is loaded
        const yo = {
          image: image,
          name: state.title,
          draggable: true,
          scaleX: state.positions.scaleX,
          scaleY: state.positions.scaleY,
          x: state.positions._lastPosX,
          y: state.positions._lastPosY,
          onDragEnd: e => dispatch('msg', e),
          onTransform: e => dispatch('msg2', e),
        }
        state.images.push(yo)
      }
    })
}
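With that change, the dispatched actions receive e as their payload, which matches the msg and msg2 actions already defined in the question's store (for reference):

msg({ commit }, e) {
  commit('savePositions', e.target.attrs);
},
msg2({ commit }, e) {
  commit('savePositions', e.currentTarget.attrs);
}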