I'm trying to plot multiple routes, but an error pops up:
networkx.exception.NetworkXError: Node [20461931, 75901933] in sequence nbunch is not a valid node.
Plotting each route individually is no problem, but the error is raised when they are plotted together.
import networkx as nx
import osmnx as ox
# Create Graph
G = ox.graph_from_place('Munich, Germany', network_type='drive')
# route1 calc
origin_node = 20461931
destination_node = 75901933
route1 = nx.shortest_path(G, origin_node, destination_node)
# route2 calc
start = (48.1336081, 11.58172095)
end = (48.17822992, 11.53754219)
start_node = ox.get_nearest_node(G, start)
end_node = ox.get_nearest_node(G, end)
route2 = nx.shortest_path(G, start_node, end_node, weight='travel_time')
#plot the route with folium
route_list = [route1,route2]
route_map = ox.plot_route_folium(G,route_list)
# save as html file then display map as an iframe
filepath = 'route.html'
route_map.save(filepath)
You are passing it a list of lists of nodes, rather than a list of nodes. See the docs for usage details.
import osmnx as ox
ox.config(use_cache=True, log_console=True)
G = ox.graph_from_place('Munich, Germany', network_type='drive')
route1 = ox.shortest_path(G, 20461931, 75901933, weight=None)
orig = ox.get_nearest_node(G, (48.1336081, 11.58172095))
dest = ox.get_nearest_node(G, (48.17822992, 11.53754219))
route2 = ox.shortest_path(G, orig, dest, weight='travel_time')
route_map = ox.plot_route_folium(G, route1, route_color='#ff0000', opacity=0.5)
route_map = ox.plot_route_folium(G, route2, route_map=route_map, route_color='#0000ff', opacity=0.5)
route_map.save('route.html')
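If you would rather draw both routes with a single call, note that the matplotlib-based ox.plot_graph_routes (unlike plot_route_folium) does accept a list of routes. A minimal sketch, assuming a recent OSMnx where the keyword is route_colors (older versions may name it differently):

# Static alternative: plot_graph_routes takes a list of routes in one call
fig, ax = ox.plot_graph_routes(G, [route1, route2], route_colors=['r', 'b'])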
I'm running through the ASE tutorial and am trying to run a GA optimization (https://wiki.fysik.dtu.dk/ase/tutorials/ga/ga_optimize.html). However, when I run the code, I get an unpacking error, and I'm not sure how to approach the issue.
Code is as follows:
from random import random
from ase.io import write
from ase.optimize import BFGS
from ase.calculators.emt import EMT
from ase.ga.data import DataConnection
from ase.ga.population import Population
from ase.ga.standard_comparators import InteratomicDistanceComparator
from ase.ga.cutandsplicepairing import CutAndSplicePairing
from ase.ga.utilities import closest_distances_generator
from ase.ga.utilities import get_all_atom_types
from ase.ga.offspring_creator import OperationSelector
from ase.ga.standardmutations import MirrorMutation
from ase.ga.standardmutations import RattleMutation
from ase.ga.standardmutations import PermutationMutation
# Change the following three parameters to suit your needs
population_size = 20
mutation_probability = 0.3
n_to_test = 20
# Initialize the different components of the GA
da = DataConnection('gadb.db')
atom_numbers_to_optimize = da.get_atom_numbers_to_optimize()
n_to_optimize = len(atom_numbers_to_optimize)
slab = da.get_slab()
all_atom_types = get_all_atom_types(slab, atom_numbers_to_optimize)
blmin = closest_distances_generator(all_atom_types,
                                    ratio_of_covalent_radii=0.7)
comp = InteratomicDistanceComparator(n_top=n_to_optimize,
                                     pair_cor_cum_diff=0.015,
                                     pair_cor_max=0.7,
                                     dE=0.02,
                                     mic=False)
pairing = CutAndSplicePairing(slab, n_to_optimize, blmin)
mutations = OperationSelector([1., 1., 1.],
                              [MirrorMutation(blmin, n_to_optimize),
                               RattleMutation(blmin, n_to_optimize),
                               PermutationMutation(n_to_optimize)])
# Relax all unrelaxed structures (e.g. the starting population)
while da.get_number_of_unrelaxed_candidates() > 0:
    a = da.get_an_unrelaxed_candidate()
    a.calc = EMT()
    print('Relaxing starting candidate {0}'.format(a.info['confid']))
    dyn = BFGS(a, trajectory=None, logfile=None)
    dyn.run(fmax=0.05, steps=100)
    a.info['key_value_pairs']['raw_score'] = -a.get_potential_energy()
    da.add_relaxed_step(a)
# create the population
population = Population(data_connection=da,
                        population_size=population_size,
                        comparator=comp)
# test n_to_test new candidates
for i in range(n_to_test):
    print('Now starting configuration number {0}'.format(i))
    a1, a2 = population.get_two_candidates()
    a3, desc = pairing.get_new_individual([a1, a2])
    if a3 is None:
        continue
    da.add_unrelaxed_candidate(a3, description=desc)

    # Check if we want to do a mutation
    if random() < mutation_probability:
        a3_mut, desc = mutations.get_new_individual([a3])
        if a3_mut is not None:
            da.add_unrelaxed_step(a3_mut, desc)
            a3 = a3_mut

    # Relax the new candidate
    a3.calc = EMT()
    dyn = BFGS(a3, trajectory=None, logfile=None)
    dyn.run(fmax=0.05, steps=100)
    a3.info['key_value_pairs']['raw_score'] = -a3.get_potential_energy()
    da.add_relaxed_step(a3)
    population.update()
write('all_candidates.traj', da.get_all_relaxed_candidates())
I'm really confused about what I should try to fix the issue. I can't return the a1 and a2 values either.
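If it helps narrow this down: per the ASE source, Population.get_two_candidates() returns None when the population holds fewer than two candidates to pair, and unpacking None into a1, a2 raises exactly this kind of error. A minimal guard to confirm that hypothesis (the error message is mine):

# Guard: get_two_candidates() returns None if the population is too small
pair = population.get_two_candidates()
if pair is None:
    raise RuntimeError('Population has fewer than two candidates; '
                       'check that gadb.db was seeded with starting '
                       'candidates before running this script.')
a1, a2 = pair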
The functions below extract content from 'http://thegreyhoundrecorder.com.au/form-guides/' and append it all to a list. They work fine, although the speed at which the content is scraped from the website is slow. The line tree = html.fromstring(page.content) in particular slows down the process. Is there a way I can improve the speed of my requests?
import lxml
from lxml import html
import requests
import re
import pandas as pd
from requests.exceptions import ConnectionError
greyhound_url = 'http://thegreyhoundrecorder.com.au/form-guides/'
def get_page(url):
    """Take a page url and return the links to the articles (Field)
    we want to scrape, as a list.
    """
    page = requests.get(url)
    tree = html.fromstring(page.content)
    my_list = tree.xpath('//tbody/tr/td[2]/a/@href')  # grab all links
    print('Length of all links = ', len(my_list))
    my_url = [page.url.split('/form-guides')[0] + str(s) for s in my_list]
    return my_url
def extract_data(my_url):
    """
    Take a list of urls and extract the needed information from
    the greyhound website.
    return: a list with the extracted fields
    """
    new_list = []
    try:
        for t in my_url:
            print(t)
            page_detail = requests.get(t)
            tree_1 = html.fromstring(page_detail.content)
            title = ''.join(tree_1.xpath('//div/h1[@class="title"]/text()'))
            race_number = tree_1.xpath("//tr[@id = 'tableHeader']/td[1]/text()")
            Distance = tree_1.xpath("//tr[@id = 'tableHeader']/td[3]/text()")
            TGR_Grade = tree_1.xpath("//tr[@id = 'tableHeader']/td[4]/text()")
            TGR1 = tree_1.xpath("//tbody/tr[@class='fieldsTableRow raceTipsRow']//div/span[1]/text()")
            TGR2 = tree_1.xpath("//tbody/tr[@class='fieldsTableRow raceTipsRow']//div/span[2]/text()")
            TGR3 = tree_1.xpath("//tbody/tr[@class='fieldsTableRow raceTipsRow']//div/span[3]/text()")
            TGR4 = tree_1.xpath("//tbody/tr[@class='fieldsTableRow raceTipsRow']//div/span[4]/text()")
            clean_title = title.split(' ')[0].strip()
            # clean title and extract track name
            Track = title.split(' ')[0].strip()
            # clean title and extract track date
            date = title.split('-')[1].strip()
            # clean title and extract track year
            year = pd.to_datetime('now').year
            # convert date to pandas datetime
            race_date = pd.to_datetime(date + ' ' + str(year)).strftime('%d/%m/%Y')
            # extract race number
            new_rn = []
            for number in race_number:
                match = re.search(r'^(.).*?(\d+)$', number)
                new_rn.append(match.group(1) + match.group(2))
            new_list.append((race_date, Track, new_rn, Distance, TGR_Grade, TGR1, TGR2, TGR3, TGR4))
        return new_list
    except ConnectionError as e:
        print('Connection error; connect to a stronger network or reload the page')
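One common speed-up, sketched under the assumption that the bottleneck is the network round-trips rather than lxml's parsing (which is usually fast): fetch the detail pages concurrently with a thread pool from the standard library. The helper names here are mine:

from concurrent.futures import ThreadPoolExecutor

import requests
from lxml import html

def fetch_tree(url):
    # download one page and parse it into an lxml tree
    page = requests.get(url)
    return html.fromstring(page.content)

def fetch_all_trees(urls, max_workers=8):
    # fetch and parse pages in parallel; results keep the input order
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        return list(pool.map(fetch_tree, urls))

You could then run your existing XPath extraction over each returned tree instead of calling requests.get inside the loop.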
Would anyone advise me how to adjust the X axis to better display the dates on this graph?
from math import pi
import pandas as pd
from bokeh.io import show
from bokeh.models import LinearColorMapper, BasicTicker, PrintfTickFormatter, ColorBar
from bokeh.plotting import figure
# path to the file
path = "C://Users//Zemi4//Desktop//zpr3//all2.csv"
# load the dataframe
data = pd.read_csv(path, delimiter=",")
data['Cas'] = data['Cas'].astype(str)
data = data.set_index('Cas')
data.columns.name = 'Mistnost'
times = list(data.index)
rooms = list(data.columns)
df = pd.DataFrame(data.stack(), columns=['float']).reset_index()
colors = ['#440154', '#404387', '#29788E', '#22A784', '#79D151', '#FDE724', '#FCFEA4', '#FBA40A', '#DC5039']
mapper = LinearColorMapper(palette=colors, low=df.float.min(), high=df.float.max())
TOOLS = "hover,save,pan,box_zoom,reset,wheel_zoom"
p = figure(title="Heatmap ({0} - {1})".format(times[0], times[-1]),
           x_range=times, y_range=list(reversed(rooms)),
           x_axis_location="above", plot_width=1500, plot_height=900,
           tools=TOOLS, toolbar_location='below',
           tooltips=[('Time: ', '@Cas'), ('Temperature: ', '@float'), ('Room: ', '@Mistnost')],
           x_axis_type='datetime')
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = pi / 3
p.rect(x="Cas", y="Mistnost", width=1, height=1,
       source=df,
       fill_color={'field': 'float', 'transform': mapper},
       line_color=None)
color_bar = ColorBar(color_mapper=mapper, major_label_text_font_size="5pt",
                     ticker=BasicTicker(desired_num_ticks=len(colors)),
                     formatter=PrintfTickFormatter(format="%f"),
                     label_standoff=6, border_line_color=None, location=(0, 0))
p.add_layout(color_bar, 'right')
show(p)  # show the plot
Try: p.xaxis[0].ticker.desired_num_ticks = <number_ticks_you_want_to_display>.
Or apply a specific ticker (see the Bokeh docs), as you did for the ColorBar.
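For example, a minimal application of the first suggestion (the tick count is an arbitrary illustration):

# ask the x-axis ticker for roughly ten major ticks (illustrative value)
p.xaxis[0].ticker.desired_num_ticks = 10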
I have a list of point feature classes. I am trying to write a Python script to perform Kriging interpolation, but I am getting the error message "Point_Num" is not defined in this code.
Below is the script I am working with:
import arcpy
from arcpy import env
from arcpy.sa import *
arcpy.env.overwriteOutput = True
# Check out the ArcGIS Spatial Analyst extension license
arcpy.CheckOutExtension("Spatial")
In_Point = r"D:\NPP_Test\MERRA_TEMP_2012C" #(Point feature name:r001_mean, r002_mean.....r012_mean )
Out_Raster = r"D:\NPP_Test\MERRA_TEMP_2012D"
points = arcpy.ListFeatureClasses()
zFields = "GRID_CODE"
#Kriging Veriable
cellSize = 0.05
lagSize = 0.5780481172534
majorRange = 6
partialSill = 3.304292110
nugget = 0.002701348
kRadius = RadiusFixed(20000, 1)
#Mask region of interest
mask = r"D:\Gujarta Shape file\GUJARATSTATE.shp"
for zField in zFields:
    Point = Point_Num[:3]
    kModelUniversalObj = KrigingModelUniversal("LINEARDRIFT", lagSize, majorRange, partialSill, nugget)
    OutKriging = Kriging(inPointFeatures, zField, kModelUniversalObj, cellSize, kRadius)
    #IDWMASk = ExtractByMask(outIDW, mask)
    KrigMask = ExtractByMask(OutKriging, mask)
    #Save outraster as the same name of input
    KrigMask.save("r{}.tif".format(Point_Num))
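As for the NameError itself: Point_Num is never assigned anywhere in the script, and inPointFeatures is undefined as well. A sketch of what the loop may have been intended to do, assuming one Kriging raster per point feature class in the workspace (the names point_fc and point_num are mine, and the [:4] slice assumes the 'r001_mean' naming you describe):

arcpy.env.workspace = In_Point
for point_fc in arcpy.ListFeatureClasses():
    point_num = point_fc[:4]  # e.g. 'r001' from 'r001_mean' (assumed naming)
    kModelUniversalObj = KrigingModelUniversal("LINEARDRIFT", lagSize,
                                               majorRange, partialSill, nugget)
    outKriging = Kriging(point_fc, zFields, kModelUniversalObj, cellSize, kRadius)
    krigMask = ExtractByMask(outKriging, mask)
    # save the output raster under the input's name
    krigMask.save("{}\\{}.tif".format(Out_Raster, point_num))

Note also that for zField in zFields: iterates over the string "GRID_CODE" character by character, which is probably not what you intended.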
I'm trying to create one dataframe with data from multiple urls I'm scraping. The code works; however, I'm unable to accumulate the data in one DataFrame across iterations. The DataFrame (called frame) is replaced with each new url's data rather than having the new data concatenated onto the same frame. Thank you, I deeply appreciate your help!
import urllib
import re
import json
import pandas
import pylab
import numpy
import matplotlib.pyplot
from pandas import *
from pylab import *
from threading import Thread
import sqlite3
urls = ['http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1176131' , 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=795226', 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1176131' , 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1807944', 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=277459' , 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1076779' , 'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=971546']
i=0
regex = '<p class="commentText">(.+?)</p>'
regex2 = '<strong>Easiness</strong><span>(.+?)</span></p>'
regex3 = 'Helpfulness</strong><span>(.+?)</span></p>'
regex4 = 'Clarity</strong><span>(.+?)</span></p>'
regex5 = 'Rater Interest</strong><span>(.+?)</span></p>'
regex6 = '<div class="date">(.+?)</div>'
regex7 = '<div class="class"><p style="word-wrap:break-word;">(.+?)</p>'
regex8 = '<meta name="prof_name" content="(.+?)"/>'
pattern = re.compile(regex)
easiness = re.compile(regex2)
helpfulness = re.compile(regex3)
clarity = re.compile(regex4)
interest = re.compile(regex5)
date = re.compile(regex6)
mathclass = re.compile(regex7)
prof_name = re.compile(regex8)
while i < len(urls):
    htmlfile = urllib.urlopen(urls[i])
    htmltext = htmlfile.read()
    content = re.findall(pattern, htmltext)
    Easiness = re.findall(easiness, htmltext)
    Helpfulness = re.findall(helpfulness, htmltext)
    Clarity = re.findall(clarity, htmltext)
    Interest = re.findall(interest, htmltext)
    Date = re.findall(date, htmltext)
    Class = re.findall(mathclass, htmltext)
    PROFNAME = re.findall(prof_name, htmltext)
    i += 1
    frame = DataFrame({'Comments': content, 'Easiness': Easiness, 'Helpfulness': Helpfulness,
                       'Clarity': Clarity, 'Rater Interest': Interest, 'Class': Class,
                       'Date': Date[1:len(Date)], 'Professor': PROFNAME[0]})
print frame
Use pd.concat:
import pandas as pd

frames = []
i = 0
while i < len(urls):
    htmlfile = urllib.urlopen(urls[i])
    htmltext = htmlfile.read()
    content = re.findall(pattern, htmltext)
    Easiness = re.findall(easiness, htmltext)
    Helpfulness = re.findall(helpfulness, htmltext)
    Clarity = re.findall(clarity, htmltext)
    Interest = re.findall(interest, htmltext)
    Date = re.findall(date, htmltext)
    Class = re.findall(mathclass, htmltext)
    PROFNAME = re.findall(prof_name, htmltext)
    i += 1
    frames.append(DataFrame({'Comments': content, 'Easiness': Easiness, 'Helpfulness': Helpfulness,
                             'Clarity': Clarity, 'Rater Interest': Interest, 'Class': Class,
                             'Date': Date[1:len(Date)], 'Professor': PROFNAME[0]}))
frame = pd.concat(frames)
You are overwriting your frame with each iteration of the loop. As Phillip Cloud suggested, you can make a list of frames that you append with each loop. I simplified your code differently, but I think this gives you what you want.
import urllib
import re
import pandas as pd
urls = ['http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1176131',
'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=795226',
'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1176131',
'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1807944',
'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=277459',
'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1076779',
'http://www.ratemyprofessors.com/ShowRatings.jsp?tid=971546']
regex = {'pattern' : re.compile('<p class="commentText">(.+?)</p>'),
         'easiness' : re.compile('<strong>Easiness</strong><span>(.+?)</span></p>'),
         'helpfulness' : re.compile('Helpfulness</strong><span>(.+?)</span></p>'),
         'clarity' : re.compile('Clarity</strong><span>(.+?)</span></p>'),
         'interest' : re.compile('Rater Interest</strong><span>(.+?)</span></p>'),
         'date' : re.compile('<div class="date">(.+?)</div>'),
         'mathclass' : re.compile('<div class="class"><p style="word-wrap:break-word;">(.+?)</p>'),
         'prof_name' : re.compile('<meta name="prof_name" content="(.+?)"/>')}
# Make a dictionary with empty lists using the same keys
d = {}
for k in regex.keys():
    d[k] = []
# Now fill those lists
for url in urls:
    htmlfile = urllib.urlopen(url)
    htmltext = htmlfile.read()
    for k, v in regex.iteritems():
        d[k].append(re.findall(v, htmltext))
frame = pd.DataFrame(d) # Dump the dict into a DataFrame
print frame
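One difference to be aware of: with this version each cell of frame holds the full list of matches for one url (one row per professor page), rather than one row per individual rating as in your original loop. Flatten the lists before building the DataFrame if you need the long format.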