Selenium Issue with Exec? - selenium

I am running a web scraper with Selenium to get some data on the NBA. I have URLs to reach the websites for each of the 30 teams, but when I run the code it only gets through a few of the URLs and then crashes with the errors shown below:
# web scraper
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import pandas as pd
import os

class NBAScraper:
    def __init__(self):
        # part 1
        url = "https://www.nba.com/teams"
        HTML = requests.get(url)
        soup = BeautifulSoup(HTML.text, 'html.parser')
        text = str(soup.find_all("a", "Anchor_anchor__cSc3P TeamFigureLink_teamFigureLink__uqnNO"))
        ids = []
        for i in range(0, 30):
            hr = text.find("stats")
            ids.append(text[(hr + 11):(hr + 21)])
            text = text[(hr + 22):]

        # part 2
        names = []
        for j in range(0, 30):
            url2 = "https://www.nba.com/stats/team/" + str(ids[j]) + "/advanced"
            HTML2 = requests.get(url2)
            soup2 = BeautifulSoup(HTML2.text, 'html.parser')
            ## div class="TeamHeader_name__MmHlP"
            name = str(soup2.find("div", "TeamHeader_name__MmHlP"))
            ni = name.find("div>")
            ni2 = name.find("<!")
            name1 = name[(ni + 4):ni2]
            name = name[ni2:]
            ni3 = name.find("<div>")
            name = name[(ni3 + 5):]
            ni4 = name.find("</div>")
            name2 = name[:ni4]
            n = name1 + " " + name2
            names.append(n)

        ## tbody class="Crom_body__UYOcU"
        # part 3
        offrtg = []
        defrtg = []
        reb = []
        tov = []
        efg = []
        for k in range(0, 30):
            self.driver = webdriver.Chrome()
            url3 = "https://www.nba.com/stats/team/" + str(ids[k]) + "/advanced"
            self.driver.get(url3)
            rndrhtml = self.driver.page_source
            self.driver.close()
            #self.driver.quit()
            soup3 = BeautifulSoup(rndrhtml, 'html.parser')
            ovrall = str(soup3.find("tbody", "Crom_body__UYOcU").find_all("td"))
            for d in range(0, 13):
                di = ovrall.find("<td>")
                ovrall = ovrall[(di + 4):]
                # conditions
                if d == 2:
                    di2 = ovrall.find("</td>")
                    offrtg.append(float(ovrall[:di2]))
                elif d == 3:
                    di2 = ovrall.find("</td>")
                    defrtg.append(float(ovrall[:di2]))
                elif d == 10:
                    di2 = ovrall.find("</td>")
                    reb.append(float(ovrall[:di2]))
                elif d == 11:
                    di2 = ovrall.find("</td>")
                    tov.append(float(ovrall[:di2]))
                elif d == 12:
                    di2 = ovrall.find("</td>")
                    efg.append(float(ovrall[:di2]))

        # writing to excel (removes the old spreadsheet; raises if it does not exist)
        os.remove(r"C:\Users\jackm\OneDrive\Desktop\NBA\NBASTATS.xlsx")
        d = {'Name': names, 'OFFRTG': offrtg, 'DEFRTG': defrtg, 'REB': reb,
             'TOV': tov, 'EFG': efg}
        df = pd.DataFrame(data=d)
        df.to_excel(r"C:\Users\jackm\OneDrive\Desktop\NBA\NBASTATS.xlsx", sheet_name="STATS")

NBAScraper()
I tried playing around with the driver's close() and quit() calls, and with putting the driver in a separate function and running it outside the class, but none of that worked. Through some testing I realized that even outside a loop, Selenium will throw the error for a URL once but run it fine the second time. I tried using implicit waits to solve this, but to no avail.
Traceback (most recent call last):
  File "C:\Program Files\Spyder\pkgs\spyder_kernels\py3compat.py", line 356, in compat_exec
    exec(code, globals, locals)
  File "c:\users\jackm\spyder\nba.py", line 104, in <module>
    NBAScraper()
  File "c:\users\jackm\spyder\nba.py", line 71, in __init__
    ovrall = str(soup3.find("tbody", "Crom_body__UYOcU").find_all("td"))
AttributeError: 'NoneType' object has no attribute 'find_all'
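The AttributeError means the tbody was not in page_source when it was grabbed, i.e. the JavaScript-rendered stats table had not finished loading. An explicit wait on that element (a minimal sketch; the CSS class is the one already used above) is the usual way to make sure it exists before parsing:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get(url3)
# block (up to 10 s) until the stats table is actually present in the DOM
WebDriverWait(driver, 10).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, "tbody.Crom_body__UYOcU"))
)
rndrhtml = driver.page_source
driver.quit()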

Related

AttributeError: 'str' object has no attribute 'unique' (pandas.unique)

In my script I use the pandas module. When I execute my file.py, everything works well. But when I converted my file.py to file.exe with auto-py-to-exe, I got an error: AttributeError: 'str' object has no attribute 'unique'. It's strange, because it worked normally before. The line where the error occurs is wells = list(file_excel[0].unique()). If anyone knows this issue, please help.
import tkinter as tk
import tkinter.filedialog as fd
import pandas as pd
import os
import sys  # needed by resource_path() for sys._MEIPASS
import datetime
from datetime import datetime, date
import numpy as np

pd.set_option('display.notebook_repr_html', False)
pd.set_option('display.max_columns', 80)
pd.set_option('display.max_rows', 200)
pd.set_option('display.width', 800)

def resource_path(relative_path):
    try:
        base_path = sys._MEIPASS
    except Exception:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)

def open():  # note: shadows the built-in open()
    global file_excel, name
    file_excel = fd.askopenfilename(initialdir='/Desktop', title='Открыть файл',  # "Open file"
                                    filetypes=[("Excel", "*.xlsx")])
    name = os.path.basename(file_excel)
    name = os.path.splitext(name)[0]
    file_excel = pd.read_excel(file_excel, skiprows=[0], header=None)
    win.destroy()
    return file_excel, name

win = tk.Tk()
path = resource_path("image.png")
photo = tk.PhotoImage(file=path)
win.iconphoto(False, photo)
win.config(bg='#FFC')
win.title('Конвертация в формат .ev')  # "Conversion to .ev format"
win.geometry('400x130+500+500')
win.resizable(False, False)
label_1 = tk.Label(win, text='Выберите файл с испытаниями скважин:',  # "Choose the well-test file:"
                   bg='#FFC',
                   font=('Arial', 10, 'bold'),
                   padx=20,
                   pady=10).pack()
btn_1 = tk.Button(win, text='Выбрать Excel',  # "Choose Excel"
                  command=open,
                  activebackground='#6F6',
                  font=('Arial', 12, 'bold'),
                  padx=20,
                  pady=10,
                  relief=tk.RAISED,
                  bd=2).pack()
win.mainloop()

wells = list(file_excel[0].unique())
file_excel[1] = pd.to_datetime(file_excel[1], errors='coerce').dt.strftime("%d/%m/%Y")
file_excel[4] = np.where(file_excel[1].str, 'Perforation', np.nan)
file_excel.iloc[:, [2, 3]] = file_excel.iloc[:, [2, 3]].abs()
col_list = list(file_excel)
col_list[4], col_list[2] = col_list[2], col_list[4]
file_excel.columns = col_list
Perforation = pd.DataFrame(data=None)
for i in wells:
    well_name = pd.DataFrame({'WELLNAME ' + i}, columns=[1])
    Perforation = Perforation.append(well_name)
    Perforation = Perforation.append(file_excel.iloc[:, [1, 2, 3, 4]][file_excel.iloc[:, 0] == i])
    Perforation = Perforation.append(pd.Series(dtype='object'), ignore_index=True)

def SaveFile():
    Save = fd.asksaveasfile(mode='w', defaultextension=".ev", initialfile=name)
    Save.write(Perforation.to_string(index=False, header=False, na_rep=' '))
    win.destroy()

win = tk.Tk()
path = resource_path("image.png")
photo = tk.PhotoImage(file=path)
win.iconphoto(False, photo)
win.config(bg='#FFC')
win.title('Конвертация в формат .ev')  # "Conversion to .ev format"
win.geometry('400x130+500+500')
win.resizable(False, False)
label_1 = tk.Label(win, text='Сохранение:',  # "Saving:"
                   bg='#FFC',
                   font=('Arial', 10, 'bold'),
                   padx=20,
                   pady=10).pack()
btn_1 = tk.Button(win, text='Сохранить как',  # "Save as"
                  command=SaveFile,
                  activebackground='#6F6',
                  font=('Arial', 12, 'bold'),
                  padx=20,
                  pady=10,
                  relief=tk.RAISED,
                  bd=2).pack()
win.mainloop()
(Screenshots: the type of file[0], and the error screen.)
When I created the virtual env, I should have added the openpyxl module. Once I installed it, everything worked fine.
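A plausible reason the missing dependency only shows up in the .exe: pandas imports openpyxl lazily, inside read_excel, so a bundler that relies on static import analysis may not see it. An explicit top-level import (a sketch, not from the original script) makes the dependency visible to the bundler and fails fast if the module is absent:

import openpyxl  # imported only so the bundler includes the .xlsx engine
import pandas as pd

# pandas needs openpyxl at this point to read .xlsx files
file_excel = pd.read_excel("wells.xlsx", skiprows=[0], header=None)  # hypothetical file name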

only when building a .exe from working code: AttributeError: Can only use .dt accessor with datetimelike values

I have a working Python script based on pandas.
Converting a similar script into a .exe worked on my computer at work; unfortunately, that isn't the case on my computer at home. I tried pyinstaller and py2exe, and both bring up this error.
It seems to me that the conversion raises a number of errors (I have already fixed some of them), so I don't think it's ultimately about the datetime issue.
import pandas as pd
import os
import glob
from datetime import datetime
import shutil
import os.path

try:
    parentfolder = os.path.dirname(__file__)
    parentfolder = os.path.abspath(os.path.join(parentfolder, '..'))  # parent folder of the script file
except NameError:  # We are the main py2exe script, not a module
    import sys
    parentfolder = os.path.dirname(sys.argv[0])
    parentfolder = os.path.abspath(os.path.join(parentfolder, '..'))  # parent folder of the script file

today = datetime.now()
day1 = today.strftime("%d-%m-%Y")
time1 = today.strftime("%d-%m-%Y_%H-%M-%S")
day1 = day1 + '_cleaned'
logname = "logfile_" + time1 + ".txt"
resultfolder = os.path.join(parentfolder, day1)
logfile = os.path.join(resultfolder, logname)
if os.path.exists(resultfolder):
    shutil.rmtree(resultfolder)  # deletes folder and all subfolders
os.makedirs(resultfolder)
pd.set_option('display.max_columns', 5)
pd.set_option('display.max_colwidth', 99)
f = open(logfile, "w")
f.close()
all_files = glob.glob(parentfolder + "/*.xls")
filecounter = 0
first_run_counter = 0
first_day_counter = 0

for filename in all_files:
    file_name = (os.path.splitext(os.path.basename(filename))[0])
    writepath = os.path.join(resultfolder, '{}.xlsx'.format(str(file_name) + "_py"))
    writer = pd.ExcelWriter(writepath, engine='xlsxwriter')
    with open(logfile, "a") as file:
        file.write("{} \n".format(str(file_name)))
    filecounter += 1
    if filecounter > 1:
        print("WARNING, JUST CONVERT 1 FILE")
        break
    list1 = []
    dfs_by_day = []
    df = pd.read_excel(filename, header=None, parse_dates=False)  # read without header (,decimal=",")
    #df = df.convert_dtypes(convert_string=True)
    df_help = df.copy()
    df_help[1] = df_help[1].astype(str)
    df_help[0] = df_help[0].astype(str)
    ##### sort and filter the file
    df.dropna(axis=0, how='any', thresh=None, subset=None, inplace=True)  # drops rows with empty cells
    df.drop_duplicates(inplace=True)  # also drops duplicated headers?!
    df.reset_index(drop=True, inplace=True)
    new_header = df.iloc[0]  # grab the first row for the header
    df = df[1:]  # take the data less the header row
    df.columns = new_header  # uses the second row as header
    df = df.sort_values(['Date (MM/DD/YYYY)', 'Time (HH:mm:ss)'], ascending=[True, True])
    df.reset_index(drop=True, inplace=True)
    df.rename(columns={'Date (MM/DD/YYYY)': 'Date (DD/MM/YYYY)'}, inplace=True)
    #df['Date (DD/MM/YYYY)'] = df['Date (DD/MM/YYYY)'].astype(str)  # IMPORTANT! make the date independent of / or .
    #df['Date (DD/MM/YYYY)'] = df['Date (DD/MM/YYYY)'].str.replace('/', '.')  # replace / with .
    df_help2 = df.copy()  # deep copy of the trimmed dataframe (not yet datetime)
    #################################################################### split the file into days
    ##df_help2['Date (DD/MM/YYYY)'] = pd.to_datetime(df_help2['Date (DD/MM/YYYY)'], format='%d.%m.%Y')  # possibly add format='%d.%m.%Y'
    df_help2['next day'] = (df_help2['Date (DD/MM/YYYY)'].diff()).dt.days > 0  # whether a new row = a new day
    ############### make the date format independent of / or .
    for i in range(df_help2.shape[0]):
        if df_help2.at[i, 'next day'] == True:
            list1.append(i)
    # splitting algorithm: whole file into days
    l_mod = [0] + list1 + [df.shape[0]]
    dfs_by_day = [df.iloc[l_mod[n]:l_mod[n + 1]] for n in range(len(l_mod) - 1)]
    ################################################################# split the days into runs
    for j in dfs_by_day:
        memo = 0
        run_counter = 1
        df1 = j
        df1 = df1.reset_index(drop=True)
        df_help4 = df1.iloc[0:1, 0:2].reset_index(drop=True).copy()
        df1['Date (DD/MM/YYYY)'] = df1['Date (DD/MM/YYYY)'].dt.strftime('%d.%m.%Y')
        list3 = []
        dfdate = str(df1.at[0, 'Date (DD/MM/YYYY)'])
        print(dfdate)
        df_help3 = df1.copy()  # deep copy for the time-of-day/run analysis
        df_help3['Time (HH:mm:ss)'] = pd.to_datetime(df_help3['Time (HH:mm:ss)'], format='%H:%M:%S')
        df_help3['next run'] = (df_help3['Time (HH:mm:ss)'].diff()).dt.seconds > 2000
        df_help3.reset_index(drop=True, inplace=True)
        for i in range(df_help3.shape[0]):
            if df_help3.at[i, 'next run'] == True:
                list3.append(i)
        ### algorithm splits a day into runs
        l_mod2 = [0] + list3 + [df1.shape[0]]
        dfs_by_run = [df1.iloc[l_mod2[n]:l_mod2[n + 1]] for n in range(len(l_mod2) - 1)]
        for k in dfs_by_run:
            df_run = k
            df_run['Depth m'] = pd.to_numeric(df_run['Depth m'])
            df_run['depth rounded'] = df_run['Depth m'].astype(int)  # rounds
            df_run = df_run.reset_index(drop=True)
            df_run = df_run.drop_duplicates(subset=['depth rounded'], keep='last')  # keep the last value
            del df_run['depth rounded']
            df_run = df_run.dropna(axis=0, how='any', thresh=2)
            df_run = df_run.reset_index(drop=True)
            run_name = str(dfdate) + '_run' + str(run_counter)
            ##### sensor info to result file
            if first_run_counter == 0:
                last_df = df_run.copy()
                last_df = last_df[0:0]
            last_df = last_df.append(df_run)
            first_run_counter += 1
            with open(logfile, "a") as file:
                file.write("{0} has {1} last measurement(s) \n".format(run_name, df_run.shape[0]))
            run_counter += 1
        # all raw data, but with sensor info and header per day
        df_help4['Time (HH:mm:ss)'] = df_help4['Time (HH:mm:ss)'].astype(str)
        df_help4['Date (DD/MM/YYYY)'] = df_help4['Date (DD/MM/YYYY)'].astype(str)
        for i in range(df_help.shape[0]):
            if df_help4.at[0, 'Date (DD/MM/YYYY)'] == df_help.at[i, 0]:
                if df_help4.at[0, 'Time (HH:mm:ss)'] == df_help.at[i, 1]:
                    memo = i
                    break
        for n in reversed(list(range(memo))):
            if df_help.at[n, 3] == 'SENSOR SERIAL NUMBER:':
                sensor_info = df_help.iloc[n:n + 1, :]
                sensor_info.reset_index(drop=True, inplace=True)
                break
        sensor_info.at[0, 0:2] = '-'
        df1 = df1.columns.to_frame().T.append(df1, ignore_index=True)  # adds the header as the top row
        df1.columns = range(len(df1.columns))  # renumber the header 0..n
        if first_day_counter == 0:
            raw_df = df1.copy()
            raw_df = raw_df[0:0]
        sensor_info.columns = range(len(df1.columns))
        df1 = pd.concat([df1.iloc[:(0)], sensor_info, df1.iloc[0:]]).reset_index(drop=True)
        raw_df = raw_df.append(df1)
        first_day_counter += 1
    last_df.to_excel(writer, sheet_name='{}'.format("last"), header=False, index=False)
    #raw_df['Date (DD/MM/YYYY)'] = raw_df['Date (DD/MM/YYYY)'].dt.strftime('%d.%m.%Y')
    raw_df.to_excel(writer, sheet_name='{}'.format("raw"), header=False, index=False)
    writer.save()
    with open(logfile, "a") as file:
        file.write("total number of last measurements: {} \n".format(last_df.shape[0]))
        file.write("total number of raw measurements: {} \n".format(raw_df.shape[0]))
f.close()
error:
Traceback (most recent call last):
  File "tsk-py-convert.py", line 95, in <module>
  File "pandas\core\generic.pyc", line 5458, in __getattr__
  File "pandas\core\accessor.pyc", line 180, in __get__
  File "pandas\core\indexes\accessors.pyc", line 494, in __new__
AttributeError: Can only use .dt accessor with datetimelike values
Within Spyder the code was using an old pandas version (0.23.4). My code doesn't seem to work with a newer version: I had the latest pandas version pip-installed on Windows, and I have now manually installed the Anaconda version (0.23.4).
I can now run the code through cmd and IDLE, and the .exe created with pyinstaller works!
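For completeness: the failing line is df_help2['Date (DD/MM/YYYY)'].diff().dt.days, and .dt only works on a column whose dtype is actually datetime64. The explicit conversion is commented out in the script above, so the result depends on whether that pandas version happened to hand back datetimes from read_excel. Converting explicitly before calling .diff() (a sketch) removes the version dependence:

# parse the date column explicitly so .dt works regardless of pandas version
df_help2['Date (DD/MM/YYYY)'] = pd.to_datetime(
    df_help2['Date (DD/MM/YYYY)'], format='%d.%m.%Y', errors='coerce')
df_help2['next day'] = (df_help2['Date (DD/MM/YYYY)'].diff()).dt.days > 0  # new row = new day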

Getting flow info from a switch and copying it into a CSV file (Ryu controller)

I hope you can help me. I want to get flow info from the switch by sending a request every 10 s; the switch replies with the info, but I get the following error when the controller receives the reply in the flow stats reply handler. The error occurs because of the flow match 'eth_type'.
CollectTrainingStatsApp: Exception occurred during handler processing. Backtrace from offending handler [_flow_stats_reply_handler] servicing event [EventOFPFlowStatsReply] follows.
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/dist-packages/ryu/base/app_manager.py", line 290, in _event_loop
    handler(ev)
  File "/home/guenfaf/Documents/Training ryu/data_to_csv/data_to_csv.py", line 59, in _flow_stats_reply_handler
    for stat in sorted([flow for flow in body if (flow.priority == 1) ], key=lambda flow:
  File "/home/guenfaf/Documents/Training ryu/data_to_csv/data_to_csv.py", line 60, in <lambda>
    (flow.match['eth_type'],flow.match['ipv4_src'],flow.match['ipv4_dst'],flow.match['ip_proto'])):
  File "/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py", line 904, in __getitem__
    return dict(self._fields2)[key]
KeyError: 'eth_type'
Here is my code:
from ryu.app import simple_switch_13
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib import hub
from time import time

class CollectTrainingStatsApp(simple_switch_13.SimpleSwitch13):

    def __init__(self, *args, **kwargs):
        super(CollectTrainingStatsApp, self).__init__(*args, **kwargs)
        self.datapaths = {}
        self.monitor_thread = hub.spawn(self.monitor)
        file0 = open("FlowStatsfile.csv", "w")
        file0.write('datapath_id,flow_id,ip_src,tp_src,ip_dst,tp_dst,ip_proto,flow_duration_sec,flow_duration_nsec,idle_timeout,hard_timeout,flags,packet_count,byte_count,packet_count_per_second,packet_count_per_nsecond,byte_count_per_second,byte_count_per_nsecond,label\n')
        file0.close()

    # Asynchronous message
    @set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
                self.logger.debug('register datapath: %016x', datapath.id)
                self.datapaths[datapath.id] = datapath
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.datapaths:
                self.logger.debug('unregister datapath: %016x', datapath.id)
                del self.datapaths[datapath.id]

    def monitor(self):
        while True:
            for dp in self.datapaths.values():
                self.request_stats(dp)
            hub.sleep(10)

    def request_stats(self, datapath):
        self.logger.debug('send stats request: %016x', datapath.id)
        parser = datapath.ofproto_parser
        req = parser.OFPFlowStatsRequest(datapath)
        datapath.send_msg(req)

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        #timestamp = time.time()
        tp_src = 0
        tp_dst = 0
        file0 = open("FlowStatsfile.csv", "a+")
        body = ev.msg.body
        for stat in sorted([flow for flow in body if (flow.priority == 1)],
                           key=lambda flow: (flow.match['eth_type'], flow.match['ipv4_src'],
                                             flow.match['ipv4_dst'], flow.match['ip_proto'])):
            ip_src = stat.match['ipv4_src']
            ip_dst = stat.match['ipv4_dst']
            ip_proto = stat.match['ip_proto']
            if stat.match['ip_proto'] == 1:
                icmp_code = stat.match['icmpv4_code']
                icmp_type = stat.match['icmpv4_type']
            elif stat.match['ip_proto'] == 6:
                tp_src = stat.match['tcp_src']
                tp_dst = stat.match['tcp_dst']
            elif stat.match['ip_proto'] == 17:
                tp_src = stat.match['udp_src']
                tp_dst = stat.match['udp_dst']
            flow_id = str(ip_src) + str(tp_src) + str(ip_dst) + str(tp_dst) + str(ip_proto)
            try:
                packet_count_per_second = stat.packet_count / stat.duration_sec
                packet_count_per_nsecond = stat.packet_count / stat.duration_nsec
            except:
                packet_count_per_second = 0
                packet_count_per_nsecond = 0
            try:
                byte_count_per_second = stat.byte_count / stat.duration_sec
                byte_count_per_nsecond = stat.byte_count / stat.duration_nsec
            except:
                byte_count_per_second = 0
                byte_count_per_nsecond = 0
            # 19 fields, matching the 19 columns in the CSV header
            file0.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n"
                        .format(ev.msg.datapath.id, flow_id, ip_src, tp_src, ip_dst, tp_dst,
                                stat.match['ip_proto'],
                                stat.duration_sec, stat.duration_nsec,
                                stat.idle_timeout, stat.hard_timeout,
                                stat.flags, stat.packet_count, stat.byte_count,
                                packet_count_per_second, packet_count_per_nsecond,
                                byte_count_per_second, byte_count_per_nsecond, 0))
        file0.close()
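A note on the cause, visible in the traceback: OFPMatch.__getitem__ raises KeyError for any field a flow does not match on, and the priority-1 flows installed by SimpleSwitch13 match only on in_port/eth_src/eth_dst, never on 'eth_type'. A defensive lookup helper (a sketch, not part of the original app) lets the sort tolerate missing fields:

def match_get(match, key, default=0):
    # OFPMatch.__getitem__ raises KeyError for fields the flow does not
    # match on, so fall back to a default value instead of crashing
    try:
        return match[key]
    except KeyError:
        return default

# usage in _flow_stats_reply_handler:
# for stat in sorted([flow for flow in body if flow.priority == 1],
#                    key=lambda flow: (match_get(flow.match, 'eth_type'),
#                                      match_get(flow.match, 'ipv4_src'),
#                                      match_get(flow.match, 'ipv4_dst'),
#                                      match_get(flow.match, 'ip_proto'))):

The same guard would be needed for the stat.match['ipv4_src'] and related lookups inside the loop, since those flows also lack the IPv4 fields.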

gem5: RuntimeError: Cycle found in configuration hierarchy

I'm working on a Processing-in-Memory simulation in gem5.
I am trying to link the PIM modules that I made; however, a runtime error, "Cycle found in configuration hierarchy", happens.
I do not know exactly when this configuration hierarchy error happens or how to solve it.
I read the configuration hierarchy document on the gem5 documentation web page several times, but I couldn't understand how to order the module initializations and port bindings to avoid this error.
Also, I don't know how to find the exact code that generates the configuration hierarchy cycle; all I can get is Python tracebacks.
If you want to see the configuration code, here is the (very complicated) code from MemConfig.py, together with the traceback.
_kernel = PIMMatrix()
_kernel.bridge.num_bridges = 2
_kernel.bridge.slave[0] = _kernel.xbar.master[0]
_kernel.bridge.master[0] = xbar.slave
_kernel.bridge.slave[1] = xbar.master
_kernel.bridge.master[1] = _kernel.xbar.slave[0]
_kernel.row_sched.instrPort = _kernel.xbar.master[1]
_kernel.row_sched.cmdPort = _kernel.xbar.slave[1]
_kernel.prefix.instrPort = _kernel.xbar.master[2]
for i in range(16):
    _kernel.prefix.memPort[i] = _kernel.xbar.slave[2 + i]
_kernel.A_row_scratch.instrPort = _kernel.xbar.master[3]
_kernel.A_row_scratch.seqPort = _kernel.xbar.slave[18]
_kernel.B_row_scratch.instrPort = _kernel.xbar.master[4]
_kernel.B_row_scratch.seqPort = _kernel.xbar.slave[19]
_kernel.C_row_scratch.instrPort = _kernel.xbar.master[5]
_kernel.C_row_scratch.seqPort = _kernel.xbar.slave[20]
_kernel.C_int_scratch.instrPort = _kernel.xbar.master[6]
_kernel.C_int_scratch.seqPort = _kernel.xbar.slave[21]
num_row_accs = 8
_kernel.num_accs = num_row_accs
row_accs = []
for i in range(num_row_accs):
    row_acc = PIMRowAccelerator()
    num_colrow_accs = 8
    row_acc.num_accs = num_colrow_accs
    row_acc.bridge.num_bridges = 17
    row_acc.bridge.master[0] = row_acc.xbar.slave[0]
    for j in range(16):
        row_acc.bridge.slave[1 + j] = row_acc.xbar.master[j]
    row_acc.colRowSched.instrPort = row_acc.xbar.master[16]
    row_acc.colRowSched.memPort = row_acc.xbar.slave[1]
    row_acc.sorter.instrPort = row_acc.xbar.master[17]
    for j in range(16):
        row_acc.sorter.memPort = row_acc.xbar.slave[2 + j]
    row_acc.ind_hash.instrPort = row_acc.xbar.master[18]
    row_acc.ind_hash.seqPort = row_acc.xbar.slave[18]
    row_acc.val_hash.instrPort = row_acc.xbar.master[19]
    row_acc.val_hash.seqPort = row_acc.xbar.slave[19]
    colrow_accs = []
    for j in range(num_colrow_accs):
        colrow_acc = PIMColRowAcc()
        num_col_accs = 8
        colrow_acc.num_hashers = 8
        colrow_acc.bridge.num_bridges = 17
        colrow_acc.bridge.master[0] = colrow_acc.xbar.slave[0]
        for k in range(16):
            colrow_acc.bridge.slave[1 + k] = colrow_acc.xbar.master[k]
        colrow_acc.colSched.instrPort = colrow_acc.xbar.master[16]
        colrow_acc.colSched.memPort = colrow_acc.xbar.slave[1]
        hashers = []
        for k in range(num_col_accs):
            hasher = PIMMulHasher()
            hasher.instrPort = colrow_acc.xbar.master[17 + k]
            hasher.memPort = colrow_acc.xbar.slave[2 + k]
            hashers.append(hasher)
        colrow_acc.hashers = hashers
        colrow_accs.append(colrow_acc)
    row_acc.accs = colrow_accs
    for j in range(num_colrow_accs):
        row_acc.accs[j].bridge.slave[0] = row_acc.xbar.master[20 + j]
        for k in range(16):
            row_acc.accs[j].bridge.master[1 + k] = row_acc.xbar.slave[20 + 16 * j + k]
    for j in range(16):
        row_acc.ind_hash.memPort[j] = row_acc.xbar.master[28 + j]
    for j in range(16):
        row_acc.val_hash.memPort[j] = row_acc.xbar.master[44 + j]
    row_accs.append(row_acc)
_kernel.row_accs = row_accs
for i in range(num_row_accs):
    _kernel.row_accs[i].bridge.slave[0] = _kernel.xbar.master[7 + i]
    for j in range(16):
        _kernel.row_accs[i].bridge.master[1 + j] = _kernel.xbar.slave[22 + 16 * i + j]
for i in range(16):
    _kernel.cache.pim_side[i] = _kernel.xbar.master[15 + i]
_kernel.cache.mem_side = _kernel.xbar.slave[150]
for i in range(16):
    _kernel.A_row_scratch.memPort[i] = _kernel.xbar.master[31 + i]
    _kernel.B_row_scratch.memPort[i] = _kernel.xbar.master[47 + i]
    _kernel.C_row_scratch.memPort[i] = _kernel.xbar.master[63 + i]
    _kernel.C_int_scratch.memPort[i] = _kernel.xbar.master[79 + i]
pim_kernerls.append(_kernel)
system.pim_kernerls = pim_kernerls
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "build/X86/python/m5/main.py", line 457, in main
    exec(filecode, scope)
  File "configs/example/se.py", line 288, in <module>
    Simulation.run(options, root, system, FutureClass)
  File "/mnt/d/gem5/configs/common/Simulation.py", line 614, in run
    m5.instantiate(checkpoint_dir)
  File "build/X86/python/m5/simulate.py", line 120, in instantiate
    for obj in root.descendants(): obj.createCCObject()
  File "build/X86/python/m5/SimObject.py", line 1648, in createCCObject
    self.getCCParams()
  File "build/X86/python/m5/SimObject.py", line 1589, in getCCParams
    value = value.getValue()
  File "build/X86/python/m5/params.py", line 254, in getValue
    return [ v.getValue() for v in self ]
  File "build/X86/python/m5/SimObject.py", line 1652, in getValue
    return self.getCCObject()
  File "build/X86/python/m5/SimObject.py", line 1630, in getCCObject
    params = self.getCCParams()
  File "build/X86/python/m5/SimObject.py", line 1589, in getCCParams
    value = value.getValue()
  File "build/X86/python/m5/SimObject.py", line 1652, in getValue
    return self.getCCObject()
  File "build/X86/python/m5/SimObject.py", line 1630, in getCCObject
    params = self.getCCParams()
  File "build/X86/python/m5/SimObject.py", line 1589, in getCCParams
    value = value.getValue()
  File "build/X86/python/m5/SimObject.py", line 1652, in getValue
    return self.getCCObject()
  File "build/X86/python/m5/SimObject.py", line 1630, in getCCObject
    params = self.getCCParams()
  File "build/X86/python/m5/SimObject.py", line 1589, in getCCParams
    value = value.getValue()
  File "build/X86/python/m5/params.py", line 254, in getValue
    return [ v.getValue() for v in self ]
  File "build/X86/python/m5/SimObject.py", line 1652, in getValue
    return self.getCCObject()
  File "build/X86/python/m5/SimObject.py", line 1634, in getCCObject
    % self.path())
RuntimeError: system.pim_kernerls: Cycle found in configuration hierarchy.
I found the error: some of my custom modules were referencing system as a parameter, which creates a cycle in the configuration hierarchy. Use the --pdb option to debug the Python configuration code.
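For reference, gem5's own SimObjects usually take a back-reference to the owning System through the Parent proxy rather than being assigned the system object directly; the proxy is resolved at instantiation time and adds no child edge back up the tree. A sketch with a hypothetical PIM module declaration:

from m5.SimObject import SimObject
from m5.params import Param
from m5.proxy import Parent

class PIMRowAccelerator(SimObject):
    type = 'PIMRowAccelerator'
    cxx_header = 'pim/pim_row_accelerator.hh'  # hypothetical path
    # Resolve the owning System via the proxy. Assigning the system
    # object directly as a parameter can re-parent it under this module
    # and close a cycle in the configuration hierarchy.
    system = Param.System(Parent.any, "system this module belongs to")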

AttributeError: 'numpy.float32' object has no attribute 'to_cpu'

Good day,
I'm developing a deep learning model for wireless signal detection. Below is a snippet of the function that computes the model accuracy and bit error rate (BER):
import chainer  # needed for chainer.config.train below
from chainer.datasets import TupleDataset
import numpy as np
from chainer import cuda
from chainer import function

def get_idp_acc(model, dataset_tuple, comp_ratio, profile=None, batchsize=128, gpu=-1):
    chainer.config.train = True
    xp = np if gpu < 0 else cuda.cupy
    x, indices, x_zf, HtH, Hty = (dataset_tuple._datasets[0], dataset_tuple._datasets[1],
                                  dataset_tuple._datasets[2], dataset_tuple._datasets[3],
                                  dataset_tuple._datasets[4])
    accs = 0
    BERs = 0
    model.train = False
    for j in range(0, len(x), batchsize):
        x_batch = xp.array(x[j:j + batchsize])
        indices_batch = xp.array(indices[j:j + batchsize])
        x_zf_batch = xp.array(x_zf[j:j + batchsize])
        HtH_batch = xp.array(HtH[j:j + batchsize])
        Hty_batch = xp.array(Hty[j:j + batchsize])
        if profile == None:
            acc_data = model(x_batch, indices_batch, x_zf_batch, HtH_batch, Hty_batch,
                             comp_ratio=comp_ratio, ret_param='acc')
        else:
            acc_data = model(x_batch, indices_batch, x_zf_batch, HtH_batch, Hty_batch,
                             comp_ratio=comp_ratio, ret_param='acc', profile=profile)
        acc_data.to_cpu()
        acc = acc_data.data
        BER = 1.0 - acc
        accs += acc * len(x_batch)
        BERs += BER * len(x_batch)
    return (accs / len(x)) * 100.
When the code is run, I get the following error despite having imported all the required chainer modules. I really need your help on this issue, as I've been stuck for nearly two months without making any headway in my project.
Traceback (most recent call last):
  File "/Users/mac/Documents/idp_detnet/examples/run_mlp.py", line 14, in <module>
    mlp.run(args)
  File "/Users/mac/Documents/idp_detnet/examples/mlp.py", line 39, in run
    acc_dict[name], BER_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
  File "/Users/mac/Documents/idp_detnet/examples/util.py", line 107, in sweep_idp
    batchsize=args.batchsize, profile=profile))
  File "/Users/mac/Documents/idp_detnet/examples/util.py", line 83, in get_idp_acc
    acc_data.to_cpu()
AttributeError: 'numpy.float32' object has no attribute 'to_cpu'
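One observation from the model code below: for ret_param='acc' the model returns the output of accuracy(), which is a plain numpy.float32 scalar rather than a chainer.Variable, so it has no to_cpu() method. A type check in get_idp_acc (a sketch) sidesteps the crash:

import chainer

# acc_data is a chainer.Variable for 'loss' but a bare numpy scalar for 'acc'
if isinstance(acc_data, chainer.Variable):
    acc_data.to_cpu()      # move the Variable's data to host memory
    acc = acc_data.data
else:
    acc = acc_data         # already a plain numpy value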
Below is additional information: the code for the model definition.
# (imports of F, IncompleteLinear, layer_profile, sg, and reporter are omitted in the post)
K = 10
num_layers = 3*K

def lin_soft_sign(x, t):
    '''Linear soft sign activation function from the original paper Eq. (11)'''
    y = -1 + F.relu(x + t)/F.absolute(t) - F.relu(- t)/F.absolute(t)
    return y

def accuracy(x, y):
    '''Computes the fraction of elements for which x and y are equal'''
    return np.mean(np.equal(x, y)).astype(np.float32)

class MLP(chainer.Chain):

    def __init__(self, K, coeff_generator, profiles=None, z_dims=8*K, v_dims=2*K):
        super(MLP, self).__init__()
        if profiles == None:
            profiles = [(0, 10)]
        self.coeff_generator = coeff_generator
        self.z_dims = z_dims
        self.v_dims = v_dims
        self.K = K
        self.profiles = profiles
        self.profile = 0
        with self.init_scope():
            self.p0_l1 = IncompleteLinear(None, self.z_dims)
            self.p1_l1 = IncompleteLinear(None, self.z_dims)
            self.p2_l1 = IncompleteLinear(None, self.z_dims)
            self.p0_lv = IncompleteLinear(None, self.v_dims)
            self.p1_lv = IncompleteLinear(None, self.v_dims)
            self.p2_lv = IncompleteLinear(None, self.v_dims)
            self.p0_l3 = IncompleteLinear(None, self.K)
            self.p1_l3 = IncompleteLinear(None, self.K)
            self.p2_l3 = IncompleteLinear(None, self.K)

    def __call__(self, x, indices, x_zf, HtH, Hty, ret_param='loss', profile=None, comp_ratio=None):
        if profile == None:
            profile = self.profile

        # Form Zero-forcing detection
        err_rel = F.sum((x - x_zf)**2, axis=1)
        params = layer_profile(self.coeff_generator,
                               *self.profiles[profile], self.z_dims,
                               self.v_dims, comp_ratio)

        def detnet_layer(x_d, x_logit, v, z_dims, v_dims):
            HtH_x = np.matmul(HtH, np.expand_dims(x_d.data, axis=2).astype(np.float32))
            HtH_x = F.squeeze(HtH_x, axis=-1)
            #x_concat = np.concatenate([Hty, x, HtH_x, v], axis=1)
            x_concat = F.concat([Hty, x_d, HtH_x, v], axis=1)
            if profile == 0:
                z = F.relu(self.p0_l1(x_concat))
                v += self.p0_lv(z, *params)
                x_logit += self.p0_l3(z, *params)
                x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
            elif profile == 1:
                z = F.relu(self.p1_l1(x_concat))
                v += self.p1_lv(z, *params)
                x_logit += self.p1_l3(z, *params)
                x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
            elif profile == 2:
                z = F.relu(self.p2_l1(x_concat))
                v += self.p2_lv(z, *params)
                x_logit += self.p2_l3(z, *params)
                x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
            return x, x_logit, v

        x_k = np.zeros((Hty.shape[0], self.K), dtype=np.float32)
        x_k_logit = np.zeros((Hty.shape[0], self.K), dtype=np.float32)
        v = np.zeros((Hty.shape[0], self.v_dims), dtype=np.float32)
        loss = 0
        mod = sg.Modulator('BPSK', K)
        for k in range(1, num_layers + 1):
            x_k, x_k_logit, v = detnet_layer(x_k, x_k_logit, v, self.z_dims, self.v_dims)
            err = F.sum((x - x_k)**2, 1)
            loss += (np.log(k)).astype(np.float32) * F.mean(err/err_rel)
        report = {'loss': loss, 'acc': accuracy(mod.demodulate(x_k.data), indices)}
        reporter.report(report, self)
        return report[ret_param]

    def report_params(self):
        return ['validation/main/acc']

    def param_names(self):
        if len(self.profiles) > 1:
            return 'IDPDETNET_{}_{}_{}_p{}'.format(self.z_dims, self.v_dims, self.coeff_generator.__name__, len(self.profiles))
        return 'IDPDETNET_{}_{}_{}'.format(self.z_dims, self.v_dims, self.coeff_generator.__name__)
import os
import sys
sys.path.insert(0, os.path.abspath(
    os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import visualize as vz
import idp.coeffs_generator as cg
from net import MLP
import util

K = 10
N = 4
v_dims = 2*K
z_dims = 8*K
SNR_dB_tmin = -4
SNR_dB_tmax = 24
SNR_dB_test = np.linspace(SNR_dB_tmin, SNR_dB_tmax, 8)
num_snr_test = len(SNR_dB_test)

def run(args):
    train, test = util.get_dataset(args.modeltype)
    names = ['all-one (standard)', 'linear']
    colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
    models = [
        MLP.MLP(K, cg.uniform, z_dims=8*K, v_dims=2*K),
        MLP.MLP(K, cg.linear, z_dims=8*K, v_dims=2*K)
    ]
    comp_ratios = np.linspace(0.1, 1.0, 20)
    acc_dict = {}
    BER_dict = {}
    ratios_dict = {}
    for i in range(num_snr_test):
        for name, model in zip(names, models):
            util.load_or_train_model(model, train, test, args)
            acc_dict[name], BER_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
            ratios_dict[name] = [100. * cr for cr in comp_ratios]
    filename = "IDPDETNET1_{}".format(args.modeltype)
    vz.plot(ratios_dict, acc_dict, names, filename, colors=colors,
            folder=args.figure_path, ext=args.ext,
            title='IDPDETNET (BPSK)',
            xlabel='IDP (%)',
            ylabel='Test Accuracy (%)', ylim=(0, 100))
    filename = "IDPDETNET2_{}".format(args.modeltype)
    vz.plot(ratios_dict, BER_dict, names, filename, colors=colors,
            folder=args.figure_path, ext=args.ext,
            title='IDPDETNET (BPSK)',
            xlabel='IDP (%)',
            ylabel='BER (bits/sec)')
    filename = "IDPDETNET3_{}".format(args.modeltype)
    vz.plot(num_snr_test, BER_dict, names, filename, colors=colors,
            folder=args.figure_path, ext=args.ext,
            title='IDPDETNET (BPSK)',
            xlabel='SNR (dB)',
            ylabel='BER (bits/sec)')

if __name__ == '__main__':
    args = util.default_parser('IDPDETNET Example').parse_args()
    run(args)
Hi Seiya Tokui. Thank you for your kind input. Here is the model definition based on the above code:
model = MLP.MLP(K, cg.uniform, z_dims = 8*K, v_dims = 2*K)
OR
model = MLP.MLP(K, cg.linear, z_dims = 8*K, v_dims = 2*K)
Hi @BloodyD. Thanks for your brilliant contributions. The model started training, but then later returned the following error:
1 nan nan 0.50108 5.85448
Traceback (most recent call last):
  File "run_mlp.py", line 14, in <module>
    mlp.run(args)
  File "/Users/mac/Documents/idp_detnet/examples/mlp.py", line 38, in run
    util.load_or_train_model(model, train, test, args)
  File "/Users/mac/Documents/idp_detnet/examples/util.py", line 204, in load_or_train_model
    train_model(model, train, test, args)
  File "/Users/mac/Documents/idp_detnet/examples/util.py", line 184, in train_model
    return eval(fp.read().replace('\n', ''))
  File "<string>", line 1, in <module>
NameError: name 'NaN' is not defined
The error occurs in the last line of the code snippet below:
name = model.param_names()
save_model(model, os.path.join(args.model_path, name))
chainer.config.train = False
with open(os.path.join(args.out, 'log'), 'r') as fp:
    return eval(fp.read().replace('\n', ''))
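Two separate things seem to be going on in that snippet: the training diverged (the log line shows a loss of nan), so the log file written by chainer's LogReport contains the literal NaN, and eval() has no name NaN in scope. Parsing the log with json instead of eval (a sketch; Python's json module accepts NaN literals by default) makes the read itself robust:

import json

with open(os.path.join(args.out, 'log'), 'r') as fp:
    return json.load(fp)  # LogReport writes JSON; json parses NaN as float('nan')

The NameError is only a symptom, though: the nan loss itself (for example from err_rel reaching zero, or an unstable learning rate) still needs to be fixed for training to succeed.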