Getting flow info from a switch and copying it to a CSV file (Ryu controller) - SDN

I hope you can help me. I want to get flow info from a switch by sending a request every 10 seconds; the switch replies with the info, but I get the following error when the controller receives the reply in the flow stats reply handler. The error occurs because of the flow match field 'eth_type'.
CollectTrainingStatsApp: Exception occurred during handler processing. Backtrace from offending handler [_flow_stats_reply_handler] servicing event [EventOFPFlowStatsReply] follows.
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/ryu/base/app_manager.py", line 290, in _event_loop
handler(ev)
File "/home/guenfaf/Documents/Training ryu/data_to_csv/data_to_csv.py", line 59, in _flow_stats_reply_handler
for stat in sorted([flow for flow in body if (flow.priority == 1) ], key=lambda flow:
File "/home/guenfaf/Documents/Training ryu/data_to_csv/data_to_csv.py", line 60, in <lambda>
(flow.match['eth_type'],flow.match['ipv4_src'],flow.match['ipv4_dst'],flow.match['ip_proto'])):
File "/usr/local/lib/python2.7/dist-packages/ryu/ofproto/ofproto_v1_3_parser.py", line 904, in __getitem__
return dict(self._fields2)[key]
KeyError: 'eth_type'
Here is my code:
from ryu.app import simple_switch_13
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.lib import hub
from time import time


# class CollectTrainingStatsApp(simple_switch_13.SimpleSwitch13):
class CollectTrainingStatsApp(simple_switch_13.SimpleSwitch13):

    def __init__(self, *args, **kwargs):
        super(CollectTrainingStatsApp, self).__init__(*args, **kwargs)
        self.datapaths = {}
        self.monitor_thread = hub.spawn(self.monitor)
        file0 = open("FlowStatsfile.csv", "w")
        file0.write('datapath_id,flow_id,ip_src,tp_src,ip_dst,tp_dst,ip_proto,flow_duration_sec,flow_duration_nsec,idle_timeout,hard_timeout,flags,packet_count,byte_count,packet_count_per_second,packet_count_per_nsecond,byte_count_per_second,byte_count_per_nsecond,label\n')
        file0.close()

    # Asynchronous message
    @set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            if datapath.id not in self.datapaths:
                self.logger.debug('register datapath: %016x', datapath.id)
                self.datapaths[datapath.id] = datapath
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.datapaths:
                self.logger.debug('unregister datapath: %016x', datapath.id)
                del self.datapaths[datapath.id]

    def monitor(self):
        while True:
            for dp in self.datapaths.values():
                self.request_stats(dp)
            hub.sleep(10)

    def request_stats(self, datapath):
        self.logger.debug('send stats request: %016x', datapath.id)
        parser = datapath.ofproto_parser
        req = parser.OFPFlowStatsRequest(datapath)
        datapath.send_msg(req)

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        # timestamp = time.time()
        tp_src = 0
        tp_dst = 0
        file0 = open("FlowStatsfile.csv", "a+")
        body = ev.msg.body
        for stat in sorted([flow for flow in body if (flow.priority == 1)],
                           key=lambda flow: (flow.match['eth_type'],
                                             flow.match['ipv4_src'],
                                             flow.match['ipv4_dst'],
                                             flow.match['ip_proto'])):
            ip_src = stat.match['ipv4_src']
            ip_dst = stat.match['ipv4_dst']
            ip_proto = stat.match['ip_proto']

            if stat.match['ip_proto'] == 1:
                icmp_code = stat.match['icmpv4_code']
                icmp_type = stat.match['icmpv4_type']
            elif stat.match['ip_proto'] == 6:
                tp_src = stat.match['tcp_src']
                tp_dst = stat.match['tcp_dst']
            elif stat.match['ip_proto'] == 17:
                tp_src = stat.match['udp_src']
                tp_dst = stat.match['udp_dst']

            flow_id = str(ip_src) + str(tp_src) + str(ip_dst) + str(tp_dst) + str(ip_proto)

            try:
                packet_count_per_second = stat.packet_count / stat.duration_sec
                packet_count_per_nsecond = stat.packet_count / stat.duration_nsec
            except:
                packet_count_per_second = 0
                packet_count_per_nsecond = 0

            try:
                byte_count_per_second = stat.byte_count / stat.duration_sec
                byte_count_per_nsecond = stat.byte_count / stat.duration_nsec
            except:
                byte_count_per_second = 0
                byte_count_per_nsecond = 0

            file0.write("{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}\n"
                        .format(ev.msg.datapath.id, flow_id, ip_src, tp_src, ip_dst, tp_dst,
                                stat.match['ip_proto'],
                                stat.duration_sec, stat.duration_nsec,
                                stat.idle_timeout, stat.hard_timeout,
                                stat.flags, stat.packet_count, stat.byte_count,
                                packet_count_per_second, packet_count_per_nsecond,
                                byte_count_per_second, byte_count_per_nsecond, 0))
        file0.close()
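The KeyError happens because not every priority-1 flow carries an 'eth_type' match: the L2 flows installed by the parent SimpleSwitch13 app typically match only on in_port, eth_src, and eth_dst, so indexing their match with 'eth_type' fails. One way to avoid it, shown in the minimal sketch below, is to skip entries that lack the IP fields before sorting; this assumes OFPMatch supports membership tests (its dict-like get()/items() interface suggests it does) and that only IP flows should be logged:

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def _flow_stats_reply_handler(self, ev):
        body = ev.msg.body
        # Keep only priority-1 flows that actually carry IP match fields;
        # L2-only flows (matching just eth_src/eth_dst) have no 'eth_type'/'ipv4_*'
        # keys and would raise KeyError when used as sort keys.
        ip_flows = [f for f in body
                    if f.priority == 1 and 'ipv4_src' in f.match and 'ipv4_dst' in f.match]
        for stat in sorted(ip_flows,
                           key=lambda f: (f.match['ipv4_src'],
                                          f.match['ipv4_dst'],
                                          f.match['ip_proto'])):
            ip_src = stat.match['ipv4_src']
            ip_dst = stat.match['ipv4_dst']
            ip_proto = stat.match['ip_proto']
            # ... build flow_id and write the CSV row exactly as in the original handler ...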

Related

Selenium Issue with Exec?

I am running a web scraper with Selenium to get some data on the NBA. I have URLs to get to the websites for each of the 30 teams, but when I run the code it only gets through a few of the URLs and then crashes with the error shown below:
#web scraper
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import pandas as pd
import os


class NBAScraper:
    def __init__(self):
        #part 1
        url = "https://www.nba.com/teams"
        HTML = requests.get(url)
        soup = BeautifulSoup(HTML.text, 'html.parser')
        text = str(soup.find_all("a", "Anchor_anchor__cSc3P TeamFigureLink_teamFigureLink__uqnNO"))
        ids = []
        for i in range(0, 30):
            hr = text.find("stats")
            ids.append(text[(hr+11):(hr+21)])
            text = text[(hr+22):]

        #part 2
        names = []
        for j in range(0, 30):
            url2 = "https://www.nba.com/stats/team/"+str(ids[j])+"/advanced"
            HTML2 = requests.get(url2)
            soup2 = BeautifulSoup(HTML2.text, 'html.parser')
            ##div class="TeamHeader_name__MmHlP
            name = str(soup2.find("div", "TeamHeader_name__MmHlP"))
            ni = name.find("div>")
            ni2 = name.find("<!")
            name1 = name[(ni+4):ni2]
            name = name[ni2:]
            ni3 = name.find("<div>")
            name = name[(ni3+5):]
            ni4 = name.find("</div>")
            name2 = name[:ni4]
            n = name1 + " " + name2
            names.append(n)

        ##tbody class="Crom_body__UYOcU"
        #part 3
        offrtg = []
        defrtg = []
        reb = []
        tov = []
        efg = []
        for k in range(0, 30):
            self.driver = webdriver.Chrome()
            url3 = "https://www.nba.com/stats/team/"+str(ids[k])+"/advanced"
            self.driver.get(url3)
            rndrhtml = self.driver.page_source
            self.driver.close()
            #self.driver.quit()

            soup3 = BeautifulSoup(rndrhtml, 'html.parser')
            ovrall = str(soup3.find("tbody", "Crom_body__UYOcU").find_all("td"))
            for d in range(0, 13):
                di = ovrall.find("<td>")
                ovrall = ovrall[(di+4):]
                #conditions
                if d == 2:
                    di2 = ovrall.find("</td>")
                    offrtg.append(float(ovrall[:di2]))
                elif d == 3:
                    di2 = ovrall.find("</td>")
                    defrtg.append(float(ovrall[:di2]))
                elif d == 10:
                    di2 = ovrall.find("</td>")
                    reb.append(float(ovrall[:di2]))
                elif d == 11:
                    di2 = ovrall.find("</td>")
                    tov.append(float(ovrall[:di2]))
                elif d == 12:
                    di2 = ovrall.find("</td>")
                    efg.append(float(ovrall[:di2]))

        #writing to excel
        os.remove(r"C:\Users\jackm\OneDrive\Desktop\NBA\NBASTATS.xlsx")
        d = {'Name': names, 'OFFRTG': offrtg, 'DEFRTG': defrtg, 'REB': reb,
             'TOV': tov, 'EFG': efg}
        df = pd.DataFrame(data=d)
        df.to_excel(r"C:\Users\jackm\OneDrive\Desktop\NBA\NBASTATS.xlsx", sheet_name="STATS")


NBAScraper()
I tried playing around with the driver's close and quit functions, and with putting the driver in a separate function and running it outside the class, but none of that worked. I realized through some testing that even outside a loop, Selenium will throw the error for a URL the first time but run it fine the second time. I tried using implicit waits to solve this, but to no avail.
Traceback (most recent call last):
File "C:\Program Files\Spyder\pkgs\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\users\jackm\spyder\nba.py", line 104, in <module>
NBAScraper()
File "c:\users\jackm\spyder\nba.py", line 71, in __init__
ovrall = str(soup3.find("tbody", "Crom_body__UYOcU").find_all("td"))
AttributeError: 'NoneType' object has no attribute 'find_all'
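The AttributeError means soup3.find("tbody", "Crom_body__UYOcU") returned None, i.e. the stats table was not in the page source at the moment it was captured. One common mitigation, sketched below as an assumption rather than a verified fix, is an explicit wait for the table body before reading page_source (this would replace the per-iteration driver setup in part 3):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get(url3)  # url3 built as in part 3 above
# Block for up to 15 seconds until the stats table body exists in the DOM;
# raises TimeoutException if the page never renders it.
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located((By.CSS_SELECTOR, "tbody.Crom_body__UYOcU")))
rndrhtml = driver.page_source
driver.quit()

Reusing a single driver instance across the 30 URLs (creating it once before the loop and quitting after) would also avoid paying the browser start-up cost on every iteration.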

Unable to use 'read_sql' to call a SQL query class

I am trying to pull results from the database with the following code:
import pandas as pd
import pyodbc


class DataManagement(object):
    def __init__(self, database=None, server=None, trusted_connection=True,
                 database_driver=ODBC_SQL2005_2012, uid=None, pwd=None):
        self.server = server
        self.database = database
        self.uid = uid
        self.pwd = pwd

        # Use default server name if none supplied - assumed to be localhost
        if self.server is None:
            self.server = SERVER
        if self.database is None:
            self.database = DATABASE

        # Use default sql credentials if none provided
        if self.uid is None or self.pwd is None:
            self.uid = DEFAULT_UID
            self.pwd = DEFAULT_PASSWORD

        if trusted_connection:
            self.connectionstring = "DRIVER={0};SERVER={1};DATABASE={2};Trusted_Connection=yes;".format(database_driver, self.server, self.database)
        else:
            self.connectionstring = 'DRIVER={0};SERVER={1};DATABASE={2};UID={3};PWD={4};'.format(database_driver, self.server, self.database, uid, pwd)

        self.connection = pyodbc.connect(self.connectionstring)
        self.cursor = self.connection.cursor()

    def __enter__(self):
        return self

    def __exit__(self, ctx_type, ctx_value, ctx_traceback):
        self.connection.commit()
        self.connection.close()


qq = DataManagement()
sql = ("select * from ***** ")
data_df = pd.read_sql(sql, qq)
I get an error:
Traceback (most recent call last):
File "<ipython-input-94-453876631fe0>", line 3, in <module>
data_df = pd.read_sql(sql, qq)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 380, in read_sql
chunksize=chunksize)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1468, in read_query
cursor = self.execute(*args)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1426, in execute
cur = self.con.cursor()
TypeError: 'pyodbc.Cursor' object is not callable
I saw a similar question at TypeError: 'pyodbc.Cursor' object is not callable (Python 3.6), but was unable to get an answer from there.
I got it to work by editing the class, changing
self.cursor = self.connection.cursor()
to
self.cursor = self.connection.cursor
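That works because pandas ultimately calls con.cursor() on whatever object is passed as the connection (see the cur = self.con.cursor() frame in the traceback), so the cursor attribute has to stay callable. An alternative sketch, assuming read-only access is enough, is to hand pandas the underlying pyodbc connection instead of the wrapper:

import pandas as pd

# qq.connection is the raw pyodbc connection created in __init__;
# pandas can call .cursor() on it directly. "some_table" is a placeholder name.
with DataManagement() as qq:
    data_df = pd.read_sql("select * from some_table", qq.connection)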

How to write a custom PySpark serializer

I want to write a custom PySpark serializer. There is very little documentation online apart from the details here.
My logic is as follows:
If I receive my special object, then I use custom logic to serialize/deserialize.
Otherwise, use cPickle for the same.
The custom serializer looks like the following:
from pyspark.serializers import FramedSerializer


class CustomSerializer(FramedSerializer):

    import cPickle as pickle
    import CustomClass

    def dumps(self, obj):
        if isinstance(obj, CustomClass):
            bytes_str = obj.serialize()
            bytes_str = '\1' + bytes_str
        elif isinstance(obj, CustomClass.Location):
            bytes_str = obj.serialize()
            bytes_str = '\2' + bytes_str
        else:
            bytes_str = pickle.dumps(obj)
            bytes_str = '\0' + bytes_str
        return bytes_str

    def loads(self, bytes_str):
        c = bytes_str[0]
        if c == '\1':
            obj = CustomClass()
            obj.parse_from_string(bytes_str[1:])
        elif c == '\2':
            obj = CustomClass.Location()
            obj.parse_from_string(bytes_str[1:])
        else:
            obj = pickle.loads(bytes_str[1:])
        return obj
While initiating SparkContext, I make sure I specify the custom serializer:
serializer = CustomSerializer()
sc = SparkContext(appName='MyApp', serializer=serializer)
However, I still get an error:
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/mnt/yarn/usercache/user/appcache/application_1507044435666_0035/container_1507044435666_0035_01_000002/pyspark.zip/pyspark/worker.py", line 174, in main
process()
File "/mnt/yarn/usercache/user/appcache/application_1507044435666_0035/container_1507044435666_0035_01_000002/pyspark.zip/pyspark/worker.py", line 169, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/mnt/yarn/usercache/user/appcache/application_1507044435666_0035/container_1507044435666_0035_01_000002/pyspark.zip/pyspark/serializers.py", line 272, in dump_stream
bytes = self.serializer.dumps(vs)
File "<ipython-input-2-536808351108>", line 14, in dumps
PicklingError: Can't pickle <class 'CustomClass.Location'>: attribute lookup Location failed
What am I missing?
Thanks.
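The PicklingError is raised in the else branch (pickle.dumps(obj) at line 14 of dumps), where cPickle tries to locate CustomClass.Location by attribute lookup and fails; that usually means the class is not reachable as a top-level attribute of its module in the process doing the pickling, for example because it is a nested class or the module was never shipped to the executors. A sketch of one workaround, under the assumption that the classes live at top level in a separate custom_class.py module, is to ship that module and keep the imports at module level rather than inside the serializer's class body:

# Hypothetical layout: CustomClass and Location are defined at top level in custom_class.py.
from pyspark import SparkContext

sc = SparkContext(appName='MyApp', serializer=CustomSerializer())
# Ship the module so executors can import it; pickle's attribute lookup for
# custom_class.Location can then succeed when the fallback branch runs.
sc.addPyFile('custom_class.py')  # hypothetical path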

Export Model For Serving But Tensor Type Mismatch

What related GitHub issues or StackOverflow threads have you found by searching the web for your problem?
I am trying to export the model for serving, but it reports a type error about the input tensors, even though the inputs have the same type in the export and predict parts.
If possible, provide a minimal reproducible example (We usually don't have time to read hundreds of lines of your code)
Here is sample code for my export:
```
named_graph_signature = {
    'inputs': exporter.generic_signature({
        'sparse_index': tf.placeholder(tf.int64, name="feature_index"),
        'sparse_ids': tf.placeholder(tf.int64, name="feature_ids"),
        'sparse_values': tf.placeholder(tf.int64, name="feature_values"),
        'sparse_shape': tf.placeholder(tf.int64, name="feature_shape")
    }),
    'outputs': exporter.generic_signature({
        'prob': inference_softmax
    })}
model_exporter.init(
    sess.graph.as_graph_def(),
    #default_graph_signature=named_graph_signature,
    named_graph_signatures=named_graph_signature,
    init_op=init_op)
model_exporter.export(export_path, tf.constant(export_version), sess)
print('Done exporting!')
```
Here is my code for predicting:
```
ins = "0 142635:1 250810:1 335229:1 375278:1 392970:1 506983:1 554566:1 631968:1 647823:1 658803:1 733446:1 856305:1 868202:1"
FEATURE_SIZE = 1000000
tokens = ins.split(" ")
feature_num = 0
feature_ids = []
feature_values = []
feature_index = []
for feature in tokens[1:]:
    feature_id, feature_value = feature.split(":")
    feature_ids.append(int(feature_id))
    feature_values.append(float(feature_value))
    feature_index.append([1, feature_num])
    feature_num += 1
feature_shape = [1, FEATURE_SIZE]

sparse_index = tf.contrib.util.make_tensor_proto(numpy.asarray(feature_index), dtype=tf.int64)
sparse_ids = tf.contrib.util.make_tensor_proto(numpy.asarray(feature_ids), dtype=tf.int64)
sparse_values = tf.contrib.util.make_tensor_proto(numpy.asarray(feature_values), dtype=tf.float32)
sparse_shape = tf.contrib.util.make_tensor_proto(numpy.asarray(feature_shape), dtype=tf.int64)

channel = implementations.insecure_channel(host, port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = model_name
request.model_spec.version.value = model_version
print model_name, model_version
request.inputs['sparse_index'].CopyFrom(sparse_index)
request.inputs['sparse_ids'].CopyFrom(sparse_ids)
request.inputs['sparse_values'].CopyFrom(sparse_values)
request.inputs['sparse_shape'].CopyFrom(sparse_shape)
# Send request
result = stub.Predict(request, request_timeout)
```
Logs or other output that would be helpful
(If logs are large, please upload as attachment or provide link).
```
Traceback (most recent call last):
File "run.py", line 63, in <module>
main()
File "run.py", line 59, in main
result = stub.Predict(request, request_timeout)
File "/home/serving/.local/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 305, in __call__
self._request_serializer, self._response_deserializer)
File "/home/serving/.local/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 203, in _blocking_unary_unary
raise _abortion_error(rpc_error_call)
grpc.framework.interfaces.face.face.AbortionError: AbortionError(code=StatusCode.INVALID_ARGUMENT, details="input size does not match signature")
```
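One discrepancy visible in the two snippets above: the export signature declares 'sparse_values' as tf.int64, while the prediction request builds that tensor with dtype=tf.float32. Whether or not it is the direct cause of the "input size does not match signature" error, the dtypes should agree; a sketch of a consistent declaration on the export side, assuming the feature values really are floats, is:
```
# Declare the values placeholder with the dtype the client actually sends (float32 here).
'sparse_values': tf.placeholder(tf.float32, name="feature_values"),
```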

TypeError: POST data should be bytes or an iterable of bytes. It cannot be of type str

My code:
#!/usr/bin/env python
#coding: utf-8
userid = "NicoNicoCreate#gmail.com"
passwd = "********"
import sys, re, cgi, urllib, urllib.request, urllib.error, http.cookiejar, xml.dom.minidom, time, urllib.parse
import simplejson as json

def getToken():
    html = urllib.request.urlopen("http://www.nicovideo.jp/my/mylist").read()
    for line in html.splitlines():
        mo = re.match(r'^\s*NicoAPI\.token = "(?P<token>[\d\w-]+)";\s*', line)
        if mo:
            token = mo.group('token')
            break
    assert token
    return token

def mylist_create(name):
    cmdurl = "http://www.nicovideo.jp/api/mylistgroup/add"
    q = {}
    q['name'] = name.encode("utf-8")
    q['description'] = ""
    q['public'] = 0
    q['default_sort'] = 0
    q['icon_id'] = 0
    q['token'] = token
    cmdurl += "?" + urllib.parse.urlencode(q).encode("utf-8")
    j = json.load(urllib.request.urlopen(cmdurl), encoding='utf-8')
    return j['id']

def addvideo_tomylist(mid, smids):
    for smid in smids:
        cmdurl = "http://www.nicovideo.jp/api/mylist/add"
        q = {}
        q['group_id'] = mid
        q['item_type'] = 0
        q['item_id'] = smid
        q['description'] = u""
        q['token'] = token
        cmdurl += "?" + urllib.parse.urlencode(q).encode("utf-8")
        j = json.load(urllib.request.urlopen(cmdurl), encoding='utf-8')
        time.sleep(0.5)

#Login
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(http.cookiejar.CookieJar()))
urllib.request.install_opener(opener)
urllib.request.urlopen("https://secure.nicovideo.jp/secure/login",
                       urllib.parse.urlencode({"mail": userid, "password": passwd})).encode("utf-8")
#GetToken
token = getToken()
#MakeMylist&AddMylist
mid = mylist_create(u"Testlist")
addvideo_tomylist(mid, ["sm9", "sm1097445", "sm1715919"])
My error:
Traceback (most recent call last):
File "Nico3.py", line 48, in <module>
urllib.parse.urlencode( {"mail":userid, "password":passwd}) ).encode("utf-8")
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 162, in urlopen
return opener.open(url, data, timeout)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 463, in open
req = meth(req)
File "/Library/Frameworks/Python.framework/Versions/3.5/lib/python3.5/urllib/request.py", line 1170, in do_request_
raise TypeError(msg)
TypeError: POST data should be bytes or an iterable of bytes. It cannot be of type str.
I've tried encode but it did not help.
I'm a Japanese academic student, and I was not able to settle this with my own knowledge.
I am aware of this similar question, TypeError: POST data should be bytes or an iterable of bytes. It cannot be str, but am too new for the answer to be much help.
Your paren is in the wrong place, so you are not actually encoding:
.urlencode({"mail":userid, "password":passwd}).encode("utf-8")) # <- move inside
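In other words, .encode("utf-8") is currently applied to the return value of urlopen() rather than to the urlencoded string, so the POST data is still a str. With the parenthesis moved, the login call becomes:

# POST data must be bytes: encode the urlencoded string before passing it to urlopen.
urllib.request.urlopen(
    "https://secure.nicovideo.jp/secure/login",
    urllib.parse.urlencode({"mail": userid, "password": passwd}).encode("utf-8"))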