ValueError when testing the grid scenario - SUMO

Following flow tutorial_8, I tried another scenario: SimpleGridScenario.
I have already checked grid.py and followed the rules for the parameters.
My original program is:
from flow.envs import Env
from gym.spaces.box import Box
from gym.spaces.tuple_space import Tuple
from flow.controllers import IDMController, ContinuousRouter,GridRouter
from flow.core.experiment import Experiment
from flow.core.params import SumoParams, EnvParams, \
    InitialConfig, NetParams
from flow.core.params import VehicleParams
from flow.scenarios.loop import LoopScenario, ADDITIONAL_NET_PARAMS
from flow.scenarios.grid import SimpleGridScenario
from flow.scenarios import SimpleGridScenario
import numpy as np
from flow.envs import myEnv
ADDITIONAL_ENV_PARAMS = {
    "max_accel": 1,
    "max_decel": 1,
}
sumo_params = SumoParams(sim_step=0.1, render=True)
vehicles = VehicleParams()
vehicles.add(
    veh_id="idm",
    acceleration_controller=(IDMController, {}),
    routing_controller=(GridRouter, {}),
    num_vehicles=22
)
env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
additional_net_params = ADDITIONAL_NET_PARAMS.copy()
net_params = NetParams(additional_params=additional_net_params)
initial_config = InitialConfig(bunching=20)
scenario = SimpleGridScenario(
    name='grid',
    vehicles=vehicles,
    net_params=NetParams(
        additional_params={
            'grid_array': {
                'row_num': 3,
                'col_num': 2,
                'inner_length': 500,
                'short_length': 500,
                'long_length': 500,
                'cars_top': 20,
                'cars_bot': 20,
                'cars_left': 20,
                'cars_right': 20,
            },
            'horizontal_lanes': 1,
            'vertical_lanes': 1,
            'speed_limit': {
                'vertical': 35,
                'horizontal': 35
            }
        },
        no_internal_links=False
    ),
    initial_config=initial_config
)
env = myEnv(env_params, sumo_params, scenario)
exp = Experiment(env)
_ = exp.run(1, 1500)
Then I ran it and got an error. The error log is:
(flow) dnl#dnl-Iiyama:~/flow$ python Tutorial_9_1.py
Loading configuration... done.
Success.
Loading configuration... done.
Error in edge length with key bot3_2
Error in edge length with key bot3_2
Traceback (most recent call last):
File "Tutorial_9_1.py", line 72, in <module>
_ = exp.run(1, 1500)
File "/home/dnl/flow/flow/core/experiment.py", line 118, in run
state = self.env.reset()
File "/home/dnl/flow/flow/envs/base_env.py", line 483, in reset
speed=speed)
File "/home/dnl/flow/flow/core/kernel/vehicle/traci.py", line 990, in add
[i for i in range(num_routes)], size=1, p=frac)[0])
File "mtrand.pyx", line 1126, in mtrand.RandomState.choice
ValueError: a must be non-empty
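As far as I can tell, this ValueError comes from numpy's random.choice being called with an empty list of candidate routes for a departing vehicle. A minimal sketch that reproduces just the numpy part of the failure (it assumes nothing about Flow itself, and the exact wording of the message can differ between numpy versions):
import numpy as np

# The vehicle kernel ends up with zero routes for the vehicle's starting edge,
# so the candidate list handed to choice is empty.
num_routes = 0
frac = []
np.random.choice([i for i in range(num_routes)], size=1, p=frac)
# ValueError: a must be non-empty
So the underlying question seems to be why the scenario generates no routes for some starting edge.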
I want to know what I am doing wrong. I am trying to test a scenario other than LoopScenario, but it doesn't work.
Please help.

Related

Allennlp: How to use CPU instead of GPU?

I'm running some code that works when a GPU is available, but I'm trying to figure out how to run it locally on the CPU. Here's the error:
2022-07-06 17:58:39,042 - INFO - allennlp.common.plugins - Plugin allennlp_models available
Traceback (most recent call last):
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/bin/allennlp", line 8, in <module>
sys.exit(run())
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/__main__.py", line 34, in run
main(prog="allennlp")
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/commands/__init__.py", line 118, in main
args.func(args)
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/commands/predict.py", line 205, in _predict
predictor = _get_predictor(args)
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/commands/predict.py", line 105, in _get_predictor
check_for_gpu(args.cuda_device)
File "/Users/xiaoqingwan/opt/miniconda3/envs/absa/lib/python3.7/site-packages/allennlp/common/checks.py", line 131, in check_for_gpu
" 'trainer.cuda_device=-1' in the json config file." + torch_gpu_error
allennlp.common.checks.ConfigurationError: Experiment specified a GPU but none is available; if you want to run on CPU use the override 'trainer.cuda_device=-1' in the json config file.
module 'torch.cuda' has no attribute '_check_driver'
Could you give me some guidance on what to do? Where is the config file and what is it called?
Here's the code (originally from: https://colab.research.google.com/drive/1F9zW_nVkwfwIVXTOA_juFDrlPz5TLjpK?usp=sharing):
# Use pretrained SpanModel weights for prediction
import sys
sys.path.append("aste")
from pathlib import Path
from data_utils import Data, Sentence, SplitEnum
from wrapper import SpanModel
def predict_sentence(text: str, model: SpanModel) -> Sentence:
    path_in = "temp_in.txt"
    path_out = "temp_out.txt"
    sent = Sentence(tokens=text.split(), triples=[], pos=[], is_labeled=False, weight=1, id=1)
    data = Data(root=Path(), data_split=SplitEnum.test, sentences=[sent])
    data.save_to_path(path_in)
    model.predict(path_in, path_out)
    data = Data.load_from_full_path(path_out)
    return data.sentences[0]
text = "Did not enjoy the new Windows 8 and touchscreen functions ."
model = SpanModel(save_dir="pretrained_14lap", random_seed=0)
sent = predict_sentence(text, model)
Try using something like:
import torch

device = torch.device("cpu")
model = SpanModel(save_dir="pretrained_14lap", random_seed=0)
model.to(device)
The config file is inside the model.tar.gz in the pretrained_14lap directory (it is always named config.json). It also contains the param "cuda_device": 0, which may be causing your problem.
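If editing the archive directly is easier, here is a rough sketch of how that parameter could be flipped to CPU (the archive path is an assumption based on the directory named above, and placing cuda_device under "trainer" follows the wording of the error message):
import json
import os
import tarfile

archive = "pretrained_14lap/model.tar.gz"   # assumed location of the trained archive
workdir = "pretrained_14lap/unpacked"

# Unpack the archive, set cuda_device to -1 (CPU), and repack it.
with tarfile.open(archive) as tar:
    tar.extractall(workdir)

cfg_path = os.path.join(workdir, "config.json")
with open(cfg_path) as f:
    cfg = json.load(f)

cfg.setdefault("trainer", {})["cuda_device"] = -1   # the override named in the error

with open(cfg_path, "w") as f:
    json.dump(cfg, f, indent=2)

with tarfile.open(archive, "w:gz") as tar:
    for name in os.listdir(workdir):
        tar.add(os.path.join(workdir, name), arcname=name)
Whether the SpanModel wrapper re-reads the archive on construction is another assumption; if it keeps a cached unpacked copy, the same edit would have to be applied there.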

Deribit code not working for me. Does anyone have suggestions?

I am trying to gather historical prices/data from Deribit using PyCharm and Spyder, but I keep getting errors. I used the code below from the following website:
https://www.codearmo.com/python-tutorial/crypto-algo-trading-historical-data1
If anyone has a suggested fix that would be a huge help. I am relatively new to coding.
Thanks.
import asyncio
import websockets
import json
import pandas as pd
import datetime as dt
async def call_api(msg):
    async with websockets.connect('wss://test.deribit.com/ws/api/v2') as websocket:
        await websocket.send(msg)
        while websocket.open:
            response = await websocket.recv()
            return response

def async_loop(api, message):
    return asyncio.get_event_loop().run_until_complete(api(message))

def retrieve_historic_data(start, end, instrument, timeframe):
    msg = \
        {
            "jsonrpc": "2.0",
            "id": 833,
            "method": "public/get_tradingview_chart_data",
            "params": {
                "instrument_name": instrument,
                "start_timestamp": start,
                "end_timestamp": end,
                "resolution": timeframe
            }
        }
    resp = async_loop(call_api, json.dumps(msg))
    return resp

def json_to_dataframe(json_resp):
    res = json.loads(json_resp)
    df = pd.DataFrame(res['result'])
    df['ticks'] = df.ticks / 1000
    df['timestamp'] = [dt.datetime.fromtimestamp(date) for date in df.ticks]
    return df

if __name__ == '__main__':
    start = 1554373800000
    end = 1554376800000
    instrument = "BTC-PERPETUAL"
    timeframe = '1'
    json_resp = retrieve_historic_data(start, end, instrument, timeframe)
    df = json_to_dataframe(json_resp)
    print(df.head())
Console Message:
/Users/macbookair/PycharmProjects/untitled/venv/bin/python /Users/macbookair/PycharmProjects/Deribit01/Deribit_Options_01.py
Traceback (most recent call last):
File "/Users/macbookair/PycharmProjects/Deribit01/Deribit_Options_01.py", line 2, in <module>
import websockets
File "/Users/macbookair/PycharmProjects/untitled/venv/lib/python3.8/site-packages/websockets/__init__.py", line 4, in <module>
from .client import * # noqa
File "/Users/macbookair/PycharmProjects/untitled/venv/lib/python3.8/site-packages/websockets/client.py", line 20, in <module>
asyncio.get_event_loop().run_until_complete(call_api(json.dumps(msg)))
File "/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/asyncio/base_events.py", line 616, in run_until_complete
return future.result()
File "/Users/macbookair/PycharmProjects/untitled/venv/lib/python3.8/site-packages/websockets/client.py", line 13, in call_api
async with websockets.connect('wss://test.deribit.com/ws/api/v2') as websocket:
AttributeError: partially initialized module 'websockets' has no attribute 'connect' (most likely due to a circular import)
Process finished with exit code 1
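One quick self-check, given that the traceback complains about a partially initialised websockets module and a possible circular import: print where Python actually resolves websockets from, to see whether a local file or a modified copy inside the virtualenv is being picked up instead of the installed package. This is only a diagnostic, not a fix:
import importlib.util

# Show which file provides the 'websockets' package; if this points at a script in the
# project directory (or at an edited client.py), that would explain the circular import.
spec = importlib.util.find_spec("websockets")
print(spec.origin if spec else "websockets not found")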

TensorFlow Serving + gRPC "Did not read entire message"

I'm trying to call my TensorFlow model, which is deployed on a Cloud Foundry server, from a Python 2.7 API using TensorFlow Serving and gRPC. The model expects a 200-dimensional vector as input, which I have hardcoded for the moment. The connection variables are stored in the virtualenv and have been double-checked.
The code:
import os
from grpc.beta import implementations
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from grpc._cython import cygrpc
MODEL_NAME = str(os.getenv('MODEL_NAME', ''))
MODEL_SERVER_HOST = str(os.getenv('MODEL_SERVER_HOST', ''))
MODEL_SERVER_PORT = int(os.getenv('MODEL_SERVER_PORT', ''))
ROOT_CERT = str(os.getenv('ROOT_CERT', '')).replace('\\n', '\n')
def metadata_transformer(metadata):
    additions = []
    token = 'Bearer <my access token>'
    additions.append(('authorization', token))
    return tuple(metadata) + tuple(additions)
credentials = implementations.ssl_channel_credentials(root_certificates=ROOT_CERT)
channel = implementations.secure_channel(MODEL_SERVER_HOST, MODEL_SERVER_PORT, credentials)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel, metadata_transformer=metadata_transformer)
import numpy as np
data = np.matrix([0.06222425773739815, 0.08211926370859146, -0.060986146330833435, 0.13920938968658447, 0.10515272617340088, -0.06220443174242973, -0.05927170068025589, -0.054189786314964294, -0.0986655130982399, 0.013334010727703571, -0.05667420104146004, 0.059366412460803986, -0.03483295068144798, -0.05382293462753296, 0.02721281163394451, -0.1428503543138504, 0.029297124594449997, 0.07006879895925522, 0.06501731276512146, 0.028620243072509766, 0.07128454744815826, 0.029960375279188156, 0.0710490494966507, -0.04619687795639038, -0.03106304071843624, -0.04266272485256195, 0.004348727408796549, 0.03099834732711315, 0.09248803555965424, -0.036939311772584915, 0.00017547572497278452, 0.03521900251507759, 0.10932505130767822, -0.019729139283299446, 0.12315405160188675, 0.10092845559120178, -0.12633951008319855, -0.022320391610264778, 0.0870826318860054, -0.06696301698684692, -0.016253307461738586, -0.0413096621632576, -0.040929097682237625, 0.09338817000389099, -0.08800378441810608, 0.015543102286756039, 0.018787918612360954, 0.07351260632276535, 0.038140904158353806, 0.019255049526691437, 0.0875692293047905, -0.07542476058006287, -0.04116508364677429, 0.04507743567228317, -0.06986603885889053, -0.24688798189163208, -0.035459864884614944, 0.06200174242258072, -0.06932217627763748, 0.06320516765117645, -0.023999478667974472, -0.04712359234690666, 0.03672196343541145, -0.02999514900147915, 0.04105519875884056, 0.08891177922487259, 0.15175248682498932, -0.0021488466300070286, 0.04398706927895546, -0.04429445043206215, 0.04708605632185936, 0.043234940618276596, -0.043555982410907745, 0.017381751909852028, 0.048889972269535065, -0.016929129138588905, 0.01731136068701744, -0.04694319888949394, 0.20381565392017365, 0.009074307978153229, 0.004490611143410206, -0.08525945991277695, -0.03385556861758232, 0.017475442960858345, -0.040392760187387466, 0.14970248937606812, 0.042721331119537354, -0.1257765144109726, -0.07097769528627396, -0.10943038016557693, 0.015442096628248692, -0.06519876420497894, -0.07588690519332886, -0.07620779424905777, 0.04572996124625206, -0.058589719235897064, -0.04492143541574478, -0.01922304928302765, -0.008066931739449501, 0.04317406192421913, 0.020763304084539413, -0.025430725887417793, 0.04271349683403969, 0.07393930852413177, 0.0020402593072503805, 0.0783640518784523, 0.047386448830366135, 0.010610940866172314, 0.022059153765439987, 0.034980181604623795, -0.006882485933601856, -0.08911270648241043, -0.001243607490323484, -0.06307544559240341, -0.01352659147232771, -0.24622271955013275, 0.07930449396371841, 0.03659113869071007, -0.05077377334237099, 0.08726480603218079, -0.09274136275053024, -0.05766649544239044, -0.12269984930753708, 0.056026071310043335, -0.0048304214142262936, -0.05568183213472366, -0.08890420943498611, -0.02911136858165264, -0.0944124087691307, 0.0011820291401818395, -0.08908636122941971, -0.008728212676942348, -0.014545259065926075, -0.008866528049111366, 0.02728298306465149, -0.020994992926716805, 0.031155599281191826, 0.036098793148994446, 0.06911332905292511, -0.06691643595695496, -0.00014896543871145695, -0.007080242037773132, 0.0031992685981094837, 0.043563224375247955, 0.02550852671265602, -0.015397937037050724, 0.06041031703352928, -0.08981014788150787, -0.10881254076957703, 0.03226703032851219, -0.02039985917508602, -0.05354547128081322, -0.026514282450079918, 0.09616094827651978, -0.04160488396883011, -0.06793050467967987, -0.17060619592666626, -0.08044841140508652, 0.042605575174093246, 0.08186516910791397, 0.026051705703139305, 
0.1254323273897171, 0.09807661175727844, 0.04692094400525093, 0.05536479875445366, 0.004592049401253462, 0.01953544095158577, -0.02827763929963112, 0.11051501333713531, -0.05077047273516655, -0.09987067431211472, 0.025186538696289062, -0.24119670689105988, -0.054666098207235336, 0.03561021387577057, -0.006030901800841093, 0.14740994572639465, 0.09515859931707382, 0.0628485381603241, 0.020558597519993782, -0.04458167776465416, -0.04740617796778679, 0.024550801143050194, -0.09533495455980301, 0.057229768484830856, -0.08855120837688446, 0.027864644303917885, -0.07248448580503464, 0.0647491067647934, 0.09660986065864563, 0.038834456354379654, -0.030274877324700356, -0.024261653423309326, 0.05457066744565964, -0.00860705878585577, 0.04901411384344101, 0.017157232388854027, -0.02722001262009144, 0.012187148444354534, 0.05596058815717697])
request = predict_pb2.PredictRequest()
request.model_spec.name = MODEL_NAME
request.model_spec.signature_name = 'ticketCatFeature2'
request.inputs['input'].CopyFrom(
    tf.contrib.util.make_tensor_proto(data, shape=[200]))
print stub.Classify(request, 10)
I'm getting the following error message when running the app:
Traceback (most recent call last):
File "app.py", line 36, in
print stub.Classify(request, 10)
File "/home/vagrant/Desktop/Masterarbeit/appDir/venv/local/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 309, in call
self._request_serializer, self._response_deserializer)
File "/home/vagrant/Desktop/Masterarbeit/appDir/venv/local/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 195, in _blocking_unary_unary
raise _abortion_error(rpc_error_call)
grpc.framework.interfaces.face.face.AbortionError: AbortionError(code=StatusCode.INTERNAL, details="Did not read entire message")
Log of grpc Debug: https://ufile.io/owk76
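One mismatch that may be worth checking (an observation, not a confirmed fix for this setup): the code above builds a PredictRequest but then calls the Classify RPC, which expects a ClassificationRequest, and a request parsed as the wrong message type can fail on the server with errors like this. A sketch that reuses the stub and request from the code above and only changes the RPC being called:
# PredictRequest is the request type of the Predict RPC, so call Predict rather than Classify.
result = stub.Predict(request, 10.0)  # 10-second timeout, same calling convention as above
print result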

Raspberry Pi omxplayer wrapper + GPIO

omxplayer crashes after pushbutton presses.
I want to change movies with pushbuttons, and this is the code I have so far, but omxplayer crashes after a few buttons are pushed!
I am new to Raspberry Pi and Python and have been looking for a fix but cannot find one. Any help is welcome.
The error I get is:
Traceback (most recent call last):
File "mygpio.py", line 34, in <module>
player.load(vida)
File "build/bdist.linux-armv7l/egg/omxplayer/player.py", line 162, in load
File "build/bdist.linux-armv7l/egg/omxplayer/player.py", line 88, in _load_source
File "build/bdist.linux-armv7l/egg/omxplayer/player.py", line 134, in _setup_dbus_connection
SystemError: DBus cannot connect to the OMXPlayer process
#!/usr/bin/env python2
import os.path
from time import sleep
import subprocess
import os
from omxplayer import OMXPlayer

vida = '/home/pi/Videos/testvids/6.mov'
vidb = '/home/pi/Videos/testvids/3.mov'
vidc = '/home/pi/Videos/testvids/t2.mp4'
default = '/home/pi/Videos/testvids/t1.mp4'

import RPi.GPIO as GPIO

# set up GPIO using BCM numbering
GPIO.setmode(GPIO.BCM)

# all GPIOs as input with pull-ups
GPIO.setup(2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(3, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(4, GPIO.IN, pull_up_down=GPIO.PUD_UP)

player = OMXPlayer(default, args=['--no-osd', '--blank'])

while True:
    if GPIO.input(2) == 0:
        player.load(vida)
        print("gpio 2")
        player.play()
        # sleep(5)
    if GPIO.input(3) == 0:
        player.load(vidb)
        print("gpio 3")
        player.play()
        # sleep(5)
    if GPIO.input(4) == 0:
        player.load(vidc)
        print("gpio 4")
        player.play()
        # sleep(5)

GPIO.cleanup()
Looks like this was a bug in the wrapper:
https://github.com/willprice/python-omxplayer-wrapper/issues/85
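Independent of that wrapper issue, the loop as posted calls load() again and again for as long as a button is held down, because nothing waits for the release. A small debounce sketch (it assumes the BCM pins and pull-up configuration from the question and does not address the DBus error itself):
import RPi.GPIO as GPIO   # already configured in the question's script (BCM mode, pull-ups)
from time import sleep

def wait_for_release(pin, settle=0.2):
    # Block until the pulled-up button on `pin` is released, then wait a short settle time.
    while GPIO.input(pin) == 0:   # 0 means pressed because of the pull-up
        sleep(0.01)
    sleep(settle)

# usage inside the loop, e.g.:
# if GPIO.input(2) == 0:
#     player.load(vida)
#     player.play()
#     wait_for_release(2)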

List index out of range error in Splunk API call

I am trying to get the result of a search query from Splunk, but when I try to get the session key I get the following error.
Traceback (most recent call last):
File "splunkenter.py", line 18, in <module>
sessionkey = minidom.parseString(servercontent).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
IndexError: list index out of range
splunkenter.py:
import urllib
import httplib2
import time
import re
from time import localtime, strftime
from xml.dom import minidom
import json

baseurl = 'abc.haihd.com:8000'
username = 'xxxxx'
password = 'xxxxx'

myhttp = httplib2.Http()

# Step 1: Get a session key
servercontent = myhttp.request(baseurl + '/services/auth/login', 'POST', headers={},
                               body=urllib.urlencode({'username': username, 'password': password}))[1]
sessionkey = minidom.parseString(servercontent).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
print "====>sessionkey: %s <====" % sessionkey
Can anybody tell me where the problem lies? I am very new to APIs.
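For reference, a defensive variant of the parsing step that surfaces the raw server response when no <sessionKey> element comes back (a sketch only; it makes no assumption about why the element is missing, e.g. wrong management port, bad credentials, or an HTML error page):
from xml.dom import minidom

def extract_session_key(body):
    # Return the text of the first <sessionKey> element, or fail loudly with the raw body.
    nodes = minidom.parseString(body).getElementsByTagName('sessionKey')
    if not nodes or not nodes[0].childNodes:
        raise RuntimeError("no sessionKey in response: %r" % body)
    return nodes[0].childNodes[0].nodeValue

# sessionkey = extract_session_key(servercontent)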