Extract API responses for multiple accounts

I am new to IBPy and would really like to know how to get account parameters for multiple accounts. The code below only prints the output to the command line; I couldn't figure out how to store that information in a DataFrame. The updateAccountValue() messages don't have a unique id that I can use as an index for the DataFrame.
from ib.opt import Connection, message
import pandas as pd
import time

def error_handler(msg):
    """Handles the capturing of error messages"""
    print "Server Error: %s" % msg

def updateAccount_handler(msg):
    if msg.key in ['AccountType', 'NetLiquidation']:
        print msg.key, msg.value

if __name__ == "__main__":
    conn = Connection.create(port=7497, clientId=93)
    conn.connect()
    conn.register(error_handler, 'Error')
    conn.register(updateAccount_handler, message.updateAccountValue)

    # we can do a loop, i am just giving a simple example for 2 accounts
    conn.reqAccountUpdates(1, "Uxxxx008")
    time.sleep(0.5)
    conn.reqAccountUpdates(1, "Uxxxx765")
    time.sleep(0.5)
    conn.disconnect()
The output looks like this:
Server Version: 76
TWS Time at connection:20150729 12:46:56 EST
Server Error: <error id=-1, errorCode=2104, errorMsg=Market data farm connection is OK:usfuture>
Server Error: <error id=-1, errorCode=2104, errorMsg=Market data farm connection is OK:usfuture.us>
Server Error: <error id=-1, errorCode=2104, errorMsg=Market data farm connection is OK:usfarm>
Server Error: <error id=-1, errorCode=2106, errorMsg=HMDS data farm connection is OK:ushmds>
AccountType INDIVIDUAL
NetLiquidation 2625.24
AccountType IRA-ROTH NEW
NetLiquidation 11313.83
The end goal is to store this information in a pandas DataFrame, with the account number as a unique index.

OK, IBPy works with a sender/receiver architecture. What I would suggest is keeping a shared variable (a class attribute in the example below) to store the account information.
from ib.opt import Connection, message
import pandas as pd
import time

class account_update:
    # list to store all your account info; you could use a list, tuple,
    # pandas DataFrame or any other data structure. A plain list is used
    # here to demonstrate the idea.
    acc_info = []

    def error_handler(self, msg):
        """Handles the capturing of error messages"""
        print "Server Error: %s" % msg

    def updateAccount_handler(self, msg):
        if msg.key in ['AccountType', 'NetLiquidation']:
            self.acc_info.append((msg.key, msg.value))

    def main(self):
        conn = Connection.create(port=7497, clientId=93)
        conn.connect()
        conn.register(self.error_handler, 'Error')
        conn.register(self.updateAccount_handler, message.updateAccountValue)
        conn.reqAccountUpdates(1, "Uxxxx008")
        time.sleep(0.5)
        conn.reqAccountUpdates(1, "Uxxxx765")
        time.sleep(0.5)
        conn.disconnect()

if __name__ == "__main__":
    account_update().main()
So the logic is simple: create a shared variable and have your response handler update it whenever it receives a response. Hope it works :)
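To get from there to the end goal (a DataFrame indexed by account number), note that IBPy's updateAccountValue messages also carry an accountName field, mirroring the updateAccountValue callback in the underlying IB API, so the handler can group values per account. A minimal sketch along those lines, assuming msg.accountName is populated for your subscriptions:

from collections import defaultdict

from ib.opt import Connection, message
import pandas as pd
import time

# {account_number: {key: value}} -- filled in by the handler below
acc_data = defaultdict(dict)

def updateAccount_handler(msg):
    # assumption: msg.accountName identifies which account the value belongs to
    if msg.key in ['AccountType', 'NetLiquidation']:
        acc_data[msg.accountName][msg.key] = msg.value

if __name__ == "__main__":
    conn = Connection.create(port=7497, clientId=93)
    conn.connect()
    conn.register(updateAccount_handler, message.updateAccountValue)
    for account in ["Uxxxx008", "Uxxxx765"]:
        conn.reqAccountUpdates(1, account)
        time.sleep(0.5)
    conn.disconnect()

    # one row per account, indexed by the account number
    df = pd.DataFrame.from_dict(acc_data, orient='index')
    print(df)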

Related

how to read the console output in python without executing any command

I have an API which prints a success or error message to the console. I am new to Python and trying to read that response. Google turns up many examples using subprocess, but I don't want to run or call any command or subprocess. I just want to read the output after the API call below.
This is the console output on success:
17:50:52 | Logged in!!
This is the GitHub link for the SDK and documentation:
https://github.com/5paisa/py5paisa
This is the code:
from py5paisa import FivePaisaClient

email = "myemailid@gmail.com"
pw = "mypassword"
dob = "mydateofbirth"

cred = {
    "APP_NAME": "app-name",
    "APP_SOURCE": "app-src",
    "USER_ID": "user-id",
    "PASSWORD": "pw",
    "USER_KEY": "user-key",
    "ENCRYPTION_KEY": "enc-key"
}

client = FivePaisaClient(email=email, passwd=pw, dob=dob, cred=cred)
client.login()
In general it is bad practice to read a value back from STDOUT. There are ways to do it, but they are pretty tricky (STDOUT is not made for that). The problem doesn't come from you but from the API, which is poorly designed: it should return a value, e.g. True or False (at least), to tell you whether you are logged in, and it doesn't.
So, according to their documentation there is no direct way to know if you're logged in, but you may be able to tell by checking the client_code attribute on the client object.
If client.client_code is equal to something then you should be logged in, and if it is equal to something else then you are not. You can compare its value after a successful login and after a failed one (wrong credentials, for instance). Then you can put a condition around it: if it is None or False or 0 (you will have to check this yourself), the login failed.
Can you try doing the following with a successful and failed login:
client.login()
print(client.client_code)
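Continuing from the snippet above, the check itself is just a condition around that attribute; a rough sketch, where the sentinel values are an assumption you should confirm against what client_code actually contains on success and on failure:

client.login()

# assumed sentinels -- verify them against a real successful and a real failed login
if client.client_code not in (None, "", 0):
    print("Logged in, client code:", client.client_code)
else:
    print("Login appears to have failed")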
Source of the API:
# Login function:
# (...)
message = res["body"]["Message"]
if message == "":
    log_response("Logged in!!")
else:
    log_response(message)
self._set_client_code(res["body"]["ClientCode"])
# (...)

# _set_client_code function:
def _set_client_code(self, client_code):
    try:
        self.client_code = client_code  # <<<< That's what we want
    except Exception as e:
        log_response(e)
Since this question asks how to capture "stdout", one way you can accomplish this is to intercept the log message before it hits stdout.
The minimum code to capture a log message within a Python script looks like this:
#!/usr/bin/env python3
import logging

logger = logging.getLogger(__name__)

class RequestHandler(logging.Handler):
    def emit(self, record):
        if record.getMessage().startswith("Hello"):
            print("hello detected")

handler = RequestHandler()
logger.addHandler(handler)
logger.warning("Hello world")
Putting it all together you may be able to do something like this:
import logging

from py5paisa import FivePaisaClient

email = "myemailid@gmail.com"
pw = "mypassword"
dob = "mydateofbirth"

cred = {
    "APP_NAME": "app-name",
    "APP_SOURCE": "app-src",
    "USER_ID": "user-id",
    "PASSWORD": "pw",
    "USER_KEY": "user-key",
    "ENCRYPTION_KEY": "enc-key"
}

client = FivePaisaClient(email=email, passwd=pw, dob=dob, cred=cred)

class PaisaClient(logging.Handler):
    def __init__(self):
        super().__init__()
        self.loggedin = False  # this is the variable we can use to see if we are "logged in"

    def emit(self, record):
        if record.getMessage().startswith("Logged in!!"):
            self.loggedin = True

    def login(self):
        client.login()

logging.getLogger("py5paisa")  # the logger used by the py5paisa library
# tutorial here: https://betterstack.com/community/questions/how-to-disable-logging-from-python-request-library/
c = PaisaClient()
logging.basicConfig(handlers=[c], level=0, force=True)
c.login()
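After c.login() returns, the handler's flag can be used to branch on the outcome; for example (assuming the library keeps logging the exact text "Logged in!!"):

if c.loggedin:
    print("login succeeded")
else:
    print("login failed, or the log message text changed")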

Having trouble running multiple functions in Asyncio

I'm a novice programmer looking to build a script that reads a list of leads from Google Sheets and then messages them on Telegram. I want to separate the first and second message by three days, which is why I'm splitting them into separate methods.
import asyncio
from telethon import TelegramClient
from telethon.errors.rpcerrorlist import SessionPasswordNeededError
import logging
from async_class import AsyncClass, AsyncObject, task, link

from sheetdata import *

logging.basicConfig(format='[%(levelname) 5s/%(asctime)s] %(name)s: %(message)s',
                    level=logging.WARNING)

api_id = id
api_hash = 'hash'
phone = 'phone'
username = 'user'

client = TelegramClient(username, api_id, api_hash)

# already been touched once
second_touch_array = []
# touched twice
third_touch_array = []

async def messageCount(userid):
    count = 0
    async for message in client.iter_messages(userid):
        count += 1
    yield count

async def firstMessage():
    # clear prospects from array and readData from google sheet
    clearProspects()
    readData(sheet)
    # loop through prospects and send first message
    for user in prospect_array:
        # check if we already messaged the prospect. If we haven't, execute function
        if(messageCount(user.id) == 0):
            await client.send_message(user.id, 'Hi')
            second_touch_array.append(prospect(user.name, user.company, user.id))
            print("First Message Sent!")
        else:
            print("Already messaged!")

async def secondMessage():
    for user in second_touch_array:
        if(messageCount(user.id) == 1):
            await client.send_message(user.id, 'Hello')
            third_touch_array.append(prospect(user.name, user.company, user.id))
            print("Second Message Sent!")
        else:
            print("Prospect has already replied!")

async def main():
    # Getting information about yourself
    me = await client.get_me()
    await firstMessage()
    await secondMessage()
    for user in second_touch_array:
        print(user.name, user.company, user.id)

with client:
    client.loop.run_until_complete(main())
Anyway, when I run my code I'm successfully getting the "Already Messaged!" print statement in my terminal from the firstMessage function.
This is good - it's detecting that I've already messaged the one user on my Google Sheets list; however, my second function isn't being called at all. I'm not getting any print statement, and every time I try to print the contents of the second array nothing happens.
If you have any advice it would be greatly appreciated :)
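One thing that stands out in the snippet above: messageCount is an async generator (it uses yield), so messageCount(user.id) returns an async generator object rather than a number, and comparing that object to 0 or 1 is always False, which would explain why only the else branches run. A rough sketch of how the count could be returned and awaited instead, reusing the client, prospect_array and related names from the code above:

async def messageCount(userid):
    # return a plain int instead of yielding from an async generator
    count = 0
    async for message in client.iter_messages(userid):
        count += 1
    return count

async def firstMessage():
    clearProspects()
    readData(sheet)
    for user in prospect_array:
        # the coroutine has to be awaited to get the actual number
        if await messageCount(user.id) == 0:
            await client.send_message(user.id, 'Hi')
            second_touch_array.append(prospect(user.name, user.company, user.id))
            print("First Message Sent!")
        else:
            print("Already messaged!")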

I want to get an Excel file from the DataFrame created, which automatically updates as written in the code

I have tried two methods and both are showing a different location than the one I gave in this image.
apikey = 'abcd'

import pandas as pd
from alpha_vantage.timeseries import TimeSeries
import time

ts = TimeSeries(key=apikey, output_format='pandas')
data, metadata = ts.get_intraday(symbol='name', interval='1min', outputsize='full')
data

while True:
    data, metadata = ts.get_intraday(symbol='TCS', interval='1min', outputsize='full')
    data.to_excel('livedat.xlsx')
    time.sleep(60)
The code runs properly but I don't know how to get the data file in Excel.
Important: the method should fetch the file, which is updated regularly (i.e. every 1 minute), automatically.
Also, I am using IBM Watson Studio to write the code.
I am not familiar with the alpha_vantage wrapper that you are using, however this is how I would approach your question. The code works and I have included comments.
To get the file back into the Python script I would do pd.read_excel(filepath).
import requests
import pandas as pd
import time
import datetime

# Your API key and the URL we will request from
API_KEY = "YOUR API KEY"
url = "https://www.alphavantage.co/query?"

def Generate_file(symbol="IBM", interval="1min"):
    # URL parameters
    parameters = {"function": "TIME_SERIES_INTRADAY",
                  "symbol": symbol,
                  "interval": interval,
                  "apikey": API_KEY,
                  "outputsize": "compact"}

    # get the JSON response from AlphaVantage
    response = requests.get(url, params=parameters)
    data = response.json()

    # filter the response to only get the time series data we want
    time_series_interval = f"Time Series ({interval})"
    prices = data[time_series_interval]

    # convert the filtered response to a pandas DataFrame
    df = pd.DataFrame.from_dict(prices, orient="index").reset_index()
    df = df.rename(columns={"index": time_series_interval})

    # create a timestamp for our Excel file so that the file does not get
    # overwritten with new data each time
    current_time = datetime.datetime.now()
    file_timestamp = current_time.strftime("%Y%m%d_%H.%M")
    filename = f"livedat_{file_timestamp}.xlsx"
    df.to_excel(filename)

# set a limit on the number of calls we make to prevent an infinite loop
call_limit = 3
number_of_calls = 0

while number_of_calls < call_limit:
    Generate_file()  # our function
    number_of_calls += 1
    time.sleep(60)
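To read one of the generated files back into a DataFrame inside the script, as mentioned above, pd.read_excel does the job. A small sketch (the filename here is hypothetical; substitute whichever timestamped file Generate_file just wrote):

import pandas as pd

# hypothetical filename -- use the actual timestamped file written above
df = pd.read_excel("livedat_20210101_12.00.xlsx", index_col=0)
print(df.head())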

web2py rest api endpoint gives invalid path output

I have made a web2py web application. The API endpoints exposed are as follows:
"/comments[comments]"
"/comments/id/{comments.id}"
"/comments/id/{comments.id}/:field"
"/comments/user-id/{comments.user_id}"
"/comments/user-id/{comments.user_id}/:field"
"/comments/date-commented/{comments.date_commented.year}"
"/comments/date-commented/{comments.date_commented.year}/:field"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/:field"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/:field"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/{comments.date_commented.hour}"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/{comments.date_commented.hour}/:field"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/{comments.date_commented.hour}/{comments.date_commented.minute}"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/{comments.date_commented.hour}/{comments.date_commented.minute}/:field"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/{comments.date_commented.hour}/{comments.date_commented.minute}/{comments.date_commented.second}"
"/comments/date-commented/{comments.date_commented.year}/{comments.date_commented.month}/{comments.date_commented.day}/{comments.date_commented.hour}/{comments.date_commented.minute}/{comments.date_commented.second}/:field"
"/comments/complaint-id/{comments.complaint_id}"
"/comments/complaint-id/{comments.complaint_id}/:field"
The comments model is as follows
models/db.py
db.define_table(
    'comments',
    Field('user_id', db.auth_user),
    Field('comment_made', 'string', length=2048),
    Field('date_commented', 'datetime', default=datetime.now),
    Field('complaint_id', db.complaints),
    Field('detailed_status', 'string', length=2048),
)
I have been successful in retrieving a single comment via the following request:
localhost:8000/api/comments/id/1.json
Now I wish to retrieve all the comments. I am not able to figure out how to use /comments[comments] to retrieve all comments.
I have tried
localhost:8000/api/comments.json
But it gives "invalid path" as the output.
I have realized that requests such as http://localhost:8000/api/comments/complaint-id/1.json
also give "invalid path" as the output.
Please help.
EDIT:
Controllers/default.py
@request.restful()
def api():
    response.view = 'generic.' + request.extension
    def GET(*args, **kargs):
        patterns = 'auto'
        parser = db.parse_as_rest(patterns, args, kargs)
        if parser.status == 200:
            return dict(content=parser.response)
        else:
            raise HTTP(parser.status, parser.error)
    def POST(*args, **kargs):
        return dict()
    return locals()
routes.py in the main web2py folder to change the default application:
routers = dict(
    BASE = dict(
        default_application='GRS',
    )
)
Another observation:
I added another endpoint as below:
def comm():
    """ Comments api substitute """
    rows = db().select(db.comments.ALL)  # this line shows the error
    # rows = db(db.comments.id > 0).select()
    # rows = [[5,6],[3,4],[1,2]]
    # for row in rows:
    #     print row.id
    return dict(message=rows)
Even now I am not able to retrieve all comments with "/comm.json". This gives a web2py error ticket which says "need more than 1 value to unpack" on the line "rows=db().select(db.comments.ALL)". Are the invalid path above and this error related in some way?
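For what it's worth, one thing worth trying is passing an explicit pattern list to parse_as_rest instead of 'auto', which is how the web2py book's REST examples expose a list-all route. A sketch of the GET function along those lines, with the pattern strings taken from the list at the top of the question (untested against your app):

@request.restful()
def api():
    response.view = 'generic.' + request.extension
    def GET(*args, **kargs):
        patterns = [
            "/comments[comments]",                              # /api/comments.json -> all comments
            "/comments/id/{comments.id}",                       # /api/comments/id/1.json -> one comment
            "/comments/complaint-id/{comments.complaint_id}",   # /api/comments/complaint-id/1.json
        ]
        parser = db.parse_as_rest(patterns, args, kargs)
        if parser.status == 200:
            return dict(content=parser.response)
        raise HTTP(parser.status, parser.error)
    return locals()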

See all of the unique IDs related to a vmware virtual machine

I want to see all of the unique IDs that are specific to a virtual machine, such as:
hardware ID, CPU ID, UUID, MAC address, etc.
Could anybody please help me find these IDs?
I can help you find some of these. For the rest you will have to search the docs.
Install pyVmomi and run the following code.
EDIT: Changed the code to run against an ESX host. Simply run it with python .py
Now, to understand how this code works, you have to learn about Managed Objects. For example, here we are working with the Managed Object vm, and this object has many properties listed in the doc. So to retrieve the uuid of a VM we read vm.config.uuid. For other details you have to go through the VirtualMachine object and see which properties you need.
import sys
import atexit
import time

from pyVmomi import vim, vmodl
from pyVim.connect import Disconnect
from pyVim import connect

inputs = {'esx_ip': '15.22.10.10',
          'esx_password': 'Password123',
          'esx_user': 'root',
          'vm_name': 'ubuntu',
          }

def wait_for_task(task, actionName='job', hideResult=False):
    """
    Waits and provides updates on a vSphere task
    """
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)

    if task.info.state == vim.TaskInfo.State.success:
        if task.info.result is not None and not hideResult:
            out = '%s completed successfully, result: %s' % (actionName, task.info.result)
            print out
        else:
            out = '%s completed successfully.' % actionName
            print out
    else:
        out = '%s did not complete successfully: %s' % (actionName, task.info.error)
        print out
        raise task.info.error

    return task.info.result

def get_obj(content, vimtype, name):
    """
    Get the vsphere object associated with a given text name
    """
    obj = None
    container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
    for c in container.view:
        if c.name == name:
            obj = c
            break
    return obj

def main():
    si = None
    try:
        print "Trying to connect ..."
        si = connect.Connect(inputs['esx_ip'], 443, inputs['esx_user'], inputs['esx_password'])
    except IOError, e:
        pass

    if not si:
        print "Cannot connect to specified host using specified username and password"
        sys.exit()

    print "Connected to the host!"
    atexit.register(Disconnect, si)

    content = si.RetrieveContent()

    # Get the VirtualMachine Object
    vm = get_obj(content, [vim.VirtualMachine], inputs['vm_name'])

    print "GuestID: ", vm.config.guestId
    print "UUID: ", vm.config.uuid
    print "Version: ", vm.config.version

    for device in vm.config.hardware.device:
        if isinstance(device, vim.vm.device.VirtualEthernetCard):
            print "MAC Address: ", device.macAddress

    # Example of changing the UUID:
    new_uuid = '423ffff0-5d62-d040-248c-4538ae2c734f'
    vmconf = vim.vm.ConfigSpec()
    vmconf.uuid = new_uuid
    task = vm.ReconfigVM_Task(vmconf)
    wait_for_task(task)
    print "Successfully changed UUID"
    print "New UUID: ", vm.config.uuid

if __name__ == "__main__":
    main()
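If you also need the instance UUID that vCenter/ESX assigns (distinct from the BIOS UUID printed above), the same config object exposes it; a small addition that could go inside main(), assuming the same vm object:

# instance UUID assigned by vCenter/ESX, distinct from the BIOS UUID (vm.config.uuid)
print "Instance UUID: ", vm.config.instanceUuid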