Handling errors within loops through exceptions

I tried my first Python program to read a temp sensor and output the readings to InfluxDB.
Occasionally the temp sensor gives the error "IndexError: list index out of range" and the loop ends.
I want the loop to wait 15 seconds on this error and then continue (the sensor usually corrects itself by the next read).
My code:
import os
import glob
import time
import urllib
import urllib2
import httplib
import json
from influxdb import InfluxDBClient

client = InfluxDBClient(host='192.168.1.7', port=8086)
#client.get_list_database()
client.switch_database('influxdb1')

os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/devices/w1_bus_master1/'
device_folder = glob.glob(base_dir + '28*')[0]

while True:
    device_file = device_folder + '/w1_slave'

    def read_temp_raw():
        f = open(device_file, 'r')
        lines = f.readlines()
        f.close()
        return lines

    def read_temp():
        lines = read_temp_raw()
        while lines[0].strip()[-3:] != 'YES':
            time.sleep(0.2)
            lines = read_temp_raw()
        equals_pos = lines[1].find('t=')
        if equals_pos != -1:
            temp_string = lines[1][equals_pos+2:]
            temp_c = float(temp_string) / 1000.0
            return temp_c

    temp = float(read_temp())

    json_body = [
        {
            "measurement": "YOUR_MEASUREMENT",
            "tags": {
                "Device": "YOUR_DEVICE",
                "ID": "YOUR_ID"
            },
            "fields": {
                "outside_temp": temp,
            }
        }
    ]

    client.write_points(json_body)
    time.sleep(60)
******************************************************
which works OK :)
When I edit the code to catch the exception...
******************************************************
while True:
    except IndexError:
        time.sleep(15)
        continue
    device_file = device_folder + '/w1_slave'  # store the details

    def read_temp_raw():
        f = open(device_file, 'r')
        lines = f.readlines()  # read the device details
        f.close()
        return lines

    def read_temp():
        lines = read_temp_raw()
        while lines[0].strip()[-3:] != 'YES':
            time.sleep(0.2)
            lines = read_temp_raw()
        equals_pos = lines[1].find('t=')
        if equals_pos != -1:
            temp_string = lines[1][equals_pos+2:]
            temp_c = float(temp_string) / 1000.0
            return temp_c

    temp = float(read_temp())

    json_body = [
        {
            "measurement": "YOUR_MEASUREMENT",
            "tags": {
                "Device": "YOUR_DEVICE",
                "ID": "YOUR_ID"
            },
            "fields": {
                "outside_temp": temp,
            }
        }
    ]

    client.write_points(json_body)
    time.sleep(60)
************************************************************
I get the following error:
File "temptoinfluxdb2.py", line 22
    except IndexError:
    ^
SyntaxError: invalid syntax
Where am I going wrong, please?

You always need to use an except block in combination with a try block.
The code in the try block is executed until an exception (in this case IndexError) occurs:
try:
    # Execution block
except IndexError:
    # Error handling
You could also use a more general approach with except Exception as e, which catches not just IndexError but any exception.
Check the official documentation for further information.
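Applied to your loop, a minimal sketch (reusing read_temp, client, and the placeholder names from your working version) could look like this:
while True:
    try:
        temp = float(read_temp())  # may raise IndexError on a bad read
    except IndexError:
        time.sleep(15)  # sensor usually recovers by the next read
        continue        # skip this iteration and retry
    json_body = [
        {
            "measurement": "YOUR_MEASUREMENT",
            "tags": {"Device": "YOUR_DEVICE", "ID": "YOUR_ID"},
            "fields": {"outside_temp": temp},
        }
    ]
    client.write_points(json_body)
    time.sleep(60)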

Related

Slack automate calendar events

I'm looking for a way to automate the creation of calendar events. I'm part of multiple spaces at my school, and they keep posting events that happen on a regular basis.
I was wondering if there's a way to automate these calendar events. I want to write a script with the Slack APIs that reads the messages from all the spaces I'm part of, scans them for event-related information, and creates a new event in my Google Calendar. I want to run this at the end of the day on all the messages from all the spaces.
from __future__ import print_function

import os
import json
import pprint
import time
import parsedatetime
from datetime import datetime
from datetime import timedelta
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError


def get_google_service():
    creds = None
    SCOPES = ['https://www.googleapis.com/auth/calendar']
    if os.path.exists('token.json'):
        creds = Credentials.from_authorized_user_file('token.json', SCOPES)
    return build('calendar', 'v3', credentials=creds)


def send_google_calendar_invite(service, channel_name, start_time, end_time):
    try:
        # f = open("template.json", "r")
        # template_data = f.read()
        template_data = '''
        {
            "summary": "event_name",
            "location": "event_location",
            "description": "event_description",
            "start": {
                "dateTime": "event_start_time",
                "timeZone": "America/Los_Angeles"
            },
            "end": {
                "dateTime": "event_end_time",
                "timeZone": "America/Los_Angeles"
            }
        }
        '''
        template_data = template_data.replace('event_name', channel_name)
        template_data = template_data.replace('event_location', channel_name + '-meeting')
        template_data = template_data.replace('event_description', channel_name + '-description')
        template_data = template_data.replace('event_start_time', start_time)
        template_data = template_data.replace('event_end_time', end_time)
        json_object = json.loads(template_data)
        json_formatted_str = json.dumps(json_object, indent=2)
        print(json_formatted_str)
        event = service.events().insert(calendarId='primary', body=json_object).execute()
        print('Event created: %s' % (event.get('htmlLink')))
    except HttpError as error:
        print('An error occurred: %s' % error)


def read_slack_messages():
    channel_id = "C04QL76V21X"
    try:
        lastHourDateTime = datetime.now() - timedelta(hours=24)
        client = WebClient(token=open("secrets.txt", "r").read())
        conversation_history = client.conversations_history(
            channel=channel_id, oldest=time.mktime(lastHourDateTime.timetuple()))
        channel_info_result = client.conversations_info(channel=channel_id)
        channel_name = channel_info_result['channel']['name']
        conversation_messages = conversation_history["messages"]
        print("{} messages found in {}".format(len(conversation_messages), channel_id))
        # import pdb; pdb.set_trace()
        service = get_google_service()
        for message in conversation_messages[:2]:
            chat_message = message['text']
            try:
                cal = parsedatetime.Calendar()
                dates = cal.parse(chat_message)
                print(dates)
                start_time = time.strftime('%Y-%m-%dT%H:%M:%S-%000:00', (dates[0]))
                end_time = start_time[:11] + f"{int(start_time[11:13])+1:02}" + start_time[13:]
                print(chat_message, ' : ', start_time, ' ||| ', end_time)
                send_google_calendar_invite(service, channel_name, start_time, end_time)
            except TypeError as e:
                print(' : Nope : ', e)
    except SlackApiError as e:
        print("Error getting conversation: {}".format(e))


if __name__ == '__main__':
    read_slack_messages()
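For what it's worth, parsedatetime's Calendar.parse returns a (struct_time, status) pair, and a status of 0 means nothing date-like was found in the text; checking it before creating an event avoids scheduling events for ordinary messages. A minimal sketch:
import time
import parsedatetime

cal = parsedatetime.Calendar()
struct, status = cal.parse("team meeting tomorrow at 10am")
if status != 0:  # 0 means parsedatetime found nothing date-like
    print(time.strftime('%Y-%m-%dT%H:%M:%S', struct))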

requests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))

I'm trying to make HTTP requests to an API. I have 1000 cells and I make the requests in batches of 10; when it reaches the 16th iteration I get the error requests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response')).
I tried the code below: when I run it for 100 cells it works, but for 1000 I get the error:
import requests
import json
from math import ceil
import pandas as pd
import urllib3

urllib3.disable_warnings()

df = pd.DataFrame()
sector = ["16220T","16242M","16242N","16242O","16242R","16242S","16242T","16259M","16259N","16259O","16259R","16259S","16259T","16261M","16261N","16261O","16261R","16261S","16261T","16272M","16272N","16272O","16272R","16272S","16272T","16276M","16276N","16276O","16276R","16276S","16276T","16307M","16307N","16307R","16307S","16309M","16309N","16309O","16309R","16309S","16309T","16313M","16313R","16314M","16314N","16314O","16314R","16314S","16314T","16315M","16315R","16316M","16316R","16318M","16318R","163194M","163194N","163194O","163194R","163194S","163194T","163198M","163198N","163198O","163198R","163198S","163198T","163202M","163202N","163202O","163202R","163202S","163202T","161209M","161209N","161209R","161209S","L23_161209V","L23_161209Y","16322M","16322R","16329M","16329R","163300R","163300S","16330M","16330N","16330R","16330S","16333M","16333R","16345M","16345N","16345O","16345R","16345S","16345T","16346M","16346N","16346O","16346R","16346S","16346T","16355M","16355N","16355O","16355R","16355S","16355T","16360M","16360N","16360R","16360S","L23_16361U","L23_16361V","L23_16361W","L23_16361X","L23_16361Y","L23_16361Z","16367M","16367R","16370M","16370R","16374M","16374N","16374O","16374R","16374S","16374T","16375I","16375J","16375K","16375L","16375M","16375N","16375R","16375S","16375T","16375X","16375Y","16375Z","16375O"]
headers = {
    'cognitiveswsecret': 'MjAyMmMwZ25pdGl2ZWFkY2ludGVncmF0aW9uMTIyMQ==',
    'Content-Type': 'application/json'
}

for i in range(ceil(len(sector) / 10)):
    l = sector[i*10:(i+1)*10]
    print(l)
    payload = json.dumps({
        "project": "atmdza_elte",
        "pixelSize": 7,
        "parameters": {
            "kpi": "dl_tput",
            "aggregation": "avg",
            "mdt": "1",
            "startTime": 1672185600000,
            "endTime": 1672264800000,
            "temporalResolution": "daily",
            "sector": l,
            "spatialResolution": "cell"
        }
    })
    response = requests.post("https://c210417.cognitive-software.ericsson.net/api/export/traces/raster/atmdza/Ericsson/lte?apikey=PLDceVe8HHh-tc18HKmQcA", headers=headers, data=payload, verify=False)
    df = pd.concat([df, pd.DataFrame.from_dict(response.json(), orient='columns')])
    print("Done!")

df.to_csv("Generated_VDT_Cells.csv", sep=";", index=False)
print("Extraction done!")

Pool apply function hangs and never executes

I am trying to fetch Rally data using its Python library, pyral. Sequentially the same code works, but it's slow.
I thought of using Python's multiprocessing package, but my pool.apply_async call gets stuck and never executes. I tried running it in the PyCharm IDE as well as the Windows cmd prompt.
import pandas as pd
from pyral import Rally
from multiprocessing import Pool, Manager
from pyral.entity import Project


def process_row(sheetHeaders: list, item: Project, L: list):
    print('processing row : ' + item.Name)  ## this print never gets called
    row = []  # list, not tuple: tuples have no append()
    for header in sheetHeaders:
        row.append(process_cell(header, item))
    L.append(row)


def process_cell(attr, item: Project):
    param = getattr(item, attr)
    if param is None:
        return None
    try:
        if attr == 'Owner':
            return param.__getattr__('Name')
        elif attr == 'Parent':
            return param.__getattr__('ObjectID')
        else:
            return param
    except KeyError as e:
        print(e)

# Projects
# PortfolioItem
# User Story
# Hierarchical Req
# tasks
# defects

# -------------MAIN-----------------
def main():
    # Rally connection
    rally = Rally('rally1.rallydev.com', apikey='<my_key>')
    file = 'rally_data.xlsx'
    headers = {
        'Project': ['Name', 'Description', 'CreationDate', 'ObjectID', 'Parent', 'Owner', 'State'],
    }
    sheetName = 'Project'
    sheetHeaders = headers.get(sheetName)

    p = Pool(1)
    result = rally.get(sheetName, fetch=True, pagesize=10)
    with Manager() as manager:
        L = manager.list()
        for item in result:
            print('adding row for : ' + item.Name)
            p.apply_async(func=process_row, args=(sheetHeaders, item, L))  ## gets stuck here
        p.close()
        p.join()
        pd.DataFrame(L).to_excel(file, sheet_name=sheetName)


if __name__ == '__main__':
    main()
I also tried it without the Manager list, with no difference in the outcome:
def main():
    # Rally connection
    rally = Rally('rally1.rallydev.com', apikey='<key>')
    file = 'rally_data.xlsx'
    headers = {
        'Project': ['Name', 'Description', 'CreationDate', 'ObjectID', 'Parent', 'Owner', 'State'],
    }
    sheetName = 'Project'
    sheetHeaders = headers.get(sheetName)

    result = rally.get(sheetName, fetch=True, pagesize=10)
    async_results = []
    with Pool(50) as p:
        for item in result:
            print('adding row for : ' + item.Name)
            async_results.append(p.apply_async(func=process_row, args=(sheetHeaders, item)))
    res = [r.get() for r in async_results]
    pd.DataFrame(res).to_excel(file, sheet_name=sheetName)
I don't know why, but replacing multiprocessing with multiprocessing.dummy in the import statement worked.
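multiprocessing.dummy exposes the same Pool API backed by threads instead of processes, so the arguments are shared rather than pickled; a plausible explanation (an assumption, not confirmed in the post) is that pickling the pyral objects is what made the process-based pool hang. A toy sketch of the swap:
# Same Pool interface, but thread-backed: arguments are shared, not pickled.
from multiprocessing.dummy import Pool

def process_row(name):  # stand-in for any callable passed to the pool
    return 'row for ' + name

with Pool(4) as p:
    results = p.map(process_row, ['a', 'b', 'c'])
print(results)  # ['row for a', 'row for b', 'row for c']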

youtube_dl video descriptions

I have a df containing a set of videoIDs from YT:
import pandas as pd

data = {'Order': ['1', '2', '3'],
        'VideoID': ['jxwHmAoKte4', 'LsXM502SpiU', '1I3f27iQ4pM']}
df = pd.DataFrame(data, columns=['Order', 'VideoID'])
print(df)
and want to download the video descriptions and save them in the same df in an extra column.
I tried to use youtube_dl in Jupyter this way:
import youtube_dl

def all_descriptions(URL):
    videoID = df['VideoId']
    URL = 'https://www.youtube.com/watch?v=' + videoID
    ydl_opts = {
        'forcedescription': True,
        'skip_download': True,
        'youtube-skip-dash-manifest': True,
        'no_warnings': True,
        'ignoreerrors': True
    }
    try:
        youtube_dl.YoutubeDL(ydl_opts).download(URL)
        return webpage
    except:
        pass

df['descriptions'] = all_descriptions(URL)
I see the output of the code as text, but the df column contains only "None".
Obviously I'm not getting the function's output into the df properly.
Can you suggest how to get it right?
Thank you in advance for your help.
@perl I modified the df to include two URLs that cause two types of error:
import pandas as pd

data = {'Order': ['1', '2', '3', '4', '5'],
        'VideoId': ['jxwHmAoKte4', 'LsXM502SpiU', '1I3f27iQ4pM', 'MGQOX2rK5s', 'wNayw_E7lIA']}
df = pd.DataFrame(data, columns=['Order', 'VideoId'])
print(df)
Then I tested it the way you suggested, including my definition of ydl_opts:
videoID = df['VideoId']
URL = 'https://www.youtube.com/watch?v=' + videoID
ydl_opts = {
    'forcedescription': True,
    'skip_download': True,
    'youtube-skip-dash-manifest': True,
    'no_warnings': True,
    'ignoreerrors': True
}
df['description'] = [
    youtube_dl.YoutubeDL(ydl_opts).extract_info(
        u, download=False)['description'] for u in URL]
df
When it reaches the first error, I get this output:
TypeError: 'NoneType' object is not subscriptable
After that I replace 'forcedescription' in my code with 'extract_info':
def all_descriptions(URL):
    videoID = df['VideoId']
    URL = 'https://www.youtube.com/watch?v=' + videoID
    ydl_opts = {
        'forcedescription': True,
        'skip_download': True,
        'youtube-skip-dash-manifest': True,
        'no_warnings': True,
        'ignoreerrors': True
    }
    try:
        youtube_dl.YoutubeDL(ydl_opts).download(URL)
        return webpage
    except:
        pass
It skips all the errors, but as a result there is nothing in the 'description' column.
Any suggestions?
You can use the extract_info method:
df['description'] = [
    youtube_dl.YoutubeDL().extract_info(
        u, download=False)['description'] for u in URL]
df
Output:
Order VideoID description
0 1 jxwHmAoKte4 Bundesweit gelten sie nun ab heute, die schärf...
1 2 LsXM502SpiU Wie sicher ist der Impfstoff? Wäre eine Impfpf...
2 3 1I3f27iQ4pM Impfen ja oder nein, diese Frage stellen sich ...
P.S. The forcedescription parameter only prints the description to standard output; it doesn't return it.
Update: extract_info returns None if it fails, so if some videos may fail, we can check that the info is not None before getting the description from it:
ydl = youtube_dl.YoutubeDL(ydl_opts)
infos = [ydl.extract_info(u, download=False) for u in URL]
df['description'] = [
    info['description'] if info is not None else ''
    for info in infos]

python TypeError: expected string or buffer when parsing JSON from a file

I realize this problem has been answered for other folks, but none of the threads are helping me solve it. I'm trying to parse a JSON structure and add all the values in the sent_file when the keys match those in the tweet_file. The error I'm getting is shown at the end.
import sys
import json

def main():
    sent_file = open(sys.argv[1])
    tweet_file = open(sys.argv[2])
    scores = {}
    #tweet = {}
    #tweet_text = {}
    #hw()
    #lines(sent_file)
    #lines(tweet_file)
    for line in sent_file:
        term, score = line.split("\t")
        scores[term] = int(score)
    #print scores.items()
    for tweets in tweet_file:
        current_sent_value = 0
        tweet = {}  # this is a dict
        #print type(tweets)  # str
        tweet = json.loads(tweets)  #[0] #this assignment changes tweet to a list. Why?
        if 'text' in tweet:
            tweet_text = {}
            unicode_string = tweet['text']
            encoded_string = unicode_string.encode('utf-8')
            tweet_text = encoded_string.split()
            for key in tweet_text:
                for key in scores:
                    #print type(tweet_text)  # -- list
                    #print type(scores)  # -- dict
                    if tweet_text.get(key) == scores.get(key):  # get() does not work on a list. tweet_text is a list.
                        current_sent_value += scores(value)
        print current_sent_value

if __name__ == '__main__':
    main()
The error is here:
File "\assignment1\tweet_sentiment2.py", line 42, in main
    if tweet_text.get(key) == scores.get(key):  # get() does not work on a list. tweet_text is a list.
AttributeError: 'list' object has no attribute 'get'
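Since encoded_string.split() produces a list of words, the usual pattern here (a sketch of an assumed fix, not code from the post) is to look each word up in the scores dict rather than call .get() on the list:
# tweet_text is a list of words; scores maps term -> sentiment value.
current_sent_value = 0
for word in tweet_text:
    current_sent_value += scores.get(word, 0)  # 0 for words not in the sentiment file
print(current_sent_value)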