cx_oracle ORA-01036: illegal variable name/number - pandas

I have a group of Python scripts in which I make an API call, merge the resulting DataFrames, and then write the output to a table in an Oracle database. The same approach works perfectly in three other scripts configured the same way (just against a different API), but in this particular script the error below is thrown. I read up on bind variables, but I can't see what I'm doing wrong when binding a tuple. Thanks in advance for your time.
sf_joined = pd.merge(sf_opp, sf_account,on=["CustID","CustID"])
# sf_joined.to_csv('sf_joined.csv', index=False)
# sf_types = sf_joined.dtypes
# print(sf_types)
char_columns = sf_joined.select_dtypes(include=['object']).columns
for col in char_columns:
    if col not in ['rundate','Amount','Estimated_GC','Probability','MDC','DaysOpen']:
        sf_joined[col] = sf_joined[col].fillna('')
        # sf_joined[col] = sf_joined[col].map(lambda x: x.encode('utf-8'))
        sf_joined[col] = sf_joined[col].map(lambda x: x[:1000])
pw = '****'
db_con = cx_Oracle.connect('mktg', pw, "prd-bia-db-***.o******.com:1521/BIPRD", encoding = "UTF-8", nencoding = "UTF-8")
cur = db_con.cursor()
print(db_con.version)
cur.execute('drop table cs_salesforce')
create_opps = """create table cs_salesforce(
rundate date,
sfoppid varchar(500)
)
"""
cur.execute(create_opps)
all_opps = []
for x in sf_joined.itertuples():
    all_opps.append(x[1:])
insert_statement = """insert into cs_salesforce(rundate,sfoppid)values(:1, :2)"""
cur.executemany(insert_statement, all_opps)
db_con.commit()
Error:
runfile('C:/python_scripts_prod/cs_salesforce.py', wdir='C:/python_scripts_prod')
18.3.0.0.0
Traceback (most recent call last):
File "C:\python_scripts_prod\cs_salesforce.py", line 163, in <module>
cur.executemany(insert_statement, all_opps)
DatabaseError: ORA-01036: illegal variable name/number
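For context, and purely as an inference from the code above rather than something stated in the question: ORA-01036 from executemany() typically means the number of bind placeholders does not match the length of each data tuple. itertuples() yields every column of sf_joined, while the INSERT declares only two binds. A minimal sketch of one way to make the counts agree, assuming sf_joined really has columns named rundate and sfoppid (the actual column names may differ):
# Hypothetical sketch: keep only the two columns named in the INSERT so each
# tuple lines up with the :1 and :2 bind placeholders.
all_opps = list(
    sf_joined[["rundate", "sfoppid"]].itertuples(index=False, name=None)
)

insert_statement = "insert into cs_salesforce (rundate, sfoppid) values (:1, :2)"
cur.executemany(insert_statement, all_opps)
db_con.commit()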

Related

Python Dbf append to memory indexed table fails

I'm using the Python dbf-0.99.1 library from Ethan Furman. This approach to adding a record to the table fails:
tab = dbf.Table( "MYTABLE" )
tab.open(mode=dbf.READ_WRITE)
idx = tab.create_index(lambda rec: (rec.id if not is_deleted(rec) else DoNotIndex ) ) # without this, append works
rec = { "id":id, "col2": val2 } # some values, id is numeric and is not None
tab.append( rec ) # fails here
My table contains various character and numeric columns; this is just an example. The exception is:
line 5959, in append
newrecord = Record(recnum=header.record_count, layout=meta, kamikaze=kamikaze)
line 3102, in __new__
record._update_disk()
line 3438, in _update_disk
index(self)
line 7550, in __call__
vindex = bisect_right(self._values, key)
TypeError: '<' not supported between instances of 'NoneType' and 'int'
Any help appreciated. Thanks.
EDIT: Here is a testing script:
import dbf
from dbf import is_deleted, DoNotIndex
tab = dbf.Table('temptable', "ID N(12,0)" )
tab.open(mode=dbf.READ_WRITE)
rc = { "id":1 }
tab.append( rc ) # need some data without index first
idx = tab.create_index(lambda rec: (rec.id if not is_deleted(rec) else DoNotIndex ) )
rc = { "id":2 }
tab.append( rc ) # fails here
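A guess at a workaround, not something confirmed in the question: the traceback suggests the index callback runs while the freshly appended record's id is still empty, so bisect ends up comparing None with the existing integer keys. Making the key function skip unwritten ids avoids that comparison (rec.id is assumed here to come back as None for a blank record):
import dbf
from dbf import is_deleted, DoNotIndex

tab = dbf.Table('temptable', "ID N(12,0)")
tab.open(mode=dbf.READ_WRITE)
tab.append({"id": 1})

def id_key(rec):
    # Skip deleted records and records whose id has not been written yet
    if is_deleted(rec) or rec.id is None:
        return DoNotIndex
    return rec.id

idx = tab.create_index(id_key)
tab.append({"id": 2})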

I get this error when I try to use Wolfram Alpha in VS Code (Python): ValueError: dictionary update sequence element #0 has length 1; 2 is required

This is my code
import wolframalpha
app_id = '876P8Q-R2PY95YEXY'
client = wolframalpha.Client(app_id)
res = client.query(input('Question: '))
print(next(res.results).text)
The question I tried was 1 + 1, and when I run it I get this error:
Traceback (most recent call last):
File "c:/Users/akshi/Desktop/Xander/Untitled.py", line 9, in <module>
print(next(res.results).text)
File "C:\Users\akshi\AppData\Local\Programs\Python\Python38\lib\site-packages\wolframalpha\__init__.py", line 166, in text
return next(iter(self.subpod)).plaintext
ValueError: dictionary update sequence element #0 has length 1; 2 is required
Please help me
I was getting the same error when I tried to run the same code.
You can refer to the "Implementing Wolfram Alpha Search" section of this website for a better understanding of how the result is extracted from the dictionary that is returned:
https://medium.com/@salisuwy/build-an-ai-assistant-with-wolfram-alpha-and-wikipedia-in-python-d9bc8ac838fe
Also, I tried the following code by referring to the above website; hope it helps :)
import wolframalpha
client = wolframalpha.Client('<your app_id>')
query = str(input('Question: '))
res = client.query(query)
if res['@success'] == 'true':
    pod0 = res['pod'][0]['subpod']['plaintext']
    print(pod0)
    pod1 = res['pod'][1]
    if (('definition' in pod1['@title'].lower()) or ('result' in pod1['@title'].lower()) or (pod1.get('@primary', 'false') == 'true')):
        result = pod1['subpod']['plaintext']
        print(result)
else:
    print("No answer returned")

Unable to use 'read_sql' to call a SQL query class

I am trying to pull results from the database with the following code:
import pandas as pd
import pyodbc
class DataManagement(object):
    def __init__(self, database=None, server=None, trusted_connection=True, database_driver=ODBC_SQL2005_2012, uid=None, pwd=None):
        self.server = server
        self.database = database
        self.uid = uid
        self.pwd = pwd

        # Use default server name if none supplied - assumed to be localhost
        if self.server is None:
            self.server = SERVER
        if self.database is None:
            self.database = DATABASE

        # Use default sql credentials if none provided
        if self.uid is None or self.pwd is None:
            self.uid = DEFAULT_UID
            self.pwd = DEFAULT_PASSWORD

        if trusted_connection:
            self.connectionstring = "DRIVER={0};SERVER={1};DATABASE={2};Trusted_Connection=yes;".format(database_driver, self.server, self.database)
        else:
            self.connectionstring = 'DRIVER={0};SERVER={1};DATABASE={2};UID={3};PWD={4};'.format(database_driver, self.server, self.database, uid, pwd)

        self.connection = pyodbc.connect(self.connectionstring)
        self.cursor = self.connection.cursor()

    def __enter__(self):
        return self

    def __exit__(self, ctx_type, ctx_value, ctx_traceback):
        self.connection.commit()
        self.connection.close()

qq = DataManagement()
sql = "select * from ***** "
data_df = pd.read_sql(sql, qq)
I get an error:
Traceback (most recent call last):
File "<ipython-input-94-453876631fe0>", line 3, in <module>
data_df = pd.read_sql(sql, qq)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 380, in read_sql
chunksize=chunksize)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1468, in read_query
cursor = self.execute(*args)
File "***\AppData\Local\Continuum\anaconda3\lib\site-packages\pandas\io\sql.py", line 1426, in execute
cur = self.con.cursor()
TypeError: 'pyodbc.Cursor' object is not callable
I saw a similar question at "TypeError: 'pyodbc.Cursor' object is not callable (Python 3.6)" but was unable to get an answer from there.
I got it to work by editing the class from
self.cursor = self.connection.cursor()
into
self.cursor = self.connection.cursor
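For what it's worth, an alternative that sidesteps the issue entirely (a sketch under the assumption that the wrapper keeps the raw connection on self.connection, as shown above): pandas only needs an object whose .cursor() it can call, so you can hand read_sql the pyodbc connection itself rather than the DataManagement instance.
# Hypothetical usage: pass the underlying pyodbc connection to pandas directly
with DataManagement() as qq:
    data_df = pd.read_sql("select * from some_table", qq.connection)  # some_table is a placeholder name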

Python: index out of range but I can't see why

I'm trying to code a program that opens a file, creates a list containing each line of this file, and removes some words from this list, but I get an index out of range error.
#! /usr/bin/python3
# open the file
f = open("test.txt", "r")
# a list that contains each line of my file (without \n)
lines = f.read().splitlines()
# close the file
f.close()
# some words I want to delete from the file
data = ["fire", "water"]
# for each line of the file...
for i in range(len(lines)):
    # if this line is in [data]
    if lines[i] in data:
        # print and delete this line from [lines]
        print(lines[i])
        del lines[i]
This is my text file:
sun
moon
*
fire
water
*
metal
This is my output:
fire
Traceback (most recent call last):
File "debug.py", line 16, in <module>
if lines[i] in data:
IndexError: list index out of range
I'll address the index out of range error first.
What's happening in your post is that the loop takes the length of the list,
which is 7, so len(lines) = 7, and range(7) covers indices 0 through 6, which gives this:
lines[0] = sun
lines[1] = moon
lines[2] = *
lines[3] = fire
lines[4] = water
lines[5] = *
lines[6] = metal
=>
lines[0] = sun
lines[1] = moon
lines[2] = *
deleted lines[3] = fire
lines[3] = water
lines[4] = *
lines[5] = metal
After you delete lines[3], the loop still tries to reach lines[6], which no longer exists.
It also moves straight on to the next index, lines[4], so the water that slid into position 3 is never checked; it is skipped.
I hope this helps and I suggest you do this instead:
# open the file
f = open("test.txt", "r")
# a list that contains each line of my file (without \n)
lines = f.read().splitlines()
# close the file
f.close()
# some words I want to delete from the file
data = ["fire", "water"]
for i in data:
    if i in lines:
        lines.remove(i)
print(lines)
Output:
['sun', 'moon', '*', '*', 'metal']
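Another equivalent way to do the same thing, offered only as a variation: rebuild the list in one pass so you never mutate it while iterating over it.
# Keep only the lines that are not in the list of words to delete
lines = [line for line in lines if line not in data]
print(lines)  # ['sun', 'moon', '*', '*', 'metal']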

pypyodbc execute returns list index out of range error

I have a function that runs 3 queries and returns the result of the last one (using the earlier results to build the last query). When I get to the 3rd query, I get a list index out of range error. I have run this exact query as the first query (with manually entered variables) and it worked fine.
This is my code:
import pypyodbc

def sql_conn():
    conn = pypyodbc.connect(r'Driver={SQL Server};'
                            r'Server=HPSQL31\ni1;'
                            r'Database=tq_hp_prod;'
                            r'Trusted_Connection=yes;')
    cursor = conn.cursor()
    return conn, cursor

def get_number_of_jobs(ticket):
    # Get Connection
    conn, cursor = sql_conn()

    # Get asset number
    sqlcommand = "select top 1 item from deltickitem where dticket = {} and cat_code = 'Trq sub'".format(ticket)
    cursor.execute(sqlcommand)
    asset = cursor.fetchone()[0]
    print(asset)

    # Get last MPI date
    sqlcommand = "select last_test from prevent where item = {} and description like '%mpi'".format(asset)
    cursor.execute(sqlcommand)
    last_recal = cursor.fetchone()[0]
    print(last_recal)

    # Get number of jobs since last recalibration
    sqlcommand = """select count(i.item)
                    from deltickhdr as d
                    join deltickitem as i
                    on d.dticket = i.dticket
                    where i.start_rent >= '2017-03-03 00:00:00'
                    and i.meterstart <> i.meterstop
                    and i.item = '002600395'"""  # .format(last_recal, asset)
    cursor.execute(sqlcommand)
    num_jobs = cursor.fetchone()[0]
    print(num_jobs)

    cursor.close()
    conn.close()
    return num_jobs

ticketnumber = 14195  # int(input("Ticket: "))
get_number_of_jobs(ticketnumber)
Below is the error I get when I reach the third cursor.execute(sqlcommand):
Traceback (most recent call last):
File "C:\Program Files\JetBrains\PyCharm Community Edition 2016.3.2\helpers\pydev\pydevd.py", line 1596, in <module>
globals = debugger.run(setup['file'], None, None, is_module)
File "C:\Program Files\JetBrains\PyCharm Community Edition 2016.3.2\helpers\pydev\pydevd.py", line 974, in run
pydev_imports.execfile(file, globals, locals) # execute the script
File "C:\Program Files\JetBrains\PyCharm Community Edition 2016.3.2\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "C:/Users/bdrillin/PycharmProjects/Torque_Turn_Data/tt_sub_ui.py", line 56, in <module>
get_number_of_jobs(ticketnumber)
File "C:/Users/bdrillin/PycharmProjects/Torque_Turn_Data/tt_sub_ui.py", line 45, in get_number_of_jobs
cursor.execute(sqlcommand)
File "C:\ProgramData\Anaconda3\lib\site-packages\pypyodbc.py", line 1470, in execute
self._free_stmt(SQL_CLOSE)
File "C:\ProgramData\Anaconda3\lib\site-packages\pypyodbc.py", line 1994, in _free_stmt
check_success(self, ret)
File "C:\ProgramData\Anaconda3\lib\site-packages\pypyodbc.py", line 1007, in check_success
ctrl_err(SQL_HANDLE_STMT, ODBC_obj.stmt_h, ret, ODBC_obj.ansi)
File "C:\ProgramData\Anaconda3\lib\site-packages\pypyodbc.py", line 972, in ctrl_err
state = err_list[0][0]
IndexError: list index out of range
Any help would be great
I've had the same error.
Even though I haven't come to a definite conclusion about what this error means, I thought my guess might help anyone else ending up here.
In my case, the problem was a conflict between data type lengths (NVARCHAR(24) and CHAR(10)).
So I suspect this IndexError in the ctrl_err function just means there is an error in your SQL that pypyodbc does not know how to handle.
I know this is not much of an answer, but it would have saved me a couple of hours had I known this was not a bug in pypyodbc but an inconsistency in the data I was inserting.
Kind regards,
Luka
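Following on from Luka's point, one way to rule out quoting or data type mistakes in that third query (a suggestion, not a confirmed diagnosis) is to let the driver bind last_recal and asset instead of interpolating them with .format():
# Sketch: use ? placeholders so pypyodbc handles quoting of the bound values
sqlcommand = """select count(i.item)
                from deltickhdr as d
                join deltickitem as i on d.dticket = i.dticket
                where i.start_rent >= ?
                  and i.meterstart <> i.meterstop
                  and i.item = ?"""
cursor.execute(sqlcommand, (last_recal, asset))
num_jobs = cursor.fetchone()[0]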