Get data from DB2 with Flask and pyodbc using query parameters (API)

I am working on an API with Flask. The data is in DB2, and I connect with pyodbc as below:
@app.route('/api/acrdkl/all', methods=['GET'])
def api_all():
    conn = pyodbc.connect("DSN=AS400;UID=....;PWD=....")
    cur = conn.cursor()
    all_books = cur.execute("""select trim(dkkdcb), trim(dkkdps), trim(dkcob), trim(dkureg),
                                      trim(dkbktg), trim(dkblrg), trim(dkthrg)
                               from simdta.ACRDKL
                               where dkkdcb=1402 and dkblrg=10 and dkthrg=2020""")
    rows = cur.fetchall()
    result = []
    for dt in rows:
        result.append([x for x in dt])
    return jsonify(result)
The results come back as JSON, as expected. But when I tried to use parameters, as below:
@app.route('/api/acrdkl/filter', methods=['GET'])
def api_filter():
    dkkdcb = request.args.get('DKKDCB', 0)
    dkblrg = request.args.get('DKBLRG', 0)
    dkthrg = request.args.get('DKTHRG', 0)
    query = """select trim(dkkdcb), trim(dkkdps), trim(dkcob), trim(dkureg),
                      trim(dkbktg), trim(dkblrg), trim(dkthrg)
               from simdta.ACRDKL
               where dkkdcb = ? and dkblrg = ? and dkthrg = ?"""
    conn = pyodbc.connect("DSN=AS400;UID=.....;PWD=.....")
    cur = conn.cursor()
    rows = cur.execute(query, [int(dkkdcb), int(dkblrg), int(dkthrg)])
    rows.fetchall()
    print("rows 2 ", rows)
    result = []
    for dt in rows:
        result.append([x for x in dt])
    return jsonify(results)
Then I go to http://127.0.0.1:5000/api/acrdkl/filter?DKKDCB=1402&DKBLRG=10&DKTHRG=2020 and it throws an error like this:
pyodbc.DataError: ('22023', '[22023] [Microsoft][ODBC DB2 Driver]Data exception - SQLSTATE 22023, SQLCODE -302. SQLSTATE: 22023, SQLCODE: -302 (-302) (SQLExecDirectW)')
How do I get the desired result? Where is my mistake? Any help would be appreciated. Thanks.

I don't see that you are accessing the request data provided by Flask, e.g.:

    dkblrg = request.args.get('dkblrg', 0)
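Beyond that, DB2's SQLCODE -302 usually indicates that a bound parameter value is invalid or too large for its target column, so it is worth checking that each ? marker lines up with one value of the right type. A minimal sketch of the parameterized route, assuming qmark placeholders and that the three columns are numeric (the int() coercion and the default of 0 are assumptions, not confirmed against the asker's schema):

    from flask import Flask, request, jsonify
    import pyodbc

    app = Flask(__name__)

    @app.route('/api/acrdkl/filter', methods=['GET'])
    def api_filter():
        # Defaults of 0 mirror the question; real code may want to reject bad input.
        dkkdcb = int(request.args.get('DKKDCB', 0))
        dkblrg = int(request.args.get('DKBLRG', 0))
        dkthrg = int(request.args.get('DKTHRG', 0))
        conn = pyodbc.connect("DSN=AS400;UID=....;PWD=....")
        cur = conn.cursor()
        # pyodbc accepts the parameters either as separate arguments or as a sequence.
        cur.execute("""select trim(dkkdcb), trim(dkkdps), trim(dkcob), trim(dkureg),
                              trim(dkbktg), trim(dkblrg), trim(dkthrg)
                       from simdta.ACRDKL
                       where dkkdcb = ? and dkblrg = ? and dkthrg = ?""",
                    dkkdcb, dkblrg, dkthrg)
        rows = cur.fetchall()  # keep the fetched rows this time
        return jsonify([list(dt) for dt in rows])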

Related

Error inserting values into a PostgreSQL table using psycopg2

I have a PostgreSQL table with this schema:

id      terminated  code
string  boolean     integer
I want to add values from a pandas dataframe using this code:
param_dic = {
    "host": "xxx",
    "database": "xxxx",
    "user": "xxxxx",
    "password": "xxxx"
}

def connect(params_dic):
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params_dic)
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        sys.exit(1)
    return conn

conn = connect(param_dic)

def single_insert(conn, insert_req):
    """ Execute a single INSERT request """
    cursor = conn.cursor()
    try:
        cursor.execute(insert_req)
        conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error: %s" % error)
        conn.rollback()
        cursor.close()
        return 1
    cursor.close()
and then I am using:
for i in df.index:
    query = """
    INSERT into status(id, terminated, code) values('%s','%s','%s');
    """ % (df['id'], df['terminated'], df['code'])
    single_insert(conn, query)

# Close the connection
conn.close()
But I am getting this error message:
Error: invalid input syntax for type boolean: "0 1
16 1
28 1
44 1
51 1
..
1604 1
1615 1
Can anyone help me with this?
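No answer was recorded here, but the error message itself points at the bug: the interpolated values are whole pandas Series (df['id'] rather than df['id'][i]), so an entire column's repr ends up inside the SQL literal. A hedged sketch of one fix, binding one row at a time through psycopg2's own parameter handling instead of % formatting (the itertuples loop assumes the DataFrame columns are really named id, terminated and code):

    # Assumes `conn` and `df` from the question above.
    insert_sql = "INSERT INTO status (id, terminated, code) VALUES (%s, %s, %s);"
    cursor = conn.cursor()
    for row in df.itertuples(index=False):
        # psycopg2 adapts Python str/bool/int to text/boolean/integer itself.
        cursor.execute(insert_sql, (row.id, bool(row.terminated), int(row.code)))
    conn.commit()
    cursor.close()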

Is it possible to put EXECUTE IMMEDIATE and USING into a variable in Python?

This is a continuation of a previous post:
How to get result from BigQuery based on user input parameters
I tried to use EXECUTE IMMEDIATE and USING as these articles describe:
https://cloud.google.com/bigquery/docs/parameterized-queries
https://towardsdatascience.com/how-to-use-dynamic-sql-in-bigquery-8c04dcc0f0de
But when I run the SQL, I get a syntax error. I'd like to get my SQL checked; I guess the error is caused by the line breaks, but I want to keep them for readability. Sorry for my poor coding skill. Could you give me advice?
I'm a little worried that BigQuery might not support dynamic parameters in Python, because the articles above seem to use these statements in the console, not in Python.
The error
File "/srv/main.py", line 14 SELECT EXISTS(SELECT 1
SyntaxError: invalid syntax
SQL
query = """EXECUTE IMMEDIATE format("""
SELECT EXISTS(SELECT 1
FROM `test-266778.conversion_log.conversion_log_2020*` as p
WHERE p.luid = #request_luid AND orderid != '' limit 1000)""")"""
USING "request_luid" as request_luid;
/home/user/api_dev/main.py
from flask import Flask, request, jsonify
from google.cloud import bigquery

app = Flask(__name__)

@app.route('/')
def get_request():
    request_luid = request.args.get('luid') or ''
    client = bigquery.Client()
    query = """EXECUTE IMMEDIATE format("""
        SELECT EXISTS(SELECT 1
        FROM `test-266778.conversion_log.conversion_log_2020*` as p
        WHERE p.luid = @request_luid AND orderid != '' limit 1000)""")"""
        USING "request_luid" as request_luid;
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("request_luid", "STRING", request_luid)
        ]
    )
    query_job = client.query(query, job_config=job_config)
    query_res = query_job.result()
    first_row = next(iter(query_job.result()))
    for row in query_res:
        return str(row)
    # return jsonify({request_luid: query_res.total_rows})

if __name__ == "__main__":
    app.run()
You can try this:
def get_request():
    request_luid = request.args.get("luid") or ""
    client = bigquery.Client()
    query = """SELECT EXISTS(
        SELECT 1
        FROM `test-266778.conversion_log.conversion_log_2020*` as p
        WHERE p.luid = {}
        AND p.orderid is not null limit 1000)""".format(request_luid)
    query_job = client.query(query)
    query_res = query_job.result()
    first_row = next(iter(query_job.result()))
    for row in query_res:
        return str(row)
Note: if the luid is non-numeric, quote the placeholder: '{}'.
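Worth noting that the .format() version interpolates user input straight into the SQL text. As a hedged alternative (my addition, not part of this answer), the asker's own QueryJobConfig could be kept and EXECUTE IMMEDIATE dropped entirely, so @request_luid is bound server-side:

    from google.cloud import bigquery

    # Assumes `request_luid` from the Flask handler above.
    client = bigquery.Client()
    query = """
    SELECT EXISTS(
      SELECT 1
      FROM `test-266778.conversion_log.conversion_log_2020*` AS p
      WHERE p.luid = @request_luid AND p.orderid != ''
      LIMIT 1000)
    """
    job_config = bigquery.QueryJobConfig(
        query_parameters=[
            bigquery.ScalarQueryParameter("request_luid", "STRING", request_luid)
        ]
    )
    rows = client.query(query, job_config=job_config).result()
    first_row = next(iter(rows))
    print(first_row[0])  # True / False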
You can try this:
EXECUTE IMMEDIATE
"""SELECT EXISTS(SELECT 1 FROM `test-266778.conversion_log.conversion_log_2020*` WHERE luid = ? AND orderid is not null limit 1000)"""
USING
"string-value";
For a numeric input value, don't use the double quotes.
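On the worry that this only works in the console: EXECUTE IMMEDIATE is ordinary BigQuery scripting SQL, so it should be submittable through the Python client like any other query. A sketch under that assumption (the literal "some-luid" is a stand-in value; I have not run this against the asker's dataset):

    from google.cloud import bigquery

    client = bigquery.Client()
    script = '''
    EXECUTE IMMEDIATE
      """SELECT EXISTS(SELECT 1
         FROM `test-266778.conversion_log.conversion_log_2020*`
         WHERE luid = ? AND orderid IS NOT NULL LIMIT 1000)"""
    USING "some-luid";
    '''
    # For a script, result() returns the result set of the last statement.
    for row in client.query(script).result():
        print(row)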

Ruby - Implement an SQL server?

I have an application which has a Ruby API. I would like to link to this application from a SQL Server system.
Is there a way for me to implement an SQL server in Ruby which receives SQL statements and returns the requested data from the application? Is it then possible to hook into this from SQL Server applications?
E.G.
# request is a string like "SELECT * FROM MAIN_TABLE WHERE SOME_COLUMN = <SOME DATA>"
SQLEngine.OnRequest do |request|
  Application.RunSQL(request)
end
P.S. I don't have any experience with SQL Server, so I have no idea how one would go about this...
Note: I'm not asking how to query an SQL Server database; I'm asking how to implement an SQL server connection.
After some searching I found a few other Stack Overflow questions about how to write database drivers in other languages:
creating a custom odbc driver for application
Implementing a ODBC driver
Creating a custom ODBC driver
Alternatives to writing an ODBC driver
Potentially these will be useful for others going down this road. The most hopeful suggestion is implementing the PostgreSQL wire protocol, of which one implementation has been made in Python (Python 2 below), which should be relatively easy to port:
import SocketServer
import struct

def char_to_hex(char):
    retval = hex(ord(char))
    if len(retval) == 4:
        return retval[-2:]
    else:
        assert len(retval) == 3
        return "0" + retval[-1]

def str_to_hex(inputstr):
    return " ".join(char_to_hex(char) for char in inputstr)

class Handler(SocketServer.BaseRequestHandler):
    def handle(self):
        print "handle()"
        self.read_SSLRequest()
        self.send_to_socket("N")
        self.read_StartupMessage()
        self.send_AuthenticationClearText()
        self.read_PasswordMessage()
        self.send_AuthenticationOK()
        self.send_ReadyForQuery()
        self.read_Query()
        self.send_queryresult()

    def send_queryresult(self):
        fieldnames = ['abc', 'def']
        HEADERFORMAT = "!cih"
        fields = ''.join(self.fieldname_msg(name) for name in fieldnames)
        rdheader = struct.pack(HEADERFORMAT, 'T', struct.calcsize(HEADERFORMAT) - 1 + len(fields), len(fieldnames))
        self.send_to_socket(rdheader + fields)
        rows = [[1, 2], [3, 4]]
        DRHEADER = "!cih"
        for row in rows:
            dr_data = struct.pack("!ii", -1, -1)
            dr_header = struct.pack(DRHEADER, 'D', struct.calcsize(DRHEADER) - 1 + len(dr_data), 2)
            self.send_to_socket(dr_header + dr_data)
        self.send_CommandComplete()
        self.send_ReadyForQuery()

    def send_CommandComplete(self):
        HFMT = "!ci"
        msg = "SELECT 2\x00"
        self.send_to_socket(struct.pack(HFMT, "C", struct.calcsize(HFMT) - 1 + len(msg)) + msg)

    def fieldname_msg(self, name):
        tableid = 0
        columnid = 0
        datatypeid = 23
        datatypesize = 4
        typemodifier = -1
        format_code = 0  # 0=text 1=binary
        return name + "\x00" + struct.pack("!ihihih", tableid, columnid, datatypeid, datatypesize, typemodifier, format_code)

    def read_socket(self):
        print "Trying recv..."
        data = self.request.recv(1024)
        print "Received {} bytes: {}".format(len(data), repr(data))
        print "Hex: {}".format(str_to_hex(data))
        return data

    def send_to_socket(self, data):
        print "Sending {} bytes: {}".format(len(data), repr(data))
        print "Hex: {}".format(str_to_hex(data))
        return self.request.sendall(data)

    def read_Query(self):
        data = self.read_socket()
        msgident, msglen = struct.unpack("!ci", data[0:5])
        assert msgident == "Q"
        print data[5:]

    def send_ReadyForQuery(self):
        self.send_to_socket(struct.pack("!cic", 'Z', 5, 'I'))

    def read_PasswordMessage(self):
        data = self.read_socket()
        b, msglen = struct.unpack("!ci", data[0:5])
        assert b == "p"
        print "Password: {}".format(data[5:])

    def read_SSLRequest(self):
        data = self.read_socket()
        msglen, sslcode = struct.unpack("!ii", data)
        assert msglen == 8
        assert sslcode == 80877103

    def read_StartupMessage(self):
        data = self.read_socket()
        msglen, protoversion = struct.unpack("!ii", data[0:8])
        print "msglen: {}, protoversion: {}".format(msglen, protoversion)
        assert msglen == len(data)
        parameters_string = data[8:]
        print parameters_string.split('\x00')

    def send_AuthenticationOK(self):
        self.send_to_socket(struct.pack("!cii", 'R', 8, 0))

    def send_AuthenticationClearText(self):
        self.send_to_socket(struct.pack("!cii", 'R', 8, 3))

if __name__ == "__main__":
    server = SocketServer.TCPServer(("localhost", 9876), Handler)
    try:
        server.serve_forever()
    except:
        server.shutdown()
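To see the handshake happen, a minimal hand-rolled client can speak just the first two messages of the protocol. This smoke test is my addition, not part of the original answer; it assumes the server above is listening on localhost:9876 and is written in Python 3 syntax, unlike the Python 2 server:

    import socket
    import struct

    s = socket.create_connection(("localhost", 9876))
    # SSLRequest: length 8, magic code 80877103; the server should answer b"N".
    s.sendall(struct.pack("!ii", 8, 80877103))
    assert s.recv(1) == b"N"
    # StartupMessage: length, protocol 3.0 (196608), then key\0value\0 pairs and a final \0.
    params = b"user\x00test\x00database\x00test\x00\x00"
    s.sendall(struct.pack("!ii", 8 + len(params), 196608) + params)
    print(s.recv(1024))  # expect an AuthenticationCleartextPassword reply ('R', 8, 3)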
Every programming language, including Ruby, supplies packages which implement client interfaces to various SQL servers.
Start here: Ruby database access.

How can I populate a pandas DataFrame with the result of a Snowflake sql query?

Using the Python Connector I can query Snowflake:
import snowflake.connector

# Gets the version
ctx = snowflake.connector.connect(
    user=USER,
    password=PASSWORD,
    account=ACCOUNT,
    authenticator='https://XXXX.okta.com',
)
ctx.cursor().execute('USE warehouse MY_WH')
ctx.cursor().execute('USE MYDB.MYSCHEMA')

query = '''
select * from MYDB.MYSCHEMA.MYTABLE
LIMIT 10;
'''
cur = ctx.cursor().execute(query)
The result is a snowflake.connector.cursor.SnowflakeCursor. How can I convert that to a pandas DataFrame?
You can use DataFrame.from_records() or pandas.read_sql() with snowflake-sqlalchemy. The snowflake-sqlalchemy option has a simpler API.
pd.DataFrame.from_records(iter(cur), columns=[x[0] for x in cur.description])
will return a DataFrame with proper column names taken from the SQL result. The iter(cur) will convert the cursor into an iterator and cur.description gives the names and types of the columns.
So the complete code will be:
import snowflake.connector
import pandas as pd

# Gets the version
ctx = snowflake.connector.connect(
    user=USER,
    password=PASSWORD,
    account=ACCOUNT,
    authenticator='https://XXXX.okta.com',
)
ctx.cursor().execute('USE warehouse MY_WH')
ctx.cursor().execute('USE MYDB.MYSCHEMA')

query = '''
select * from MYDB.MYSCHEMA.MYTABLE
LIMIT 10;
'''
cur = ctx.cursor().execute(query)
df = pd.DataFrame.from_records(iter(cur), columns=[x[0] for x in cur.description])
If you prefer using pandas.read_sql, then you can do:
import pandas as pd
from sqlalchemy import create_engine
from snowflake.sqlalchemy import URL

url = URL(
    account='xxxx',
    user='xxxx',
    password='xxxx',
    database='xxx',
    schema='xxxx',
    warehouse='xxx',
    role='xxxxx',
    authenticator='https://xxxxx.okta.com',
)
engine = create_engine(url)
connection = engine.connect()

query = '''
select * from MYDB.MYSCHEMA.MYTABLE
LIMIT 10;
'''
df = pd.read_sql(query, connection)
There is now a method .fetch_pandas_all() for this, so there is no need for SQLAlchemy anymore.
Note that you need to install the Snowflake connector with pandas support:
pip install "snowflake-connector-python[pandas]"
Full documentation here
import pandas as pd
import snowflake.connector

conn = snowflake.connector.connect(
    user="xxx",
    password="xxx",
    account="xxx",
    warehouse="xxx",
    database="MYDB",
    schema="MYSCHEMA"
)
cur = conn.cursor()

# Execute a statement that will generate a result set.
sql = "select * from MYTABLE limit 10"
cur.execute(sql)

# Fetch the result set from the cursor and deliver it as the pandas DataFrame.
df = cur.fetch_pandas_all()
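If the result set is too large for one DataFrame, the connector also provides fetch_pandas_batches(), which yields one DataFrame per chunk. A small sketch reusing the cur from above (concatenating the batches is just one way to combine them):

    cur.execute("select * from MYTABLE")
    batches = cur.fetch_pandas_batches()  # generator of DataFrames
    df = pd.concat(batches, ignore_index=True)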
I just want to leave here a small change I made to the code to ensure that the columns have correct names (in my case the fetch call returned long column names that included information beyond the name itself). I leave it here in case someone needs it:
import snowflake.connector
import pandas as pd

n = 100000  # fetchmany batch size

def fetch_pandas(cur, sql):
    cur.execute(sql)
    # Column names come from the cursor description (position 0 of each entry).
    cols = [desc[0] for desc in cur.description]
    chunks = []
    while True:
        dat = cur.fetchmany(n)
        if not dat:
            break
        chunks.append(pd.DataFrame(dat, columns=cols))
    return pd.concat(chunks, ignore_index=True)

conn = snowflake.connector.connect(
    user='xxxxx',
    password='yyyyyy',
    account='zzzzz',
    warehouse='wwwww',
    database='mmmmmm',
    schema='nnnnn'
)
cursor = conn.cursor()
df = fetch_pandas(cursor, 'select * from "mmmmmm"."wwwww"."table"')

PyQt: exclusive OR in SQL query

How can I make it so that if my first search shows results it doesn't do the second part of the query, but stops and displays them? I tried something like this, but it just gives me a blank window and it's pretty chaotic:
def test_update(self):
    projectModel = QSqlQueryModel()
    projectModel.setQuery("""SELECT * FROM pacijent
        WHERE prezime = '%s'
           OR (prezime, 3) = metaphone('%s', 3)
           OR LEVENSHTEIN(LOWER(prezime), '%s') < 3
          AND NOT (prezime = '%s'
                   AND (prezime, 3) = metaphone('%s', 3)
                   AND LEVENSHTEIN(LOWER(prezime), '%s') < 3)"""
        % (str(self.lineEdit.text()), str(self.lineEdit.text()),
           str(self.lineEdit.text()), str(self.lineEdit.text()),
           str(self.lineEdit.text()), str(self.lineEdit.text())))
    global projectView
    projectView = QtGui.QTableView()
    projectView.setModel(projectModel)
    projectView.show()
So, if it finds the exact value of the attribute "prezime" it should display it, but if it doesn't, it should fall back to more advanced search tactics, such as metaphone and levenshtein.
EDIT:
I got it working like this:
def search_data(self):
    myQSqlQueryModel = QSqlQueryModel()
    query = QSqlQueryModel()
    global myQTableView
    myQTableView = QtGui.QTableView()
    querySuccess = False
    for queryCommand in [""" SELECT * FROM "%s" WHERE "%s" = '%s' """
                         % (str(self.search_from_table_lineEdit.text()),
                            str(self.search_where_lineEdit.text()),
                            str(self.search_has_value_lineEdit.text()))]:
        myQSqlQueryModel.setQuery(queryCommand)
        if myQSqlQueryModel.rowCount() > 0:
            myQTableView.setModel(myQSqlQueryModel)
            myQTableView.show()
            querySuccess = True
            break
    if not querySuccess:
        query.setQuery(""" SELECT * FROM "%s" WHERE METAPHONE("%s", 3) = METAPHONE('%s', 3) OR LEVENSHTEIN("%s", '%s') < 4 """
                       % (str(self.search_from_table_lineEdit.text()),
                          str(self.search_where_lineEdit.text()),
                          str(self.search_has_value_lineEdit.text()),
                          str(self.search_where_lineEdit.text()),
                          str(self.search_has_value_lineEdit.text())))
        global var
        var = QtGui.QTableView()
        var.setModel(query)
        var.show()
After a query runs, you can check whether the model actually has any rows with rowCount(), and use a for loop to try several queries in order:
def testUpdate(self):
    myQSqlQueryModel = QtSql.QSqlQueryModel()
    myQTableView = QtGui.QTableView()
    querySuccess = False
    for queryCommand in ["YOUR QUERY 1", "YOUR QUERY 2"]:
        myQSqlQueryModel.setQuery(queryCommand)
        if myQSqlQueryModel.rowCount() > 0:
            myQTableView.setModel(myQSqlQueryModel)
            myQTableView.show()
            querySuccess = True
            break
    if not querySuccess:
        QtGui.QMessageBox.critical(self, 'Query error', 'Not found')
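As an aside (my addition, not part of the accepted approach): both snippets build SQL with % interpolation, so a stray quote in the line edit breaks the query. QSqlQuery's prepare()/addBindValue() avoids that, and the two-stage fallback can stay the same. A sketch, assuming PyQt4 and that the METAPHONE and LEVENSHTEIN functions exist in the target database:

    from PyQt4 import QtGui, QtSql

    def search_pacijent(term):
        model = QtSql.QSqlQueryModel()
        # Stage 1: exact match, with the user input bound rather than interpolated.
        exact = QtSql.QSqlQuery()
        exact.prepare("SELECT * FROM pacijent WHERE prezime = ?")
        exact.addBindValue(term)
        exact.exec_()
        model.setQuery(exact)
        if model.rowCount() > 0:
            return model
        # Stage 2: fall back to fuzzy matching only when the exact search is empty.
        fuzzy = QtSql.QSqlQuery()
        fuzzy.prepare("SELECT * FROM pacijent "
                      "WHERE METAPHONE(prezime, 3) = METAPHONE(?, 3) "
                      "   OR LEVENSHTEIN(LOWER(prezime), LOWER(?)) < 4")
        fuzzy.addBindValue(term)
        fuzzy.addBindValue(term)
        fuzzy.exec_()
        model.setQuery(fuzzy)
        return model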