How to migrate pandas read_sql from psycopg2 to sqlalchemy with a tuple as one of the query params - pandas

With pandas=1.4.0, it emits a Warning about not using psycopg2 directly within read_sql, but to use sqlalchemy. While attempting to do such a migration, I can not resolve how to pass a tuple as one of the query parameters. For example, this presently works:
import pandas as pd
import psycopg2
pd.read_sql(
"SELECT * from news where id in %s",
psycopg2.connect("dbname=mydatabase"),
params=[(1, 2, 3),],
)
attempting to migrate this to sqlalchemy like so:
import pandas as pd
pd.read_sql(
"SELECT * from news where id in %s",
"postgresql://localhost/mydatabase",
params=[(1, 2, 3),],
)
results in
...snipped...
File "/opt/miniconda3/envs/prod/lib/python3.8/site-packages/sqlalchemy/engine/base.py", line 1802, in _execute_context
self.dialect.do_execute(
File "/opt/miniconda3/envs/prod/lib/python3.8/site-packages/sqlalchemy/engine/default.py", line 732, in do_execute
cursor.execute(statement, parameters)
TypeError: not all arguments converted during string formatting
So how do I pass a tuple as a params argument within pandas read_sql?

Wrap your query with a SQLAlchemy text object, use named parameters and pass the parameter values as a dictionary:
import pandas as pd
from sqlalchemy import text
pd.read_sql(
text("SELECT * from news where id in :ids"),
"postgresql://localhost/mydatabase",
params={'ids': (1, 2, 3),},
)

Related

snowflake.connector SQL compilation error invalid identifier from pandas dataframe

I'm trying to ingest a df I created from a json response into an existing table (the table is currently empty because I can't seem to get this to work)
The df looks something like the below table:
index
clicks_affiliated
0
3214
1
2221
but I'm seeing the following error:
snowflake.connector.errors.ProgrammingError: 000904 (42000): SQL
compilation error: error line 1 at position 94
invalid identifier '"clicks_affiliated"'
and the column names in snowflake match to the columns in my dataframe.
This is my code:
import pandas as pd
from snowflake.sqlalchemy import URL
from sqlalchemy import create_engine
import snowflake.connector
from snowflake.connector.pandas_tools import write_pandas, pd_writer
from pandas import json_normalize
import requests
df_norm = json_normalize(json_response, 'reports')
#I've tried also adding the below line (and removing it) but I see the same error
df = df_norm.reset_index(drop=True)
def create_db_engine(db_name, schema_name):
    """Build a Snowflake SQLAlchemy connection URL.

    Args:
        db_name: target database name.
        schema_name: target schema name.

    Returns:
        A snowflake.sqlalchemy.URL object suitable for passing to
        sqlalchemy.create_engine().
    """
    # Bug fix: the original hard-coded database="DB" and schema="PUBLIC",
    # silently ignoring both parameters. Pass them through instead; the only
    # call site passes those same values, so behavior is unchanged there.
    engine = URL(
        account="ab12345.us-west-2",
        user="my_user",
        password="my_pw",
        database=db_name,
        schema=schema_name,
        warehouse="WH1",
        role="DEV"
    )
    return engine
# Write `out_df` to a Snowflake table via SQLAlchemy + pd_writer.
# out_df: DataFrame to persist; table_name: destination table name;
# idx: whether to write the DataFrame index as a column (default False).
# NOTE(review): always returns True, even when the write failed and only a
# message was printed -- callers cannot distinguish success from failure.
def create_table(out_df, table_name, idx=False):
# db_name/schema_name are currently ignored by create_db_engine, which
# hard-codes database="DB" and schema="PUBLIC".
url = create_db_engine(db_name="DB", schema_name="PUBLIC")
engine = create_engine(url)
connection = engine.connect()
try:
# pd_writer quotes column names, so the DataFrame's columns must match the
# table's column case exactly -- presumably the cause of the "invalid
# identifier" error discussed here; verified by the uppercase fix below.
out_df.to_sql(
table_name, connection, if_exists="append", index=idx, method=pd_writer
)
# NOTE(review): to_sql failures typically raise ProgrammingError or
# OperationalError, not ConnectionError, so this handler rarely fires.
except ConnectionError:
print("Unable to connect to database!")
finally:
# Release the connection and dispose of the engine's pool regardless of
# whether the write succeeded.
connection.close()
engine.dispose()
return True
print(df.head())
create_table(df, "reporting")
So... it turns out I needed to change my columns in my dataframe to uppercase
I've added this after the dataframe creation to do so and it worked:
df.columns = map(lambda x: str(x).upper(), df.columns)

Load Teradata table from Python Pandas Dataframe

I am getting below error while trying to load Teradata table from Python Pandas, Any idea ?
teradatasql and pandas - writing dataframe into TD table - Error 3707 - Syntax error, expected something like '(' between the 'type' keyword and '='
import teradatasql
import pandas as pd
conTD = teradatasql.connect(host=Host, user=User, password=Passwd, logmech="LDAP", encryptdata="true")
df.to_sql(tableName, conTD, schema=schemaName, if_exists='fail', index=False)

Transfer a df to a new one and change the context of a column

I have one dataframe df_test and I want to parse all the columns into a new df.
Also I want with if else statement to modify one column's context.
Tried this:
import pyspark
import pandas as pd
from pyspark.sql import SparkSession
df_cast= df_test.withColumn('account_id', when(col("account_id") == 8, "teo").when(col("account_id") == 9, "liza").otherwise(' '))
But it gives me this error:
NameError: name 'when' is not defined
Thanks in advance
At the start of your code, you should import the pyspark sql functions. The following, for example, would work:
import pyspark.sql.functions as F
import pyspark
import pandas as pd
from pyspark.sql import SparkSession
df_cast = df_test.withColumn('account_id', F.when(F.col("account_id") == 8, "teo").when(F.col("account_id") == 9, "liza").otherwise(' '))

The type of <field> is not a SQLAlchemy type with Pandas to_sql to an Oracle database

I have a pandas dataframe that has several categorical fields.
SQLAlchemy throws a exception "The type of is not a SQLAlchemy type".
I've tried converting the object fields back to string, but get the same error.
dfx = pd.DataFrame()
for col_name in df.columns:
if(df[col_name].dtype == 'object'):
dfx[col_name] = df[col_name].astype('str').copy()
else:
dfx[col_name] = df[col_name].copy()
print(col_name, dfx[col_name].dtype)
.
dfx.to_sql('results', con=engine, dtype=my_dtypes, if_exists='append', method='multi', index=False)
the new dfx seems to have the same categoricals despite creating a new table with .copy()
Also, as a side note, why does to_sql() generate a CREATE TABLE with CLOBs?
No need to use the copy() function here, and you should not have to convert from 'object' to 'str' either.
Are you writing to an Oracle database? The default output type for text data (including 'object') is CLOB. You can get around it by specifying the dtype to use. For example:
import pandas as pd
from sqlalchemy import types, create_engine
from sqlalchemy.exc import InvalidRequestError
conn = create_engine(...)
testdf = pd.DataFrame({'pet': ['dog','cat','mouse','dog','fish','pony','cat']
, 'count': [2,6,12,1,45,1,3]
, 'x': [105.3, 98.7, 112.4, 3.6, 48.9, 208.9, -1.7]})
test_types = dict(zip(
testdf.columns.tolist(),
(types.VARCHAR(length=20), types.Integer(), types.Float()) ))
try:
testdf.to_sql( name="test", schema="myschema"
, con=conn
, if_exists='replace' #'append'
, index=False
, dtype = test_types)
print ("Wrote final input dataset to table myschema.test")
except (ValueError, InvalidRequestError):
print ("Could not write to table 'test'.")
If you are not writing to Oracle, please specify your target database - perhaps someone else with experience in that DBMS can advise you.
What @eknumbat said is absolutely correct. For AWS Redshift, you can do the following. Note you can find all of the sqlalchemy datatypes here (https://docs.sqlalchemy.org/en/14/core/types.html)
import pandas as pd
from sqlalchemy.types import VARCHAR, INTEGER, FLOAT
from sqlalchemy import create_engine
conn = create_engine(...)
testdf = pd.DataFrame({'pet': ['dog','cat','mouse','dog','fish','pony','cat'],
'count': [2,6,12,1,45,1,3],
'x': [105.3, 98.7, 112.4, 3.6, 48.9, 208.9, -1.7]})
test_types = {'pet': VARCHAR, 'count': INTEGER, 'x': FLOAT}
testdf.to_sql(name="test",
schema="myschema",
con=conn,
if_exists='replace',
index=False,
dtype = test_types)

psycopg2: can't adapt type 'numpy.int64'

I have a dataframe with the dtypes shown below and I want to insert the dataframe into a postgres DB but it fails due to error can't adapt type 'numpy.int64'
id_code int64
sector object
created_date float64
updated_date float64
How can I convert these types to native python types such as from int64 (which is essentially 'numpy.int64') to a classic int that would then be acceptable to postgres via the psycopg2 client.
data['id_code'].astype(np.int) defaults to int64
It is nonetheless possible to convert from one numpy type to another (e.g from int to float)
data['id_code'].astype(float)
changes to
dtype: float64
The bottomline is that psycopg2 doesn't seem to understand numpy datatypes if any one has ideas how to convert them to classic types that would be helpful.
Updated: Insertion to DB
# Insert rows from an Excel sheet into the `classification` table.
# Relies on module-level `fh`, `sheetname`, and `db` (connection string) --
# presumably defined elsewhere in the script; TODO confirm.
def insert_many():
"""Add data to the table."""
sql_query = """INSERT INTO classification(
id_code, sector, created_date, updated_date)
VALUES (%s, %s, %s, %s);"""
data = pd.read_excel(fh, sheet_name=sheetname)
# NOTE(review): to_records() yields NumPy scalar types (numpy.int64,
# numpy.float64) and includes the DataFrame index as the first field --
# psycopg2 cannot adapt these, which is the source of the
# "can't adapt type 'numpy.int64'" error this question asks about.
data_list = list(data.to_records())
conn = None
try:
conn = psycopg2.connect(db)
cur = conn.cursor()
# Executes the INSERT once per row; fine for small spreadsheets.
cur.executemany(sql_query, data_list)
conn.commit()
cur.close()
except(Exception, psycopg2.DatabaseError) as error:
# Best-effort: report the failure but do not re-raise.
print(error)
finally:
# Always close the connection, whether or not the insert succeeded.
if conn is not None:
conn.close()
Add below somewhere in your code:
# Teach psycopg2 how to adapt NumPy scalar types: each value is rendered
# as-is (its plain string representation), so INSERT parameters may contain
# numpy.int64 / numpy.float64 without raising "can't adapt type".
import numpy
from psycopg2.extensions import register_adapter, AsIs


def addapt_numpy_float64(numpy_float64):
    """Adapt a numpy.float64 for psycopg2 by passing it through unchanged."""
    return AsIs(numpy_float64)


def addapt_numpy_int64(numpy_int64):
    """Adapt a numpy.int64 for psycopg2 by passing it through unchanged."""
    return AsIs(numpy_int64)


# Hook both adapters into psycopg2's global adapter registry.
for _np_type, _adapter in ((numpy.float64, addapt_numpy_float64),
                           (numpy.int64, addapt_numpy_int64)):
    register_adapter(_np_type, _adapter)
same problem here, successfully solve this problem after I transform series to nd.array and int.
you can try as following:
data['id_code'].values.astype(int)
--
update:
if the value including NaN, it still wrong.
It seems that psycopg2 can't explain the np.int64 format, therefore the following methods works for me.
import numpy as np
from psycopg2.extensions import register_adapter, AsIs
psycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)
I'm not sure why your data_list contains NumPy data types, but the same thing happens to me when I run your code. Here is an alternative way to construct data_list that so that integers and floats end up as their native python types:
data_list = [list(row) for row in data.itertuples(index=False)]
Alternate approach
I think you could accomplish the same thing in fewer lines of code by using pandas to_sql:
import sqlalchemy
import pandas as pd
data = pd.read_excel(fh, sheet_name=sheetname)
engine = sqlalchemy.create_engine("postgresql://username@hostname/dbname")
data.to_sql('classification', engine, if_exists='append', index=False)
I had the same issue and fixed it using:
df = df.convert_dtypes()