Spark SQL date conversion error from timestamp to string when using Presto - apache-spark-sql

I am trying to pull data from a table where the dates are stored as timestamps, e.g. 2019-06-31 19:00:00, and I want them as strings in the form 20190631.
It works if I choose get_pandas_df instead of get_spark_df, but I need it in get_spark_df.
I have tried this so far:
start_date = '2019-06-15'
end_date = '2019-06-31'
records = QaasHook.get_spark_df("""
    (SELECT name,
            cast(date_format(startdt, '%Y%m%d') AS VARCHAR) AS stdt,
            cast(amnt AS DOUBLE) AS amount,
            cast(date_format(enddt, '%Y%m%d') AS VARCHAR) AS endt
     FROM table_1
     WHERE cast(date_format(startdt, '%Y%m%d') AS INT) >= {st_dt}
       AND cast(date_format(enddt, '%Y%m%d') AS INT) <= {end_dt})
""".format(st_dt=start_date.replace('-', ''), end_dt=end_date.replace('-', '')))
But I get an error pointing at the format line:
Py4JJavaError Traceback (most recent call last)
<ipython-input-36-c72fafc06f37> in <module>
159 where cast(date_format(cph.d_chkdate,'%Y%m%d') as int) >= {st_dt} and cast(date_format(cph.d_chkdate,'%Y%m%d') as int) <= {end_dt})
--> 160 """.format(st_dt = start_date.replace('-',''), end_dt = end_date.replace('-',''))
161 )
162
/opt/pyenv/versions/3.7.4/lib/python3.7/site-packages/dss_airflow_utils/hooks/qaas_hook.py in get_spark_df(self, dbtable, options)
258 ", ".join(actual_options.keys()))
259 spark_op = spark_op.options(**actual_options)
--> 260 return spark_op.load()
261
262 def get_conn(self):
/opt/spark-3.0.0/python/pyspark/sql/readwriter.py in load(self, path, format, schema, **options)
182 return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
183 else:
--> 184 return self._df(self._jreader.load())
185
186 @since(1.4)
/opt/spark-3.0.0/python/lib/py4j-0.10.9-src.zip/py4j/java_gateway.py in __call__(self, *args)
1303 answer = self.gateway_client.send_command(command)
1304 return_value = get_return_value(
-> 1305 answer, self.gateway_client, self.target_id, self.name)
1306
1307 for temp_arg in temp_args:
/opt/spark-3.0.0/python/pyspark/sql/utils.py in deco(*a, **kw)
129 def deco(*a, **kw):
130 try:
--> 131 return f(*a, **kw)
132 except py4j.protocol.Py4JJavaError as e:
133 converted = convert_exception(e.java_exception)
/opt/spark-3.0.0/python/lib/py4j-0.10.9-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o527.load.
: java.sql.SQLException: Error executing query
at io.prestosql.jdbc.PrestoStatement.internalExecute(PrestoStatement.java:274)
at io.prestosql.jdbc.PrestoStatement.execute(PrestoStatement.java:227)
at io.prestosql.jdbc.PrestoPreparedStatement.<init>(PrestoPreparedStatement.java:80)
at io.prestosql.jdbc.PrestoConnection.prepareStatement(PrestoConnection.java:128)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCRDD$.resolveTable(JDBCRDD.scala:58)
at org.apache.spark.sql.execution.datasources.jdbc.JDBCRelation$.getSchema(JDBCRelation.scala:226)
at org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider.createRelation(JdbcRelationProvider.scala:35)
at org.apache.spark.sql.execution.datasources.DataSource.resolveRelation(DataSource.scala:339)
at org.apache.spark.sql.DataFrameReader.loadV1Source(DataFrameReader.scala:279)
at org.apache.spark.sql.DataFrameReader.$anonfun$load$2(DataFrameReader.scala:268)
at scala.Option.getOrElse(Option.scala:189)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:268)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:203)
at sun.reflect.GeneratedMethodAccessor71.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Any help would be appreciated.
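Not a definitive fix, since the trace stops at Presto's generic "Error executing query" without showing the underlying cause, but two things in the snippet are worth ruling out. Spark's JDBC reader probes the dbtable value with a SELECT wrapper, and some setups only accept the pushdown subquery when it carries an alias ("(SELECT ...) AS t"); and '2019-06-31' is not a valid calendar date (June has 30 days). A minimal sketch with both adjusted, keeping the original column logic (QaasHook is the asker's hook; in Presto, date_format already returns VARCHAR, so the outer cast is redundant):
start_date = '2019-06-15'
end_date = '2019-06-30'  # assumption: '2019-06-31' was meant to be end of June

query = """
(SELECT name,
        date_format(startdt, '%Y%m%d') AS stdt,  -- already VARCHAR in Presto
        cast(amnt AS DOUBLE) AS amount,
        date_format(enddt, '%Y%m%d') AS endt
 FROM table_1
 WHERE cast(date_format(startdt, '%Y%m%d') AS INT) >= {st_dt}
   AND cast(date_format(enddt, '%Y%m%d') AS INT) <= {end_dt}) AS t
""".format(st_dt=start_date.replace('-', ''), end_dt=end_date.replace('-', ''))

records = QaasHook.get_spark_df(query)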

Related

How to understand read_excel in Pandas

I'm trying to import some public data from the web but can't understand the error.
My code:
import pandas as pd
df2022 = pd.read_excel("https://ofslivefs.blob.core.windows.net/files/NSS%20data%202022/September/NSS2022_summary_data.xlsx")
It returns this:
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
/var/folders/v_/yq26pm194xj5ckqy8p_njwc00000gn/T/ipykernel_89117/2424267382.py in <module>
----> 1 df2022 = pd.read_excel("https://ofslivefs.blob.core.windows.net/files/NSS%20data%202022/September/NSS2022_summary_data.xlsx")
~/opt/anaconda3/lib/python3.9/site-packages/pandas/util/_decorators.py in wrapper(*args, **kwargs)
209 else:
210 kwargs[new_arg_name] = new_arg_value
--> 211 return func(*args, **kwargs)
212
213 return cast(F, wrapper)
~/opt/anaconda3/lib/python3.9/site-packages/pandas/util/_decorators.py in wrapper(*args, **kwargs)
329 stacklevel=find_stack_level(),
330 )
--> 331 return func(*args, **kwargs)
332
333 # error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no
~/opt/anaconda3/lib/python3.9/site-packages/pandas/io/excel/_base.py in read_excel(io, sheet_name, header, names, index_col, usecols, squeeze, dtype, engine, converters, true_values, false_values, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, parse_dates, date_parser, thousands, decimal, comment, skipfooter, convert_float, mangle_dupe_cols, storage_options)
480 if not isinstance(io, ExcelFile):
481 should_close = True
--> 482 io = ExcelFile(io, storage_options=storage_options, engine=engine)
483 elif engine and engine != io.engine:
484 raise ValueError(
~/opt/anaconda3/lib/python3.9/site-packages/pandas/io/excel/_base.py in __init__(self, path_or_buffer, engine, storage_options)
1693 self.storage_options = storage_options
1694
-> 1695 self._reader = self._engines[engine](self._io, storage_options=storage_options)
1696
1697 def __fspath__(self):
~/opt/anaconda3/lib/python3.9/site-packages/pandas/io/excel/_openpyxl.py in __init__(self, filepath_or_buffer, storage_options)
555 """
556 import_optional_dependency("openpyxl")
--> 557 super().__init__(filepath_or_buffer, storage_options=storage_options)
558
559 @property
~/opt/anaconda3/lib/python3.9/site-packages/pandas/io/excel/_base.py in __init__(self, filepath_or_buffer, storage_options)
543 self.handles.handle.seek(0)
544 try:
--> 545 self.book = self.load_workbook(self.handles.handle)
546 except Exception:
547 self.close()
~/opt/anaconda3/lib/python3.9/site-packages/pandas/io/excel/_openpyxl.py in load_workbook(self, filepath_or_buffer)
566 from openpyxl import load_workbook
567
--> 568 return load_workbook(
569 filepath_or_buffer, read_only=True, data_only=True, keep_links=False
570 )
~/opt/anaconda3/lib/python3.9/site-packages/openpyxl/reader/excel.py in load_workbook(filename, read_only, keep_vba, data_only, keep_links)
315 reader = ExcelReader(filename, read_only, keep_vba,
316 data_only, keep_links)
--> 317 reader.read()
318 return reader.wb
~/opt/anaconda3/lib/python3.9/site-packages/openpyxl/reader/excel.py in read(self)
281 apply_stylesheet(self.archive, self.wb)
282 self.read_worksheets()
--> 283 self.parser.assign_names()
284 if not self.read_only:
285 self.archive.close()
~/opt/anaconda3/lib/python3.9/site-packages/openpyxl/reader/workbook.py in assign_names(self)
100 reserved = defn.is_reserved
101 if reserved in ("Print_Titles", "Print_Area"):
--> 102 sheet = self.wb._sheets[defn.localSheetId]
103 if reserved == "Print_Titles":
104 rows, cols = _unpack_print_titles(defn)
IndexError: list index out of range
At this point I would traditionally download and convert to CSV, but I want to access it straight from the web.
Specifying the sheet (which I guess I could access as sheet_name="Q27 Providers (benchmarked)") doesn't work either.
It looks like the xlsx file itself is broken, so it can't be read. Did you try to open that xlsx file directly?
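One way to narrow it down, as a sketch assuming requests is installed: download the bytes once and feed them to pandas. The IndexError comes from openpyxl resolving the workbook's Print_Titles/Print_Area defined names, so if those names are corrupt the same error will surface here too, confirming the file rather than the transfer is at fault.
import pandas as pd
import requests
from io import BytesIO

url = ("https://ofslivefs.blob.core.windows.net/files/"
       "NSS%20data%202022/September/NSS2022_summary_data.xlsx")
resp = requests.get(url)
resp.raise_for_status()  # fail loudly on a bad download

# If this still raises IndexError, the workbook itself is malformed.
df2022 = pd.read_excel(BytesIO(resp.content),
                       sheet_name="Q27 Providers (benchmarked)")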

tight_layout KeyError default, matplotlib widget

Using JupyterLab, I receive a KeyError: 'default' when using plt.tight_layout() in combination with %matplotlib widget. The following code reproduces the issue:
import matplotlib.pyplot as plt
import numpy as np
%matplotlib widget

x = np.linspace(0, 10)
y = x**2
plt.plot(x, y)
plt.tight_layout()
The complete error message is the following:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in _wait_cursor_for_draw_cm(self)
3024 try:
-> 3025 self.canvas.set_cursor(tools.Cursors.WAIT)
3026 yield
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in set_cursor(self, cursor)
209 }, cursor=cursor)
--> 210 self.send_event('cursor', cursor=cursor)
211
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in send_event(self, event_type, **kwargs)
391 if self.manager:
--> 392 self.manager._send_event(event_type, **kwargs)
393
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in _send_event(self, event_type, **kwargs)
540 for s in self.web_sockets:
--> 541 s.send_json(payload)
542
~/anaconda3/lib/python3.8/site-packages/ipympl/backend_nbagg.py in send_json(self, content)
180 if content['type'] == 'cursor':
--> 181 self._cursor = cursors_str[content['cursor']]
182
KeyError: 'wait'
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/tmp/ipykernel_119035/3466922198.py in <module>
7 y=x**2
8 plt.plot(x,y)
----> 9 plt.tight_layout()
~/anaconda3/lib/python3.8/site-packages/matplotlib/pyplot.py in tight_layout(pad, h_pad, w_pad, rect)
2300 @_copy_docstring_and_deprecators(Figure.tight_layout)
2301 def tight_layout(*, pad=1.08, h_pad=None, w_pad=None, rect=None):
-> 2302 return gcf().tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
2303
2304
~/anaconda3/lib/python3.8/site-packages/matplotlib/figure.py in tight_layout(self, pad, h_pad, w_pad, rect)
3186 "compatible with tight_layout, so results "
3187 "might be incorrect.")
-> 3188 renderer = _get_renderer(self)
3189 with getattr(renderer, "_draw_disabled", nullcontext)():
3190 kwargs = get_tight_layout_figure(
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in _get_renderer(figure, print_method)
1542 figure.canvas._get_output_canvas(None, fmt), f"print_{fmt}")
1543 try:
-> 1544 print_method(io.BytesIO())
1545 except Done as exc:
1546 renderer, = figure._cachedRenderer, = exc.args
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in wrapper(*args, **kwargs)
1641 kwargs.pop(arg)
1642
-> 1643 return func(*args, **kwargs)
1644
1645 return wrapper
~/anaconda3/lib/python3.8/site-packages/matplotlib/_api/deprecation.py in wrapper(*inner_args, **inner_kwargs)
410 else deprecation_addendum,
411 **kwargs)
--> 412 return func(*inner_args, **inner_kwargs)
413
414 DECORATORS[wrapper] = decorator
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args)
538 *metadata*, including the default 'Software' key.
539 """
--> 540 FigureCanvasAgg.draw(self)
541 mpl.image.imsave(
542 filename_or_obj, self.buffer_rgba(), format="png", origin="upper",
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_agg.py in draw(self)
431 self.renderer = self.get_renderer(cleared=True)
432 # Acquire a lock on the shared font cache.
--> 433 with RendererAgg.lock, \
434 (self.toolbar._wait_cursor_for_draw_cm() if self.toolbar
435 else nullcontext()):
~/anaconda3/lib/python3.8/contextlib.py in __enter__(self)
111 del self.args, self.kwds, self.func
112 try:
--> 113 return next(self.gen)
114 except StopIteration:
115 raise RuntimeError("generator didn't yield") from None
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in _wait_cursor_for_draw_cm(self)
3026 yield
3027 finally:
-> 3028 self.canvas.set_cursor(self._lastCursor)
3029 else:
3030 yield
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in set_cursor(self, cursor)
208 backend_tools.Cursors.RESIZE_VERTICAL: 'ns-resize',
209 }, cursor=cursor)
--> 210 self.send_event('cursor', cursor=cursor)
211
212 def set_image_mode(self, mode):
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in send_event(self, event_type, **kwargs)
390 def send_event(self, event_type, **kwargs):
391 if self.manager:
--> 392 self.manager._send_event(event_type, **kwargs)
393
394
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in _send_event(self, event_type, **kwargs)
539 payload = {'type': event_type, **kwargs}
540 for s in self.web_sockets:
--> 541 s.send_json(payload)
542
543
~/anaconda3/lib/python3.8/site-packages/ipympl/backend_nbagg.py in send_json(self, content)
179 # Change in the widget state?
180 if content['type'] == 'cursor':
--> 181 self._cursor = cursors_str[content['cursor']]
182
183 elif content['type'] == 'message':
KeyError: 'default'
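Both KeyErrors ('wait', then 'default') come from ipympl's cursor lookup table in backend_nbagg.py, not from the plotting code, which suggests an ipympl build that predates the cursor names newer matplotlib sends. A hedged first step, not a verified fix: upgrade ipympl and matplotlib together and restart the kernel.
# Run in a notebook cell, then restart the kernel.
%pip install --upgrade ipympl matplotlib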

KeyError: '1,2' running PostgreSQL regexp_match() in Jupyter Notebook

I am using a Jupyter notebook to run PostgreSQL. Python, Postgres, pip and Jupyter are all up to date. I have been able to load sql with
%load_ext sql
and connected to the postgres database with
%sql postgresql://postgres:postgres@localhost/analysis
The table was created and a CSV was loaded successfully with
%%sql
CREATE TABLE crime_reports (
crime_id bigserial PRIMARY KEY,
date_1 timestamp with time zone,
date_2 timestamp with time zone,
street varchar(250),
city varchar(100),
crime_type varchar(100),
description text,
case_number varchar(50),
original_text text NOT NULL
);
COPY crime_reports (original_text)
FROM 'C:\YourDirectory\crime_reports.csv'
WITH (FORMAT CSV, HEADER OFF, QUOTE '"');
However, this code
%%sql
SELECT crime_id,
regexp_match(original_text, '\d{1,2}\/\d{1,2}\/\d{2}')
FROM crime_reports;
runs successfully in pgAdmin, returning dates, but in the Jupyter notebook I get this error instead:
KeyError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_10976/608277809.py in <module>
----> 1 get_ipython().run_cell_magic('sql', '', "SELECT crime_id,\n regexp_match(original_text, '\\d{1,2}\\/\\d{1,2}\\/\\d{2}')\nFROM crime_reports;\n")
~\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2417 with self.builtin_trap:
2418 args = (magic_arg_s, cell)
-> 2419 result = fn(*args, **kwargs)
2420 return result
2421
~\AppData\Local\Programs\Python\Python310\lib\site-packages\decorator.py in fun(*args, **kw)
230 if not kwsyntax:
231 args, kw = fix(args, kw, sig)
--> 232 return caller(func, *(extras + args), **kw)
233 fun.__name__ = func.__name__
234 fun.__doc__ = func.__doc__
~\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\magic.py in <lambda>(f, *a, **k)
185 # but it's overkill for just that one bit of state.
186 def magic_deco(arg):
--> 187 call = lambda f, *a, **k: f(*a, **k)
188
189 if callable(arg):
~\AppData\Local\Programs\Python\Python310\lib\site-packages\decorator.py in fun(*args, **kw)
230 if not kwsyntax:
231 args, kw = fix(args, kw, sig)
--> 232 return caller(func, *(extras + args), **kw)
233 fun.__name__ = func.__name__
234 fun.__doc__ = func.__doc__
~\AppData\Local\Programs\Python\Python310\lib\site-packages\IPython\core\magic.py in <lambda>(f, *a, **k)
185 # but it's overkill for just that one bit of state.
186 def magic_deco(arg):
--> 187 call = lambda f, *a, **k: f(*a, **k)
188
189 if callable(arg):
~\AppData\Local\Programs\Python\Python310\lib\site-packages\sql\magic.py in execute(self, line, cell, local_ns)
149 cell_params = {}
150 for variable in cell_variables:
--> 151 cell_params[variable] = local_ns[variable]
152 cell = cell.format(**cell_params)
153
KeyError: '1,2'
I have tried trusting the notebook and still get the same error.
Try replacing this:
regexp_match(original_text, '\d{1,2}\/\d{1,2}\/\d{2}')
with this:
regexp_match(original_text, '\d{{1,2}}\/\d{{1,2}}\/\d{{2}}')
It's Python string formatting: ipython-sql runs str.format() over the cell (see cell.format(**cell_params) in the traceback), so a bare {1,2} is parsed as a replacement field named '1,2', hence the KeyError. Doubling the braces escapes them.
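For reference, a sketch of the full cell with the braces doubled; str.format() collapses {{ }} back to { }, so Postgres receives the original {1,2} quantifiers:
%%sql
SELECT crime_id,
       regexp_match(original_text, '\d{{1,2}}\/\d{{1,2}}\/\d{{2}}')
FROM crime_reports;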

There is already an object named "" in the database

I am using PySpark in Databricks to append data to a SQL table via ADF pipelines. The code is shown below:
log_status_df_all = spark.createDataFrame(log_status_df_all)
(log_status_df_all.write
    .format("com.microsoft.sqlserver.jdbc.spark")
    .mode(write_mode)
    .option("url", url)
    .option("dbtable", 'Logs_Collection_Status')
    .option("user", username)
    .option("password", password)
    .save())
log_status_df_all.show()
On some days I get the error message:
com.microsoft.sqlserver.jdbc.SQLServerException: There is already an object named '<table_name>' in the database.
Simply re-running the pipeline updates the table with no issues, so the code itself works. How can I prevent this from happening again? Is it an error when multiple pipelines try writing to the same table at the same time?
The rest of the error message is shown below:
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<command-3143827225825384> in <module>
8
9 log_collection_df = spark.createDataFrame(log_collection_df)
---> 10 write_df_sql(log_collection_df, 'Logs_Collection_Status', 'overwrite')
11
<command-1421348210166948> in write_df_sql(df, table, write_mode)
14
15
---> 16 spark_df.write.format("com.microsoft.sqlserver.jdbc.spark").mode(write_mode).option("url", url).option("dbtable", table_name).option("user", username).option("password", password).save()
17
18 #backup table
/databricks/spark/python/pyspark/sql/readwriter.py in save(self, path, format, mode, partitionBy, **options)
735 self.format(format)
736 if path is None:
--> 737 self._jwrite.save()
738 else:
739 self._jwrite.save(path)
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
/databricks/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/databricks/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o661.save.
: com.microsoft.sqlserver.jdbc.SQLServerException: There is already an object named 'Logs_Collection_Status' in the database.
at com.microsoft.sqlserver.jdbc.SQLServerException.makeFromDatabaseError(SQLServerException.java:258)
at com.microsoft.sqlserver.jdbc.SQLServerStatement.getNextResult(SQLServerStatement.java:1535)
at com.microsoft.sqlserver.jdbc.SQLServerStatement.doExecuteStatement(SQLServerStatement.java:845)
at com.microsoft.sqlserver.jdbc.SQLServerStatement$StmtExecCmd.doExecute(SQLServerStatement.java:752)
at com.microsoft.sqlserver.jdbc.TDSCommand.execute(IOBuffer.java:7151)
at com.microsoft.sqlserver.jdbc.SQLServerConnection.executeCommand(SQLServerConnection.java:2478)
at com.microsoft.sqlserver.jdbc.SQLServerStatement.executeCommand(SQLServerStatement.java:219)
at com.microsoft.sqlserver.jdbc.SQLServerStatement.executeStatement(SQLServerStatement.java:199)
at com.microsoft.sqlserver.jdbc.SQLServerStatement.executeUpdate(SQLServerStatement.java:680)
at com.microsoft.sqlserver.jdbc.spark.BulkCopyUtils$.executeUpdate(BulkCopyUtils.scala:456)
at com.microsoft.sqlserver.jdbc.spark.BulkCopyUtils$.mssqlCreateTable(BulkCopyUtils.scala:495)
at com.microsoft.sqlserver.jdbc.spark.SingleInstanceConnector$.createTable(SingleInstanceConnector.scala:33)
at com.microsoft.sqlserver.jdbc.spark.Connector.write(Connector.scala:60)
at com.microsoft.sqlserver.jdbc.spark.DefaultSource.createRelation(DefaultSource.scala:51)
at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:45)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:86)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:152)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:140)
at org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$5.apply(SparkPlan.scala:193)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:189)
at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:140)
at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:117)
at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:115)
at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:711)
at org.apache.spark.sql.DataFrameWriter$$anonfun$runCommand$1.apply(DataFrameWriter.scala:711)
at org.apache.spark.sql.execution.SQLExecution$$anonfun$withCustomExecutionEnv$1.apply(SQLExecution.scala:113)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:243)
at org.apache.spark.sql.execution.SQLExecution$.withCustomExecutionEnv(SQLExecution.scala:99)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:173)
at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:711)
at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:307)
at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:293)
at sun.reflect.GeneratedMethodAccessor841.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:380)
at py4j.Gateway.invoke(Gateway.java:295)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:251)
at java.lang.Thread.run(Thread.java:748)
The issue has now been resolved. The overwrite branch was the culprit: in overwrite mode the connector drops and recreates the table (note mssqlCreateTable in the trace), so concurrent pipeline runs can race on the CREATE TABLE.
Previous code:
if condition1 == True:
    write_mode = 'overwrite'
elif condition2 == True:
    write_mode = 'append'

log_status_df_all = spark.createDataFrame(log_status_df_all)
(log_status_df_all.write
    .format("com.microsoft.sqlserver.jdbc.spark")
    .mode(write_mode)
    .option("url", url)
    .option("dbtable", 'Logs_Collection_Status')
    .option("user", username)
    .option("password", password)
    .save())
log_status_df_all.show()
Current code:
write_mode = 'append'
log_status_df_all = spark.createDataFrame(log_status_df_all)
(log_status_df_all.write
    .format("com.microsoft.sqlserver.jdbc.spark")
    .mode(write_mode)
    .option("url", url)
    .option("dbtable", 'Logs_Collection_Status')
    .option("user", username)
    .option("password", password)
    .save())
log_status_df_all.show()
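If overwrite semantics are still needed on some days, a hedged alternative (assuming the connector honours the JDBC-style truncate option, as the Spark connector for SQL Server documents) is to truncate-and-reload rather than drop-and-recreate, which takes CREATE TABLE out of the race entirely:
# Sketch only: keep the table's DDL and replace its rows.
(log_status_df_all.write
    .format("com.microsoft.sqlserver.jdbc.spark")
    .mode("overwrite")
    .option("truncate", "true")  # truncate instead of drop + recreate
    .option("url", url)
    .option("dbtable", "Logs_Collection_Status")
    .option("user", username)
    .option("password", password)
    .save())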

Trying to pass a variable from pandas to SQL

I am trying to pass a simple variable (string, list, whatever I can get working) into SQL, using jaydebeapi against an AWS Redshift cluster.
The Python/pandas code looks like this at the moment. I think the %s is part of the problem:
id = (261894)
df = pd.read_sql_query(
    '''
    select * from purchases where customer_id = %s
    ''',
    conn, params=[id])
The exception is:
java.sql.SQLExceptionPyRaisable Traceback (most recent call last)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\sql.py in execute(self, *args, **kwargs)
1377 else:
-> 1378 cur.execute(*args)
1379 return cur
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in execute(self, operation, parameters)
497 self._close_last()
--> 498 self._prep = self._connection.jconn.prepareStatement(operation)
499 self._set_stmt_parms(self._prep, parameters)
java.sql.SQLExceptionPyRaisable: java.sql.SQLException: [Amazon](500310) Invalid operation: syntax error at or near "%"
Position: 70;
During handling of the above exception, another exception occurred:
java.sql.SQLExceptionPyRaisable Traceback (most recent call last)
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in rollback(self)
416 try:
--> 417 self.jconn.rollback()
418 except:
java.sql.SQLExceptionPyRaisable: java.sql.SQLException: [Amazon][JDBC](11680) Cannot use rollback while Connection is in auto-commit mode.
During handling of the above exception, another exception occurred:
DatabaseError Traceback (most recent call last)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\sql.py in execute(self, *args, **kwargs)
1381 try:
-> 1382 self.con.rollback()
1383 except Exception: # pragma: no cover
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in rollback(self)
418 except:
--> 419 _handle_sql_exception()
420
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in _handle_sql_exception_jpype()
155 exc_type = InterfaceError
--> 156 reraise(exc_type, exc_info[1], exc_info[2])
157
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in reraise(tp, value, tb)
56 if tb:
---> 57 raise value.with_traceback(tb)
58 raise value
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in rollback(self)
416 try:
--> 417 self.jconn.rollback()
418 except:
DatabaseError: java.sql.SQLException: [Amazon][JDBC](11680) Cannot use rollback while Connection is in auto-commit mode.
During handling of the above exception, another exception occurred:
DatabaseError Traceback (most recent call last)
<ipython-input> in <module>
5 select * from purchases where customer_id = %s
6 ''',
----> 7 conn, params=[id])
8
C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\sql.py in read_sql_query(sql, con, index_col, coerce_float, params, parse_dates, chunksize)
312 return pandas_sql.read_query(
313 sql, index_col=index_col, params=params, coerce_float=coerce_float,
--> 314 parse_dates=parse_dates, chunksize=chunksize)
315
316
C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\sql.py in read_query(self, sql, index_col, coerce_float, params, parse_dates, chunksize)
1411
1412 args = _convert_params(sql, params)
-> 1413 cursor = self.execute(*args)
1414 columns = [col_desc[0] for col_desc in cursor.description]
1415
C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\sql.py in execute(self, *args, **kwargs)
1384 ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
1385 " to rollback" % (args[0], exc))
-> 1386 raise_with_traceback(ex)
1387
1388 ex = DatabaseError(
C:\ProgramData\Anaconda3\lib\site-packages\pandas\compat\__init__.py in raise_with_traceback(exc, traceback)
402 if traceback == Ellipsis:
403 _, _, traceback = sys.exc_info()
--> 404 raise exc.with_traceback(traceback)
405 else:
406 # this version of raise is a syntax error in Python 3
C:\ProgramData\Anaconda3\lib\site-packages\pandas\io\sql.py in execute(self, *args, **kwargs)
1380 except Exception as exc:
1381 try:
-> 1382 self.con.rollback()
1383 except Exception: # pragma: no cover
1384 ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in rollback(self)
417 self.jconn.rollback()
418 except:
--> 419 _handle_sql_exception()
420
421 def cursor(self):
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in _handle_sql_exception_jpype()
154 else:
155 exc_type = InterfaceError
--> 156 reraise(exc_type, exc_info[1], exc_info[2])
157
158 def _jdbc_connect_jpype(jclassname, url, driver_args, jars, libs):
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in reraise(tp, value, tb)
55 value = tp(value)
56 if tb:
---> 57 raise value.with_traceback(tb)
58 raise value
59
C:\ProgramData\Anaconda3\lib\site-packages\jaydebeapi\__init__.py in rollback(self)
415 def rollback(self):
416 try:
--> 417 self.jconn.rollback()
418 except:
419 _handle_sql_exception()
DatabaseError: Execution failed on sql:
select * from purchases where customer_id = %s
java.sql.SQLException: [Amazon](500310) Invalid operation: syntax error at or near "%"
Position: 70;
unable to rollback
Any thoughts on how I can get this to work? Do I need an alternative to %s?
Try this:
id = (261894)
df = pd.read_sql_query(
    '''
    select * from purchases where customer_id = ?
    ''',
    conn, params=[id])
It should work: jaydebeapi follows the DB-API qmark parameter style, so JDBC placeholders are written as ? rather than %s.
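As a usage note, a sketch assuming the same conn (purchase_date is a hypothetical column for illustration): qmark placeholders are purely positional, so multiple parameters line up with multiple ?s in order:
df = pd.read_sql_query(
    "select * from purchases where customer_id = ? and purchase_date >= ?",
    conn,
    params=[261894, "2019-01-01"],  # matched to the ?s left to right
)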