Related
I have tried to load my DataFrame into PostgreSQL, specifically into the PostgreSQL 14 server (I have two servers: PostgreSQL 9.3 running on port 5434, and PostgreSQL 14 running on port 5433), with this command:
from sqlalchemy import create_engine
engine = create_engine('postgresql://postgres:password@localhost:5433/MYDATABASE')
df.to_sql('My_Table', engine)
This is the error I get. I have also tried other approaches, but it is always the same error; I guess it is related to the two servers I am running:
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:3280, in Engine._wrap_pool_connect(self, fn, connection)
3279 try:
-> 3280 return fn()
3281 except dialect.dbapi.Error as e:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:310, in Pool.connect(self)
303 """Return a DBAPI connection from the pool.
304
305 The connection is instrumented such that when its
(...)
308
309 """
--> 310 return _ConnectionFairy._checkout(self)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:868, in _ConnectionFairy._checkout(cls, pool, threadconns, fairy)
867 if not fairy:
--> 868 fairy = _ConnectionRecord.checkout(pool)
870 fairy._pool = pool
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:476, in _ConnectionRecord.checkout(cls, pool)
474     @classmethod
475 def checkout(cls, pool):
--> 476 rec = pool._do_get()
477 try:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\impl.py:145, in QueuePool._do_get(self)
144 except:
--> 145 with util.safe_reraise():
146 self._dec_overflow()
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\langhelpers.py:70, in safe_reraise.__exit__(self, type_, value, traceback)
69 if not self.warn_only:
---> 70 compat.raise_(
71 exc_value,
72 with_traceback=exc_tb,
73 )
74 else:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\compat.py:207, in raise_(***failed resolving arguments***)
206 try:
--> 207 raise exception
208 finally:
209 # credit to
210 # https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
211 # as the __traceback__ object creates a cycle
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\impl.py:143, in QueuePool._do_get(self)
142 try:
--> 143 return self._create_connection()
144 except:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:256, in Pool._create_connection(self)
254 """Called by subclasses to create a new ConnectionRecord."""
--> 256 return _ConnectionRecord(self)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:371, in _ConnectionRecord.__init__(self, pool, connect)
370 if connect:
--> 371 self.__connect()
372 self.finalize_callback = deque()
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:665, in _ConnectionRecord.__connect(self)
664 except Exception as e:
--> 665 with util.safe_reraise():
666 pool.logger.debug("Error on connect(): %s", e)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\langhelpers.py:70, in safe_reraise.__exit__(self, type_, value, traceback)
69 if not self.warn_only:
---> 70 compat.raise_(
71 exc_value,
72 with_traceback=exc_tb,
73 )
74 else:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\compat.py:207, in raise_(***failed resolving arguments***)
206 try:
--> 207 raise exception
208 finally:
209 # credit to
210 # https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
211 # as the __traceback__ object creates a cycle
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:661, in _ConnectionRecord.__connect(self)
660 self.starttime = time.time()
--> 661 self.dbapi_connection = connection = pool._invoke_creator(self)
662 pool.logger.debug("Created new connection %r", connection)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\create.py:590, in create_engine.<locals>.connect(connection_record)
589 return connection
--> 590 return dialect.connect(*cargs, **cparams)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\default.py:597, in DefaultDialect.connect(self, *cargs, **cparams)
595 def connect(self, *cargs, **cparams):
596 # inherits the docstring from interfaces.Dialect.connect
--> 597 return self.dbapi.connect(*cargs, **cparams)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\psycopg2\__init__.py:122, in connect(dsn, connection_factory, cursor_factory, **kwargs)
121 dsn = _ext.make_dsn(dsn, **kwargs)
--> 122 conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
123 if cursor_factory is not None:
OperationalError:
The above exception was the direct cause of the following exception:
OperationalError Traceback (most recent call last)
Input In [105], in <cell line: 3>()
1 from sqlalchemy import create_engine
2 engine = create_engine('postgresql://postgres:password@localhost:5433/MYDATABASE')
----> 3 df.to_sql('My_Table', engine)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\core\generic.py:2951, in NDFrame.to_sql(self, name, con, schema, if_exists, index, index_label, chunksize, dtype, method)
2794 """
2795 Write records stored in a DataFrame to a SQL database.
2796
(...)
2947 [(1,), (None,), (2,)]
2948 """ # noqa:E501
2949 from pandas.io import sql
-> 2951 return sql.to_sql(
2952 self,
2953 name,
2954 con,
2955 schema=schema,
2956 if_exists=if_exists,
2957 index=index,
2958 index_label=index_label,
2959 chunksize=chunksize,
2960 dtype=dtype,
2961 method=method,
2962 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\sql.py:697, in to_sql(frame, name, con, schema, if_exists, index, index_label, chunksize, dtype, method, engine, **engine_kwargs)
692 elif not isinstance(frame, DataFrame):
693 raise NotImplementedError(
694 "'frame' argument should be either a Series or a DataFrame"
695 )
--> 697 return pandas_sql.to_sql(
698 frame,
699 name,
700 if_exists=if_exists,
701 index=index,
702 index_label=index_label,
703 schema=schema,
704 chunksize=chunksize,
705 dtype=dtype,
706 method=method,
707 engine=engine,
708 **engine_kwargs,
709 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\sql.py:1729, in SQLDatabase.to_sql(self, frame, name, if_exists, index, index_label, schema, chunksize, dtype, method, engine, **engine_kwargs)
1679 """
1680 Write records stored in a DataFrame to a SQL database.
1681
(...)
1725 Any additional kwargs are passed to the engine.
1726 """
1727 sql_engine = get_engine(engine)
-> 1729 table = self.prep_table(
1730 frame=frame,
1731 name=name,
1732 if_exists=if_exists,
1733 index=index,
1734 index_label=index_label,
1735 schema=schema,
1736 dtype=dtype,
1737 )
1739 total_inserted = sql_engine.insert_records(
1740 table=table,
1741 con=self.connectable,
(...)
1748 **engine_kwargs,
1749 )
1751 self.check_case_sensitive(name=name, schema=schema)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\sql.py:1628, in SQLDatabase.prep_table(self, frame, name, if_exists, index, index_label, schema, dtype)
1616 raise ValueError(f"The type of {col} is not a SQLAlchemy type")
1618 table = SQLTable(
1619 name,
1620 self,
(...)
1626 dtype=dtype,
1627 )
-> 1628 table.create()
1629 return table
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\sql.py:831, in SQLTable.create(self)
830 def create(self):
--> 831 if self.exists():
832 if self.if_exists == "fail":
833 raise ValueError(f"Table '{self.name}' already exists.")
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\sql.py:815, in SQLTable.exists(self)
814 def exists(self):
--> 815 return self.pd_sql.has_table(self.name, self.schema)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pandas\io\sql.py:1762, in SQLDatabase.has_table(self, name, schema)
1759 if _gt14():
1760 from sqlalchemy import inspect
-> 1762 insp = inspect(self.connectable)
1763 return insp.has_table(name, schema or self.meta.schema)
1764 else:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\inspection.py:64, in inspect(subject, raiseerr)
62 if reg is True:
63 return subject
---> 64 ret = reg(subject)
65 if ret is not None:
66 break
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\reflection.py:182, in Inspector._engine_insp(bind)
180 @inspection._inspects(Engine)
181 def _engine_insp(bind):
--> 182 return Inspector._construct(Inspector._init_engine, bind)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\reflection.py:117, in Inspector._construct(cls, init, bind)
114 cls = bind.dialect.inspector
116 self = cls.__new__(cls)
--> 117 init(self, bind)
118 return self
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\reflection.py:128, in Inspector._init_engine(self, engine)
126 def _init_engine(self, engine):
127 self.bind = self.engine = engine
--> 128 engine.connect().close()
129 self._op_context_requires_connect = True
130 self.dialect = self.engine.dialect
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:3234, in Engine.connect(self, close_with_result)
3219 def connect(self, close_with_result=False):
3220 """Return a new :class:`_engine.Connection` object.
3221
3222 The :class:`_engine.Connection` object is a facade that uses a DBAPI
(...)
3231
3232 """
-> 3234 return self._connection_cls(self, close_with_result=close_with_result)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:96, in Connection.__init__(self, engine, connection, close_with_result, _branch_from, _execution_options, _dispatch, _has_events, _allow_revalidate)
91 self._has_events = _branch_from._has_events
92 else:
93 self._dbapi_connection = (
94 connection
95 if connection is not None
---> 96 else engine.raw_connection()
97 )
99 self._transaction = self._nested_transaction = None
100 self.__savepoint_seq = 0
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:3313, in Engine.raw_connection(self, _connection)
3291 def raw_connection(self, _connection=None):
3292 """Return a "raw" DBAPI connection from the connection pool.
3293
3294 The returned object is a proxied version of the DBAPI
(...)
3311
3312 """
-> 3313 return self._wrap_pool_connect(self.pool.connect, _connection)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:3283, in Engine._wrap_pool_connect(self, fn, connection)
3281 except dialect.dbapi.Error as e:
3282 if connection is None:
-> 3283 Connection._handle_dbapi_exception_noconnection(
3284 e, dialect, self
3285 )
3286 else:
3287 util.raise_(
3288 sys.exc_info()[1], with_traceback=sys.exc_info()[2]
3289 )
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:2117, in Connection._handle_dbapi_exception_noconnection(cls, e, dialect, engine)
2115 util.raise_(newraise, with_traceback=exc_info[2], from_=e)
2116 elif should_wrap:
-> 2117 util.raise_(
2118 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
2119 )
2120 else:
2121 util.raise_(exc_info[1], with_traceback=exc_info[2])
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\compat.py:207, in raise_(***failed resolving arguments***)
204 exception.__cause__ = replace_context
206 try:
--> 207 raise exception
208 finally:
209 # credit to
210 # https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
211 # as the __traceback__ object creates a cycle
212 del exception, replace_context, from_, with_traceback
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\base.py:3280, in Engine._wrap_pool_connect(self, fn, connection)
3278 dialect = self.dialect
3279 try:
-> 3280 return fn()
3281 except dialect.dbapi.Error as e:
3282 if connection is None:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:310, in Pool.connect(self)
302 def connect(self):
303 """Return a DBAPI connection from the pool.
304
305 The connection is instrumented such that when its
(...)
308
309 """
--> 310 return _ConnectionFairy._checkout(self)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:868, in _ConnectionFairy._checkout(cls, pool, threadconns, fairy)
865     @classmethod
866 def _checkout(cls, pool, threadconns=None, fairy=None):
867 if not fairy:
--> 868 fairy = _ConnectionRecord.checkout(pool)
870 fairy._pool = pool
871 fairy._counter = 0
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:476, in _ConnectionRecord.checkout(cls, pool)
474     @classmethod
475 def checkout(cls, pool):
--> 476 rec = pool._do_get()
477 try:
478 dbapi_connection = rec.get_connection()
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\impl.py:145, in QueuePool._do_get(self)
143 return self._create_connection()
144 except:
--> 145 with util.safe_reraise():
146 self._dec_overflow()
147 else:
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\langhelpers.py:70, in safe_reraise.__exit__(self, type_, value, traceback)
68 self._exc_info = None # remove potential circular references
69 if not self.warn_only:
---> 70 compat.raise_(
71 exc_value,
72 with_traceback=exc_tb,
73 )
74 else:
75 if not compat.py3k and self._exc_info and self._exc_info[1]:
76 # emulate Py3K's behavior of telling us when an exception
77 # occurs in an exception handler.
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\compat.py:207, in raise_(***failed resolving arguments***)
204 exception.__cause__ = replace_context
206 try:
--> 207 raise exception
208 finally:
209 # credit to
210 # https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
211 # as the __traceback__ object creates a cycle
212 del exception, replace_context, from_, with_traceback
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\impl.py:143, in QueuePool._do_get(self)
141 if self._inc_overflow():
142 try:
--> 143 return self._create_connection()
144 except:
145 with util.safe_reraise():
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:256, in Pool._create_connection(self)
253 def _create_connection(self):
254 """Called by subclasses to create a new ConnectionRecord."""
--> 256 return _ConnectionRecord(self)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:371, in _ConnectionRecord.__init__(self, pool, connect)
369 self.__pool = pool
370 if connect:
--> 371 self.__connect()
372 self.finalize_callback = deque()
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:665, in _ConnectionRecord.__connect(self)
663 self.fresh = True
664 except Exception as e:
--> 665 with util.safe_reraise():
666 pool.logger.debug("Error on connect(): %s", e)
667 else:
668 # in SQLAlchemy 1.4 the first_connect event is not used by
669 # the engine, so this will usually not be set
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\langhelpers.py:70, in safe_reraise.__exit__(self, type_, value, traceback)
68 self._exc_info = None # remove potential circular references
69 if not self.warn_only:
---> 70 compat.raise_(
71 exc_value,
72 with_traceback=exc_tb,
73 )
74 else:
75 if not compat.py3k and self._exc_info and self._exc_info[1]:
76 # emulate Py3K's behavior of telling us when an exception
77 # occurs in an exception handler.
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\util\compat.py:207, in raise_(***failed resolving arguments***)
204 exception.__cause__ = replace_context
206 try:
--> 207 raise exception
208 finally:
209 # credit to
210 # https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
211 # as the __traceback__ object creates a cycle
212 del exception, replace_context, from_, with_traceback
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\pool\base.py:661, in _ConnectionRecord.__connect(self)
659 try:
660 self.starttime = time.time()
--> 661 self.dbapi_connection = connection = pool._invoke_creator(self)
662 pool.logger.debug("Created new connection %r", connection)
663 self.fresh = True
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\create.py:590, in create_engine.<locals>.connect(connection_record)
588 if connection is not None:
589 return connection
--> 590 return dialect.connect(*cargs, **cparams)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\sqlalchemy\engine\default.py:597, in DefaultDialect.connect(self, *cargs, **cparams)
595 def connect(self, *cargs, **cparams):
596 # inherits the docstring from interfaces.Dialect.connect
--> 597 return self.dbapi.connect(*cargs, **cparams)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\psycopg2\__init__.py:122, in connect(dsn, connection_factory, cursor_factory, **kwargs)
119 kwasync['async_'] = kwargs.pop('async_')
121 dsn = _ext.make_dsn(dsn, **kwargs)
--> 122 conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
123 if cursor_factory is not None:
124 conn.cursor_factory = cursor_factory
OperationalError: (psycopg2.OperationalError)
(Background on this error at: https://sqlalche.me/e/14/e3q8)
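The sqlalche.me/e/14/e3q8 reference classifies this as a connection failure, so a useful first step is to confirm, outside SQLAlchemy, which of the two servers actually answers on each port. A hedged diagnostic sketch (host, credentials, and database name mirror the question and may need adjusting):

import psycopg2

# Probe both ports and report the server version each one runs.
for port in (5433, 5434):
    try:
        conn = psycopg2.connect(
            host="localhost", port=port,
            user="postgres", password="password", dbname="MYDATABASE",
        )
        # server_version is an integer, e.g. 140002 for PostgreSQL 14.2
        print(port, "->", conn.server_version)
        conn.close()
    except psycopg2.OperationalError as exc:
        print(port, "-> connection failed:", exc)

If the password contains characters such as @ or #, it must also be URL-encoded (urllib.parse.quote_plus) before being embedded in the SQLAlchemy URL.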
Using JupyterLab, I receive a KeyError: 'default' when using plt.tight_layout() in combination with %matplotlib widget. The following code reproduces the issue:
import matplotlib.pyplot as plt
import numpy as np
%matplotlib widget
x = np.linspace(0, 10)
y = x**2
plt.plot(x, y)
plt.tight_layout()
The complete error message is the following:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in _wait_cursor_for_draw_cm(self)
3024 try:
-> 3025 self.canvas.set_cursor(tools.Cursors.WAIT)
3026 yield
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in set_cursor(self, cursor)
209 }, cursor=cursor)
--> 210 self.send_event('cursor', cursor=cursor)
211
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in send_event(self, event_type, **kwargs)
391 if self.manager:
--> 392 self.manager._send_event(event_type, **kwargs)
393
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in _send_event(self, event_type, **kwargs)
540 for s in self.web_sockets:
--> 541 s.send_json(payload)
542
~/anaconda3/lib/python3.8/site-packages/ipympl/backend_nbagg.py in send_json(self, content)
180 if content['type'] == 'cursor':
--> 181 self._cursor = cursors_str[content['cursor']]
182
KeyError: 'wait'
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
/tmp/ipykernel_119035/3466922198.py in <module>
7 y=x**2
8 plt.plot(x,y)
----> 9 plt.tight_layout()
~/anaconda3/lib/python3.8/site-packages/matplotlib/pyplot.py in tight_layout(pad, h_pad, w_pad, rect)
2300 @_copy_docstring_and_deprecators(Figure.tight_layout)
2301 def tight_layout(*, pad=1.08, h_pad=None, w_pad=None, rect=None):
-> 2302 return gcf().tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
2303
2304
~/anaconda3/lib/python3.8/site-packages/matplotlib/figure.py in tight_layout(self, pad, h_pad, w_pad, rect)
3186 "compatible with tight_layout, so results "
3187 "might be incorrect.")
-> 3188 renderer = _get_renderer(self)
3189 with getattr(renderer, "_draw_disabled", nullcontext)():
3190 kwargs = get_tight_layout_figure(
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in _get_renderer(figure, print_method)
1542 figure.canvas._get_output_canvas(None, fmt), f"print_{fmt}")
1543 try:
-> 1544 print_method(io.BytesIO())
1545 except Done as exc:
1546 renderer, = figure._cachedRenderer, = exc.args
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in wrapper(*args, **kwargs)
1641 kwargs.pop(arg)
1642
-> 1643 return func(*args, **kwargs)
1644
1645 return wrapper
~/anaconda3/lib/python3.8/site-packages/matplotlib/_api/deprecation.py in wrapper(*inner_args, **inner_kwargs)
410 else deprecation_addendum,
411 **kwargs)
--> 412 return func(*inner_args, **inner_kwargs)
413
414 DECORATORS[wrapper] = decorator
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args)
538 *metadata*, including the default 'Software' key.
539 """
--> 540 FigureCanvasAgg.draw(self)
541 mpl.image.imsave(
542 filename_or_obj, self.buffer_rgba(), format="png", origin="upper",
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_agg.py in draw(self)
431 self.renderer = self.get_renderer(cleared=True)
432 # Acquire a lock on the shared font cache.
--> 433 with RendererAgg.lock, \
434 (self.toolbar._wait_cursor_for_draw_cm() if self.toolbar
435 else nullcontext()):
~/anaconda3/lib/python3.8/contextlib.py in __enter__(self)
111 del self.args, self.kwds, self.func
112 try:
--> 113 return next(self.gen)
114 except StopIteration:
115 raise RuntimeError("generator didn't yield") from None
~/anaconda3/lib/python3.8/site-packages/matplotlib/backend_bases.py in _wait_cursor_for_draw_cm(self)
3026 yield
3027 finally:
-> 3028 self.canvas.set_cursor(self._lastCursor)
3029 else:
3030 yield
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in set_cursor(self, cursor)
208 backend_tools.Cursors.RESIZE_VERTICAL: 'ns-resize',
209 }, cursor=cursor)
--> 210 self.send_event('cursor', cursor=cursor)
211
212 def set_image_mode(self, mode):
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in send_event(self, event_type, **kwargs)
390 def send_event(self, event_type, **kwargs):
391 if self.manager:
--> 392 self.manager._send_event(event_type, **kwargs)
393
394
~/anaconda3/lib/python3.8/site-packages/matplotlib/backends/backend_webagg_core.py in _send_event(self, event_type, **kwargs)
539 payload = {'type': event_type, **kwargs}
540 for s in self.web_sockets:
--> 541 s.send_json(payload)
542
543
~/anaconda3/lib/python3.8/site-packages/ipympl/backend_nbagg.py in send_json(self, content)
179 # Change in the widget state?
180 if content['type'] == 'cursor':
--> 181 self._cursor = cursors_str[content['cursor']]
182
183 elif content['type'] == 'message':
KeyError: 'default'
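The failing lookup is in ipympl's cursor-name table, which suggests a version mismatch between matplotlib and ipympl (newer matplotlib releases changed the cursor names sent to interactive backends) rather than a problem in the plotting code itself. A hedged first check is to compare the installed versions and upgrade ipympl if it lags behind:

import matplotlib
import ipympl

print("matplotlib:", matplotlib.__version__)
print("ipympl:", ipympl.__version__)

# In a notebook cell, then restart the kernel afterwards:
# %pip install -U ipympl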
I'm working with a Dask cluster on GCP. I'm using this code to deploy it:
from dask_cloudprovider.gcp import GCPCluster
from dask.distributed import Client
environment_vars = {
    'EXTRA_PIP_PACKAGES': '"gcsfs"'
}
cluster = GCPCluster(
    n_workers=32,
    docker_image='daskdev/dask:2021.2.0',
    env_vars=environment_vars,
    network='my-network',
    # filesystem_size=150,
    machine_type='e2-standard-16',
    projectid='my-project-id',
    zone='us-central1-a',
    on_host_maintenance="MIGRATE",
)
client = Client(cluster)
Then I read CSV files with the following code:
import dask.dataframe as dd

col_dtypes = {
    'var1': 'float64',
    'var2': 'object',
    'var3': 'object',
    'var4': 'float64'
}
df = dd.read_csv('gs://my_bucket/files-*.csv', blocksize=None, dtype=col_dtypes)
df = df.persist()
Everything works fine, but when I try to run any query or calculation, I get an error. For instance, this piece of code:
df.var1.value_counts().compute()
This is the output:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-14-711a7c21ed42> in <module>
----> 1 df.var1.value_counts().compute()
/opt/conda/lib/python3.8/site-packages/dask/base.py in compute(self, **kwargs)
279 dask.base.compute
280 """
--> 281 (result,) = compute(self, traverse=False, **kwargs)
282 return result
283
/opt/conda/lib/python3.8/site-packages/dask/base.py in compute(*args, **kwargs)
561 postcomputes.append(x.__dask_postcompute__())
562
--> 563 results = schedule(dsk, keys, **kwargs)
564 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
565
/opt/conda/lib/python3.8/site-packages/distributed/client.py in get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
2653 should_rejoin = False
2654 try:
-> 2655 results = self.gather(packed, asynchronous=asynchronous, direct=direct)
2656 finally:
2657 for f in futures.values():
/opt/conda/lib/python3.8/site-packages/distributed/client.py in gather(self, futures, errors, direct, asynchronous)
1962 else:
1963 local_worker = None
-> 1964 return self.sync(
1965 self._gather,
1966 futures,
/opt/conda/lib/python3.8/site-packages/distributed/client.py in sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
836 return future
837 else:
--> 838 return sync(
839 self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
840 )
/opt/conda/lib/python3.8/site-packages/distributed/utils.py in sync(loop, func, callback_timeout, *args, **kwargs)
338 if error[0]:
339 typ, exc, tb = error[0]
--> 340 raise exc.with_traceback(tb)
341 else:
342 return result[0]
/opt/conda/lib/python3.8/site-packages/distributed/utils.py in f()
322 if callback_timeout is not None:
323 future = asyncio.wait_for(future, callback_timeout)
--> 324 result[0] = yield future
325 except Exception as exc:
326 error[0] = sys.exc_info()
/opt/conda/lib/python3.8/site-packages/tornado/gen.py in run(self)
760
761 try:
--> 762 value = future.result()
763 except Exception:
764 exc_info = sys.exc_info()
/opt/conda/lib/python3.8/site-packages/distributed/client.py in _gather(self, futures, errors, direct, local_worker)
1827 exc = CancelledError(key)
1828 else:
-> 1829 raise exception.with_traceback(traceback)
1830 raise exc
1831 if errors == "skip":
/opt/conda/lib/python3.8/site-packages/dask/optimization.py in __call__()
961 if not len(args) == len(self.inkeys):
962 raise ValueError("Expected %d args, got %d" % (len(self.inkeys), len(args)))
--> 963 return core.get(self.dsk, self.outkey, dict(zip(self.inkeys, args)))
964
965 def __reduce__(self):
/opt/conda/lib/python3.8/site-packages/dask/core.py in get()
149 for key in toposort(dsk):
150 task = dsk[key]
--> 151 result = _execute_task(task, cache)
152 cache[key] = result
153 result = _execute_task(out, cache)
/opt/conda/lib/python3.8/site-packages/dask/core.py in _execute_task()
119 # temporaries by their reference count and can execute certain
120 # operations in-place.
--> 121 return func(*(_execute_task(a, cache) for a in args))
122 elif not ishashable(arg):
123 return arg
/opt/conda/lib/python3.8/site-packages/dask/utils.py in apply()
33 def apply(func, args, kwargs=None):
34 if kwargs:
---> 35 return func(*args, **kwargs)
36 else:
37 return func(*args)
/opt/conda/lib/python3.8/site-packages/dask/dataframe/core.py in apply_and_enforce()
5474 return meta
5475 if is_dataframe_like(df):
-> 5476 check_matching_columns(meta, df)
5477 c = meta.columns
5478 else:
/opt/conda/lib/python3.8/site-packages/dask/dataframe/utils.py in check_matching_columns()
690 def check_matching_columns(meta, actual):
691 # Need nan_to_num otherwise nan comparison gives False
--> 692 if not np.array_equal(np.nan_to_num(meta.columns), np.nan_to_num(actual.columns)):
693 extra = methods.tolist(actual.columns.difference(meta.columns))
694 missing = methods.tolist(meta.columns.difference(actual.columns))
/opt/conda/lib/python3.8/site-packages/pandas/core/generic.py in __getattr__()
5268 or name in self._accessors
5269 ):
-> 5270 return object.__getattribute__(self, name)
5271 else:
5272 if self._info_axis._can_hold_identifiers_and_holds_name(name):
pandas/_libs/properties.pyx in pandas._libs.properties.AxisProperty.__get__()
/opt/conda/lib/python3.8/site-packages/pandas/core/generic.py in __getattr__()
5268 or name in self._accessors
5269 ):
-> 5270 return object.__getattribute__(self, name)
5271 else:
5272 if self._info_axis._can_hold_identifiers_and_holds_name(name):
AttributeError: 'DataFrame' object has no attribute '_data'
The version of pandas in my Dockerfile is 1.0.1, so I already tried upgrading pandas (to version 1.2.2), but it didn't work. What am I doing wrong?
My guess is that you have a version mismatch somewhere. What does client.get_versions(check=True) say?
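A minimal sketch of that check, reusing the client from the question. With check=True, get_versions raises a ValueError naming any package whose version differs between the client, the scheduler, and the workers, which is the usual cause of this kind of pandas AttributeError:

from dask.distributed import Client

client = Client(cluster)  # the GCPCluster created above

# Raises ValueError on a mismatch; otherwise returns a mapping with
# "client", "scheduler", and per-worker entries, each listing packages.
versions = client.get_versions(check=True)
print(versions["client"]["packages"].get("pandas"))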
I am training a Keras model using a custom training loop in TensorFlow, where the weights are updated with tf.GradientTape rather than the model.fit() method. As such, the model is not compiled before training.
After exporting the saved_model, I am able to successfully load it for inference:
model = tf.saved_model.load("path/to/saved_model")
pred_fn = model.signatures["serving_default"]
results = pred_fn(tf.constant(examples))
However, when I try loading it with TFMA using run_model_analysis:
eval_shared_model = tfma.default_eval_shared_model("path/to/saved_model", eval_config=eval_config)
eval_results = tfma.run_model_analysis(
eval_shared_model=eval_shared_model,
data_location=test_tfrecords_path,
file_format="tfrecords"
)
I get the following error:
WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.
-----------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-107-19f51f42014a> in <module>
2 eval_shared_model=eval_shared_model,
3 data_location=test_tfrecords_path,
----> 4 file_format="tfrecords"
5 )
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/tensorflow_model_analysis/api/model_eval_lib.py in run_model_analysis(eval_shared_model, eval_config, data_location, file_format, output_path, extractors, evaluators, writers, pipeline_options, slice_spec, write_config, compute_confidence_intervals, min_slice_size, random_seed_for_testing, schema)
1200 random_seed_for_testing=random_seed_for_testing,
1201 tensor_adapter_config=tensor_adapter_config,
-> 1202 schema=schema))
1203 # pylint: enable=no-value-for-parameter
1204
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pvalue.py in __or__(self, ptransform)
138
139 def __or__(self, ptransform):
--> 140 return self.pipeline.apply(ptransform, self)
141
142
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
575 if isinstance(transform, ptransform._NamedPTransform):
576 return self.apply(
--> 577 transform.transform, pvalueish, label or transform.label)
578
579 if not isinstance(transform, ptransform.PTransform):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
585 try:
586 old_label, transform.label = transform.label, label
--> 587 return self.apply(transform, pvalueish)
588 finally:
589 transform.label = old_label
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
628 transform.type_check_inputs(pvalueish)
629
--> 630 pvalueish_result = self.runner.apply(transform, pvalueish, self._options)
631
632 if type_options is not None and type_options.pipeline_type_check:
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply(self, transform, input, options)
196 m = getattr(self, 'apply_%s' % cls.__name__, None)
197 if m:
--> 198 return m(transform, input, options)
199 raise NotImplementedError(
200 'Execution of [%s] not implemented in runner %s.' % (transform, self))
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply_PTransform(self, transform, input, options)
226 def apply_PTransform(self, transform, input, options):
227 # The base case of apply is to call the transform's expand.
--> 228 return transform.expand(input)
229
230 def run_transform(self,
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/transforms/ptransform.py in expand(self, pcoll)
921 # Might not be a function.
922 pass
--> 923 return self._fn(pcoll, *args, **kwargs)
924
925 def default_label(self):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/tensorflow_model_analysis/api/model_eval_lib.py in ExtractEvaluateAndWriteResults(examples, eval_shared_model, eval_config, extractors, evaluators, writers, output_path, display_only_data_location, display_only_file_format, slice_spec, write_config, compute_confidence_intervals, min_slice_size, random_seed_for_testing, tensor_adapter_config, schema)
1079 | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
1080 extractors=extractors, evaluators=evaluators)
-> 1081 | 'WriteResults' >> WriteResults(writers=writers))
1082
1083 return beam.pvalue.PDone(examples.pipeline)
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pvalue.py in __or__(self, ptransform)
138
139 def __or__(self, ptransform):
--> 140 return self.pipeline.apply(ptransform, self)
141
142
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
575 if isinstance(transform, ptransform._NamedPTransform):
576 return self.apply(
--> 577 transform.transform, pvalueish, label or transform.label)
578
579 if not isinstance(transform, ptransform.PTransform):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
585 try:
586 old_label, transform.label = transform.label, label
--> 587 return self.apply(transform, pvalueish)
588 finally:
589 transform.label = old_label
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
628 transform.type_check_inputs(pvalueish)
629
--> 630 pvalueish_result = self.runner.apply(transform, pvalueish, self._options)
631
632 if type_options is not None and type_options.pipeline_type_check:
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply(self, transform, input, options)
196 m = getattr(self, 'apply_%s' % cls.__name__, None)
197 if m:
--> 198 return m(transform, input, options)
199 raise NotImplementedError(
200 'Execution of [%s] not implemented in runner %s.' % (transform, self))
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply_PTransform(self, transform, input, options)
226 def apply_PTransform(self, transform, input, options):
227 # The base case of apply is to call the transform's expand.
--> 228 return transform.expand(input)
229
230 def run_transform(self,
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/transforms/ptransform.py in expand(self, pcoll)
921 # Might not be a function.
922 pass
--> 923 return self._fn(pcoll, *args, **kwargs)
924
925 def default_label(self):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/tensorflow_model_analysis/api/model_eval_lib.py in ExtractAndEvaluate(extracts, extractors, evaluators)
818 for v in evaluators:
819 if v.run_after == x.stage_name:
--> 820 update(evaluation, extracts | v.stage_name >> v.ptransform)
821 for v in evaluators:
822 if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pvalue.py in __or__(self, ptransform)
138
139 def __or__(self, ptransform):
--> 140 return self.pipeline.apply(ptransform, self)
141
142
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
575 if isinstance(transform, ptransform._NamedPTransform):
576 return self.apply(
--> 577 transform.transform, pvalueish, label or transform.label)
578
579 if not isinstance(transform, ptransform.PTransform):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
585 try:
586 old_label, transform.label = transform.label, label
--> 587 return self.apply(transform, pvalueish)
588 finally:
589 transform.label = old_label
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
628 transform.type_check_inputs(pvalueish)
629
--> 630 pvalueish_result = self.runner.apply(transform, pvalueish, self._options)
631
632 if type_options is not None and type_options.pipeline_type_check:
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply(self, transform, input, options)
196 m = getattr(self, 'apply_%s' % cls.__name__, None)
197 if m:
--> 198 return m(transform, input, options)
199 raise NotImplementedError(
200 'Execution of [%s] not implemented in runner %s.' % (transform, self))
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply_PTransform(self, transform, input, options)
226 def apply_PTransform(self, transform, input, options):
227 # The base case of apply is to call the transform's expand.
--> 228 return transform.expand(input)
229
230 def run_transform(self,
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/transforms/ptransform.py in expand(self, pcoll)
921 # Might not be a function.
922 pass
--> 923 return self._fn(pcoll, *args, **kwargs)
924
925 def default_label(self):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/tensorflow_model_analysis/evaluators/metrics_and_plots_evaluator_v2.py in _EvaluateMetricsAndPlots(extracts, eval_config, eval_shared_models, metrics_key, plots_key, validations_key, schema, random_seed_for_testing)
757 plots_key=plots_key,
758 schema=schema,
--> 759 random_seed_for_testing=random_seed_for_testing))
760
761 for k, v in evaluation.items():
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pvalue.py in __or__(self, ptransform)
138
139 def __or__(self, ptransform):
--> 140 return self.pipeline.apply(ptransform, self)
141
142
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
575 if isinstance(transform, ptransform._NamedPTransform):
576 return self.apply(
--> 577 transform.transform, pvalueish, label or transform.label)
578
579 if not isinstance(transform, ptransform.PTransform):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
585 try:
586 old_label, transform.label = transform.label, label
--> 587 return self.apply(transform, pvalueish)
588 finally:
589 transform.label = old_label
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/pipeline.py in apply(self, transform, pvalueish, label)
628 transform.type_check_inputs(pvalueish)
629
--> 630 pvalueish_result = self.runner.apply(transform, pvalueish, self._options)
631
632 if type_options is not None and type_options.pipeline_type_check:
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply(self, transform, input, options)
196 m = getattr(self, 'apply_%s' % cls.__name__, None)
197 if m:
--> 198 return m(transform, input, options)
199 raise NotImplementedError(
200 'Execution of [%s] not implemented in runner %s.' % (transform, self))
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/runners/runner.py in apply_PTransform(self, transform, input, options)
226 def apply_PTransform(self, transform, input, options):
227 # The base case of apply is to call the transform's expand.
--> 228 return transform.expand(input)
229
230 def run_transform(self,
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/apache_beam/transforms/ptransform.py in expand(self, pcoll)
921 # Might not be a function.
922 pass
--> 923 return self._fn(pcoll, *args, **kwargs)
924
925 def default_label(self):
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/tensorflow_model_analysis/evaluators/metrics_and_plots_evaluator_v2.py in _ComputeMetricsAndPlots(extracts, eval_config, metrics_specs, eval_shared_models, metrics_key, plots_key, schema, random_seed_for_testing)
582 if eval_shared_model.model_type == constants.TF_KERAS:
583 keras_specs = keras_util.metrics_specs_from_keras(
--> 584 model_name, eval_shared_model.model_loader)
585 metrics_specs = keras_specs + metrics_specs[:]
586 # TODO(mdreves): Add support for calling keras.evaluate().
~/.pyenv/versions/miniconda3-4.3.30/envs/tensorflow/lib/python3.7/site-packages/tensorflow_model_analysis/evaluators/keras_util.py in metrics_specs_from_keras(model_name, model_loader)
60 # y_true, y_pred as inputs so it can't be calculated via standard inputs so
61 # we remove it.
---> 62 metrics.extend(model.compiled_loss.metrics[1:])
63 metrics.extend(model.compiled_metrics.metrics)
64 metric_names = [m.name for m in metrics]
AttributeError: 'NoneType' object has no attribute 'metrics'
I suspect this might be because I am not compiling the Keras model before exporting it. Does TFMA only support compiled models?
I am using tensorflow==2.3.0 and tensorflow-model-analysis==0.22.1
Yes, your understanding is correct: the error occurs because you are not compiling the model and, consequently, not adding the metrics.
This is also stated in the TensorFlow Model Analysis documentation, quoted below.
Note: Only training time metrics added via model.compile (not
model.add_metric) are currently supported for keras.
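A hedged sketch of the implied fix: after the custom training loop finishes, compile the model with the metrics TFMA should report, then re-export it. The optimizer, loss, and metric below are placeholders rather than values from the question:

import tensorflow as tf

# model is assumed to be the Keras model trained in the custom loop.
model.compile(
    optimizer="adam",             # placeholder; training is already done
    loss="binary_crossentropy",   # placeholder loss
    metrics=[tf.keras.metrics.AUC(name="auc")],
)
model.save("path/to/saved_model")  # re-export with the compile metadata

tfma.default_eval_shared_model can then pick up the compiled metrics from the re-exported SavedModel.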
The following starter code errors out (AttributeError: incompatible shape for a non-contiguous array) when I try to use networkx on OS X 10.8.2 in an IPython notebook.
import networkx as nx
g = nx.Graph()
g.add_node(1)
g.add_node(2)
g.add_edge(1, 2)  # no error if this line is omitted
nx.draw(g)
I do not see the error if I do not add the edge to the graph. Matplotlib, NumPy, and SciPy were all installed as suggested here.
Below is the error message:
AttributeError Traceback (most recent call last)
<ipython-input-33-df7dfeff6452> in <module>()
----> 1 nx.draw(g)
/usr/local/lib/python2.7/site-packages/networkx/drawing/nx_pylab.pyc in draw(G, pos, ax, hold, **kwds)
131 pylab.hold(h)
132 try:
--> 133 draw_networkx(G,pos=pos,ax=ax,**kwds)
134 ax.set_axis_off()
135 pylab.draw_if_interactive()
/usr/local/lib/python2.7/site-packages/networkx/drawing/nx_pylab.pyc in draw_networkx(G, pos, with_labels, **kwds)
265
266 node_collection=draw_networkx_nodes(G, pos, **kwds)
--> 267 edge_collection=draw_networkx_edges(G, pos, **kwds)
268 if with_labels:
269 draw_networkx_labels(G, pos, **kwds)
/usr/local/lib/python2.7/site-packages/networkx/drawing/nx_pylab.pyc in draw_networkx_edges(G, pos, edgelist, width, edge_color, style, alpha, edge_cmap, edge_vmin, edge_vmax, ax, arrows, label, **kwds)
544 edge_collection.set_zorder(1) # edges go behind nodes
545 edge_collection.set_label(label)
--> 546 ax.add_collection(edge_collection)
547
548 # Note: there was a bug in mpl regarding the handling of alpha values for
/usr/local/lib/python2.7/site-packages/matplotlib/axes.pyc in add_collection(self, collection, autolim)
1443 if autolim:
1444 if collection._paths and len(collection._paths):
-> 1445 self.update_datalim(collection.get_datalim(self.transData))
1446
1447 collection._remove_method = lambda h: self.collections.remove(h)
/usr/local/lib/python2.7/site-packages/matplotlib/collections.pyc in get_datalim(self, transData)
165 offsets = offsets.filled(np.nan)
166 # get_path_collection_extents handles nan but not masked arrays
--> 167 offsets.shape = (-1, 2) # Make it Nx2
168
169 result = mpath.get_path_collection_extents(
AttributeError: incompatible shape for a non-contiguous array
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/usr/local/lib/python2.7/site-packages/IPython/zmq/pylab/backend_inline.pyc in show(close)
100 try:
101 for figure_manager in Gcf.get_all_fig_managers():
--> 102 send_figure(figure_manager.canvas.figure)
103 finally:
104 show._to_draw = []
/usr/local/lib/python2.7/site-packages/IPython/zmq/pylab/backend_inline.pyc in send_figure(fig)
209 """
210 fmt = InlineBackend.instance().figure_format
--> 211 data = print_figure(fig, fmt)
212 # print_figure will return None if there's nothing to draw:
213 if data is None:
/usr/local/lib/python2.7/site-packages/IPython/core/pylabtools.pyc in print_figure(fig, fmt)
102 try:
103 bytes_io = BytesIO()
--> 104 fig.canvas.print_figure(bytes_io, format=fmt, bbox_inches='tight')
105 data = bytes_io.getvalue()
106 finally:
/usr/local/lib/python2.7/site-packages/matplotlib/backend_bases.pyc in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, **kwargs)
1981 orientation=orientation,
1982 dryrun=True,
-> 1983 **kwargs)
1984 renderer = self.figure._cachedRenderer
1985 bbox_inches = self.figure.get_tightbbox(renderer)
/usr/local/lib/python2.7/site-packages/matplotlib/backends/backend_agg.pyc in print_png(self, filename_or_obj, *args, **kwargs)
467
468 def print_png(self, filename_or_obj, *args, **kwargs):
--> 469 FigureCanvasAgg.draw(self)
470 renderer = self.get_renderer()
471 original_dpi = renderer.dpi
/usr/local/lib/python2.7/site-packages/matplotlib/backends/backend_agg.pyc in draw(self)
419
420 try:
--> 421 self.figure.draw(self.renderer)
422 finally:
423 RendererAgg.lock.release()
/usr/local/lib/python2.7/site-packages/matplotlib/artist.pyc in draw_wrapper(artist, renderer, *args, **kwargs)
53 def draw_wrapper(artist, renderer, *args, **kwargs):
54 before(artist, renderer)
---> 55 draw(artist, renderer, *args, **kwargs)
56 after(artist, renderer)
57
/usr/local/lib/python2.7/site-packages/matplotlib/figure.pyc in draw(self, renderer)
896 dsu.sort(key=itemgetter(0))
897 for zorder, a, func, args in dsu:
--> 898 func(*args)
899
900 renderer.close_group('figure')
/usr/local/lib/python2.7/site-packages/matplotlib/artist.pyc in draw_wrapper(artist, renderer, *args, **kwargs)
53 def draw_wrapper(artist, renderer, *args, **kwargs):
54 before(artist, renderer)
---> 55 draw(artist, renderer, *args, **kwargs)
56 after(artist, renderer)
57
/usr/local/lib/python2.7/site-packages/matplotlib/axes.pyc in draw(self, renderer, inframe)
1995
1996 for zorder, a in dsu:
-> 1997 a.draw(renderer)
1998
1999 renderer.close_group('axes')
/usr/local/lib/python2.7/site-packages/matplotlib/artist.pyc in draw_wrapper(artist, renderer, *args, **kwargs)
53 def draw_wrapper(artist, renderer, *args, **kwargs):
54 before(artist, renderer)
---> 55 draw(artist, renderer, *args, **kwargs)
56 after(artist, renderer)
57
/usr/local/lib/python2.7/site-packages/matplotlib/collections.pyc in draw(self, renderer)
227 self.update_scalarmappable()
228
--> 229 transform, transOffset, offsets, paths = self._prepare_points()
230
231 gc = renderer.new_gc()
/usr/local/lib/python2.7/site-packages/matplotlib/collections.pyc in _prepare_points(self)
203
204 offsets = np.asanyarray(offsets, np.float_)
--> 205 offsets.shape = (-1, 2) # Make it Nx2
206
207 if not transform.is_affine:
AttributeError: incompatible shape for a non-contiguous array
This was reported earlier in 'Draw a graph with edges using Networkx'. It is likely a bug in the NumPy development version, as mentioned above (NumPy issue 2700).
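The mechanism is easy to reproduce in isolation: assigning to .shape reshapes an array in place, which NumPy refuses for non-contiguous views, and the matplotlib code in the traceback does exactly that on the edge-offset array (offsets.shape = (-1, 2)). A minimal sketch:

import numpy as np

a = np.arange(6).reshape(2, 3)
b = a.T  # a transposed view is not C-contiguous
print(b.flags["C_CONTIGUOUS"])  # False

b.shape = (6,)  # raises AttributeError: the view cannot be
                # reshaped in place without copying its data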