Undefined symbol when importing Sequential - tensorflow

I get an error when I try to import Sequential from keras.models; how can I fix it?
from keras.models import Sequential
-----
ImportError Traceback (most recent call last)
Input In [26], in <cell line: 1>()
----> 1 from keras.models import Sequential
2 from keras.layers import Dense
File ~/.local/lib/python3.8/site-packages/keras/__init__.py:24, in <module>
21 from tensorflow.python import tf2
22 from keras import distribute
---> 24 from keras import models
26 from keras.engine.input_layer import Input
27 from keras.engine.sequential import Sequential
File ~/.local/lib/python3.8/site-packages/keras/models/__init__.py:18, in <module>
15 """Keras models API."""
16 # pylint: disable=g-bad-import-order
---> 18 from keras.engine.functional import Functional
19 from keras.engine.sequential import Sequential
20 from keras.engine.training import Model
File ~/.local/lib/python3.8/site-packages/keras/engine/functional.py:24, in <module>
22 import warnings
23 from keras import backend
---> 24 from keras.dtensor import layout_map as layout_map_lib
25 from keras.engine import base_layer
26 from keras.engine import base_layer_utils
File ~/.local/lib/python3.8/site-packages/keras/dtensor/__init__.py:22, in <module>
20 # Conditional import the dtensor API, since it is currently broken in OSS.
21 if _DTENSOR_API_ENABLED:
---> 22 from tensorflow.compat.v2.experimental import dtensor as dtensor_api # pylint: disable=g-import-not-at-top
23 else:
24 # Leave it with a placeholder, so that the import line from other python file
25 # will not break.
26 dtensor_api = None
File ~/.local/lib/python3.8/site-packages/tensorflow/_api/v2/compat/v2/experimental/dtensor/__init__.py:8, in <module>
3 """Public API for tf.experimental.dtensor namespace.
4 """
6 import sys as _sys
----> 8 from tensorflow.dtensor.python.api import call_with_layout
9 from tensorflow.dtensor.python.api import check_layout
10 from tensorflow.dtensor.python.api import client_id
File ~/.local/lib/python3.8/site-packages/tensorflow/dtensor/python/__init__.py:19, in <module>
15 """DTensor Python API."""
17 from tensorflow.dtensor.python import gen_dtensor_ops as ops
---> 19 from tensorflow.dtensor.python.api import call_with_layout
20 from tensorflow.dtensor.python.api import check_layout
21 from tensorflow.dtensor.python.api import client_id
File ~/.local/lib/python3.8/site-packages/tensorflow/dtensor/python/api.py:22, in <module>
19 import threading
20 from typing import Any, Callable, List, Optional, Sequence, Union
---> 22 from tensorflow.dtensor.python import dtensor_device
23 from tensorflow.dtensor.python import gen_dtensor_ops
24 from tensorflow.dtensor.python import layout as layout_lib
File ~/.local/lib/python3.8/site-packages/tensorflow/dtensor/python/dtensor_device.py:29, in <module>
27 from tensorflow.dtensor.python import gen_dtensor_ops
28 from tensorflow.dtensor.python import layout as layout_lib
---> 29 from tensorflow.python import _pywrap_dtensor_device
30 from tensorflow.python.eager import context
31 from tensorflow.python.eager import core
ImportError: /home/saifullah/.local/lib/python3.8/site-packages/tensorflow/python/_pywrap_dtensor_device.so: undefined symbol: _ZN10tensorflow7dtensor4PackEP11TFE_ContextiPP16TFE_TensorHandleRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEPvP9TF_Status, version tensorflow
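An undefined symbol in _pywrap_dtensor_device.so at import time usually means the keras package under ~/.local was built against a different tensorflow build than the one actually being loaded (for example, a pip install --user of one package next to a system install of the other). A minimal diagnostic sketch, assuming a version mismatch is the cause: compare the two installed versions without triggering the failing import.
import importlib.metadata as md  # Python 3.8+

# Diagnostic sketch: keras and tensorflow releases are paired (e.g. keras 2.9
# with tensorflow 2.9); if these two versions disagree, that is the likely cause.
for pkg in ("tensorflow", "keras"):
    try:
        print(pkg, md.version(pkg))
    except md.PackageNotFoundError:
        print(pkg, "not installed")
If the versions differ, reinstalling them as a matching pair (or removing the stray copies under ~/.local) is the usual fix.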

Related

AttributeError: module 'object_detection.metrics.coco_evaluation' has no attribute 'CocoKeypointEvaluator'

Folks, I'm trying to run this model_main_tf2.py and I'm getting the below error.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
File ~/Documents/Indian_License_Plate_Recognition/models/research/object_detection/model_main_tf2.py:31
29 from absl import flags
30 import tensorflow.compat.v2 as tf
---> 31 from object_detection import model_lib_v2
33 flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
34 'file.')
35 flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
File ~/anaconda3/envs/tfsetup/lib/python3.10/site-packages/object_detection/model_lib_v2.py:29
26 import numpy as np
27 import tensorflow.compat.v1 as tf
---> 29 from object_detection import eval_util
30 from object_detection import inputs
31 from object_detection import model_lib
File ~/anaconda3/envs/tfsetup/lib/python3.10/site-packages/object_detection/eval_util.py:53
44 EVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics'
46 # A dictionary of metric names to classes that implement the metric. The classes
47 # in the dictionary must implement
48 # utils.object_detection_evaluation.DetectionEvaluator interface.
49 EVAL_METRICS_CLASS_DICT = {
50 'coco_detection_metrics':
51 coco_evaluation.CocoDetectionEvaluator,
52 'coco_keypoint_metrics':
---> 53 coco_evaluation.CocoKeypointEvaluator,
54 'coco_mask_metrics':
55 coco_evaluation.CocoMaskEvaluator,
56 'coco_panoptic_metrics':
57 coco_evaluation.CocoPanopticSegmentationEvaluator,
58 'lvis_mask_metrics':
59 lvis_evaluation.LVISMaskEvaluator,
60 'oid_challenge_detection_metrics':
61 object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
62 'oid_challenge_segmentation_metrics':
63 object_detection_evaluation
64 .OpenImagesInstanceSegmentationChallengeEvaluator,
65 'pascal_voc_detection_metrics':
66 object_detection_evaluation.PascalDetectionEvaluator,
67 'weighted_pascal_voc_detection_metrics':
68 object_detection_evaluation.WeightedPascalDetectionEvaluator,
69 'precision_at_recall_detection_metrics':
70 object_detection_evaluation.PrecisionAtRecallDetectionEvaluator,
71 'pascal_voc_instance_segmentation_metrics':
72 object_detection_evaluation.PascalInstanceSegmentationEvaluator,
73 'weighted_pascal_voc_instance_segmentation_metrics':
74 object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
75 'oid_V2_detection_metrics':
76 object_detection_evaluation.OpenImagesDetectionEvaluator,
77 }
79 EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
82 def write_metrics(metrics, global_step, summary_dir):
AttributeError: module 'object_detection.metrics.coco_evaluation' has no attribute 'CocoKeypointEvaluator'
When I check the coco_evaluation.py file on my machine, I can find the CocoKeypointEvaluator class. Can someone please help me resolve this issue?
I tried re-installing the Object Detection API, tensorflow, and pycocotools, but nothing helps.
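One thing worth ruling out before further reinstalls: Python may be importing a different, stale copy of the package than the file you inspected. A minimal diagnostic sketch:
from object_detection.metrics import coco_evaluation

# Print which file Python actually loads for the failing module, then check
# whether the class is present in that copy.
print(coco_evaluation.__file__)
print(hasattr(coco_evaluation, "CocoKeypointEvaluator"))
If __file__ points somewhere other than the coco_evaluation.py you checked (e.g. a second copy under site-packages next to the models/research checkout), remove the stale copy and reinstall the API.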

Truth value of a Dataframe is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all()

I've tried to create a pairplot with seaborn from my CSV data (this link), following the code from the seaborn site.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
freq_data = pd.read_csv('C:\\Users\\frequency.csv')
freq = sns.load_dataset(freq_data)
df = sns.pairplot(iris, hue="condition", height=2.5)
plt.show()
The result shows a traceback about the ambiguous truth value of a DataFrame:
Traceback (most recent call last):
File "\.vscode\test.py", line 8, in <module>
freq = sns.load_dataset(freq_data)
File "\site-packages\seaborn\utils.py", line 485, in load_dataset
if name not in get_dataset_names():
File "\site-packages\pandas\core\generic.py", line 1441, in __nonzero__
raise ValueError(
ValueError: The truth value of a DataFrame is ambiguous. Use a.empty, a.bool(), a.item(), a.any() or a.all().
I've checked my data; the result is here:
condition area sphericity aspect_ratio
0 20 kHz 0.249 0.287 1.376
1 20 kHz 0.954 0.721 1.421
2 20 kHz 0.118 0.260 1.409
3 20 kHz 0.540 0.552 1.526
4 20 kHz 0.448 0.465 1.160
.. ... ... ... ...
310 30 kHz 6.056 0.955 2.029
311 30 kHz 4.115 1.097 1.398
312 30 kHz 11.055 1.816 1.838
313 30 kHz 4.360 1.183 1.162
314 30 kHz 10.596 0.940 1.715
[315 rows x 4 columns]
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 315 entries, 0 to 314
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 condition 315 non-null object
1 area 315 non-null float64
2 sphericity 315 non-null float64
3 aspect_ratio 315 non-null float64
dtypes: float64(3), object(1)
memory usage: 10.0+ KB
I have no idea what is happening with my DataFrame :(
Please advise me on how to solve this problem.
Thank you everyone.
The first argument of seaborn.load_dataset() is the name of the dataset ({name}.csv on https://github.com/mwaskom/seaborn-data), not a pandas.DataFrame object. The return value of seaborn.load_dataset() is just a pandas.DataFrame, so you don't need to do
freq = sns.load_dataset(freq_data)
Moreover, you may want freq_data rather than iris in df = sns.pairplot(iris, hue="condition", height=2.5).
Here is the final example code:
from io import StringIO
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
TESTDATA = StringIO("""condition;area;sphericity;aspect_ratio
20 kHz;0.249;0.287;1.376
20 kHz;0.954;0.721;1.421
20 kHz;0.118;0.260;1.409
20 kHz;0.540;0.552;1.526
20 kHz;0.448;0.465;1.160
30 kHz;6.056;0.955;2.029
30 kHz;4.115;1.097;1.398
30 kHz;11.055;1.816;1.838
30 kHz;4.360;1.183;1.162
30 kHz;10.596;0.940;1.715
""")
freq_data = pd.read_csv(TESTDATA, sep=";")
sns.pairplot(freq_data, hue="condition", height=2.5)
plt.show()
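With the real file from the question, the equivalent is to pass the DataFrame returned by pd.read_csv straight to pairplot (path as in the question):
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd

# read_csv already returns a DataFrame; no sns.load_dataset() call is needed.
freq_data = pd.read_csv('C:\\Users\\frequency.csv')
sns.pairplot(freq_data, hue="condition", height=2.5)
plt.show()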

TypeError: '<=' not supported between instances of 'Timestamp' and 'numpy.float64'

I am trying to plot using hvplot, and I am getting this:
TypeError: '<=' not supported between instances of 'Timestamp' and 'numpy.float64'
Here is my data:
TimeConv Hospitalizations
1 2020-04-04 827
2 2020-04-05 1132
3 2020-04-06 1153
4 2020-04-07 1252
5 2020-04-08 1491
... ... ...
71 2020-06-13 2242
72 2020-06-14 2287
73 2020-06-15 2326
74 NaT NaN
75 NaT NaN
Below is my code:
import numpy as np
import matplotlib.pyplot as plt
import xlsxwriter
import pandas as pd
from pandas import DataFrame
path = ('Casecountdata.xlsx')
xl = pd.ExcelFile(path)
df1 = xl.parse('Hospitalization by Day')
df2 = df1[['Unnamed: 1','Unnamed: 2']]
df2 = df2.drop(df2.index[0])
df2 = df2.rename(columns={"Unnamed: 1": "Time", "Unnamed: 2": "Hospitalizations"})
df2['TimeConv'] = pd.to_datetime(df2.Time)
df3 = df2[['TimeConv','Hospitalizations']]
When I take a sample of your data above and try to plot it, it works for me, so there might be something wrong in the way you read your data from Excel into pandas. You can try df.info() to see what the datatypes of your data look like: column TimeConv should be datetime64[ns] and column Hospitalizations should be int64 (or float). It could also be a version problem... do you have the latest versions of hvplot etc. installed? But my guess is that your data doesn't look right.
In any case, when I run the following, it works and plots your data:
# import libraries
import pandas as pd
import hvplot.pandas
import holoviews as hv
hv.extension('bokeh')
from io import StringIO # need this to read your text data
# your sample data
text_data = StringIO("""
column1 TimeConv Hospitalizations
1 2020-04-04 827
2 2020-04-05 1132
72 2020-06-14 2287
73 2020-06-15 2326
74 NaT NaN
""")
# read text data to dataframe
df = pd.read_csv(text_data, sep="\s+")
df['TimeConv'] = pd.to_datetime(df.TimeConv, yearfirst=True)
# shortly checkout datatypes of your data
df.info()
# create scatter plot of your data
df.hvplot.scatter(
    x='TimeConv',
    y='Hospitalizations',
    width=500,
    title='Showing hospitalizations over time',
)
Running this code produces a scatter plot of hospitalizations over time.
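If df.info() on your own data shows Hospitalizations as object rather than a numeric dtype (a plausible outcome of the header-row juggling in the Excel parsing above), a minimal coercion sketch using the df2 from the question:
import pandas as pd

# Hypothetical fix: force the dtypes hvplot expects (datetime64[ns] for x,
# numeric for y), then drop the trailing NaT/NaN rows.
df2['TimeConv'] = pd.to_datetime(df2['Time'], errors='coerce')
df2['Hospitalizations'] = pd.to_numeric(df2['Hospitalizations'], errors='coerce')
df3 = df2[['TimeConv', 'Hospitalizations']].dropna()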

ImportError: DLL load failed when importing sklearn in Jupyter with Anaconda

I updated Anaconda, and since then I can't import sklearn in my Jupyter Notebook.
Here is my traceback:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-2-d20256e4506d> in <module>
4
5 # Scikit-Learn ≥0.20 is required
----> 6 import sklearn
7 # assert sklearn.__version__ >= "0.20"
8
~\Anaconda3\lib\site-packages\sklearn\__init__.py in <module>
80 from . import _distributor_init # noqa: F401
81 from . import __check_build # noqa: F401
---> 82 from .base import clone
83 from .utils._show_versions import show_versions
84
~\Anaconda3\lib\site-packages\sklearn\base.py in <module>
18
19 from . import __version__
---> 20 from .utils import _IS_32BIT
21
22 _DEFAULT_TAGS = {
~\Anaconda3\lib\site-packages\sklearn\utils\__init__.py in <module>
25 from ..exceptions import DataConversionWarning
26 from .deprecation import deprecated
---> 27 from .fixes import np_version
28 from .validation import (as_float_array,
29 assert_all_finite,
~\Anaconda3\lib\site-packages\sklearn\utils\fixes.py in <module>
16 import scipy.sparse as sp
17 import scipy
---> 18 import scipy.stats
19 from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa
20
~\Anaconda3\lib\site-packages\scipy\stats\__init__.py in <module>
382 from __future__ import division, print_function, absolute_import
383
--> 384 from .stats import *
385 from .distributions import *
386 from .morestats import *
~\Anaconda3\lib\site-packages\scipy\stats\stats.py in <module>
183 import scipy.special as special
184 from scipy import linalg
--> 185 from . import distributions
186 from . import mstats_basic
187 from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
~\Anaconda3\lib\site-packages\scipy\stats\distributions.py in <module>
8 from __future__ import division, print_function, absolute_import
9
---> 10 from ._distn_infrastructure import (entropy, rv_discrete, rv_continuous,
11 rv_frozen)
12
~\Anaconda3\lib\site-packages\scipy\stats\_distn_infrastructure.py in <module>
23
24 # for root finding for discrete distribution ppf, and max likelihood estimation
---> 25 from scipy import optimize
26
27 # for functions of continuous distributions (e.g. moments, entropy, cdf)
~\Anaconda3\lib\site-packages\scipy\optimize\__init__.py in <module>
388
389 from .optimize import *
--> 390 from ._minimize import *
391 from ._root import *
392 from ._root_scalar import *
~\Anaconda3\lib\site-packages\scipy\optimize\_minimize.py in <module>
28 from ._trustregion_krylov import _minimize_trust_krylov
29 from ._trustregion_exact import _minimize_trustregion_exact
---> 30 from ._trustregion_constr import _minimize_trustregion_constr
31
32 # constrained minimization
~\Anaconda3\lib\site-packages\scipy\optimize\_trustregion_constr\__init__.py in <module>
2
3
----> 4 from .minimize_trustregion_constr import _minimize_trustregion_constr
5
6 __all__ = ['_minimize_trustregion_constr']
~\Anaconda3\lib\site-packages\scipy\optimize\_trustregion_constr\minimize_trustregion_constr.py in <module>
2 import time
3 import numpy as np
----> 4 from scipy.sparse.linalg import LinearOperator
5 from .._differentiable_functions import VectorFunction
6 from .._constraints import (
~\Anaconda3\lib\site-packages\scipy\sparse\linalg\__init__.py in <module>
114 from .dsolve import *
115 from .interface import *
--> 116 from .eigen import *
117 from .matfuncs import *
118 from ._onenormest import *
~\Anaconda3\lib\site-packages\scipy\sparse\linalg\eigen\__init__.py in <module>
9 from __future__ import division, print_function, absolute_import
10
---> 11 from .arpack import *
12 from .lobpcg import *
13
~\Anaconda3\lib\site-packages\scipy\sparse\linalg\eigen\arpack\__init__.py in <module>
20 from __future__ import division, print_function, absolute_import
21
---> 22 from .arpack import *
~\Anaconda3\lib\site-packages\scipy\sparse\linalg\eigen\arpack\arpack.py in <module>
43 __all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
44
---> 45 from . import _arpack
46 import numpy as np
47 import warnings
ImportError: DLL load failed: La procédure spécifiée est introuvable. (The specified procedure could not be found.)
I have found several threads about this problem, but none of them works for me.
Apparently there's a problem with arpack in my case. I already tried to uninstall/reinstall numpy, scipy, sklearn, and mkl. Nothing is working.
Please help.
I've finally found a solution.
I reinstalled numpy+mkl.
Apparently you have to download it specifically from here.
Then in the Terminal you go to the folder you saved the file and you run the following command:
pip install --upgrade --force-reinstall "numpy-1.18.1+mkl-cp37-cp37m-win_amd64.whl"
(replace the file name with the one you downloaded)
Magic. It's working.
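If you would rather stay inside conda than install a wheel with pip (an untested alternative, not what solved it for me), forcing a consistent reinstall of the numeric stack from conda's own channel can also repair mismatched MKL DLLs:
conda install --force-reinstall numpy scipy scikit-learn mkl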

How do I read tabulator separated CSV in blaze?

I have a "CSV" data file with the following format (well, it's rather a TSV):
event pdg x y z t px py pz ekin
3383 11 -161.515 5.01938e-05 -0.000187112 0.195413 0.664065 0.126078 -0.736968 0.00723234
1694 11 -161.515 -0.000355633 0.000263174 0.195413 0.511853 -0.523429 0.681196 0.00472714
4228 11 -161.535 6.59631e-06 -3.32796e-05 0.194947 -0.713983 -0.0265468 -0.69966 0.0108681
4233 11 -161.515 -0.000524488 6.5069e-05 0.195413 0.942642 0.331324 0.0406377 0.017594
This file is interpretable as-is in pandas:
from pandas import read_csv, read_table
data = read_csv("test.csv", sep="\t", index_col=False) # Works
data = read_table("test.csv", index_col=False) # Works
However, when I try to read it in blaze (which declares that it accepts pandas keyword arguments), an exception is thrown:
from blaze import Data
Data("test.csv") # Attempt 1
Data("test.csv", sep="\t") # Attempt 2
Data("test.csv", sep="\t", index_col=False) # Attempt 3
None of these works, and pandas is not used at all. The "sniffer" that tries to deduce column names and types just calls csv.Sniffer.sniff() from the standard library (which fails).
Is there a way to properly read this file in blaze? Given that its "little brother" has a few hundred MB, I want to use blaze's sequential processing capabilities.
Thanks for any ideas.
Edit: I think it might be a problem of odo/csv and filed an issue: https://github.com/blaze/odo/issues/327
Edit2:
Complete error:
Error                                     Traceback (most recent call last)
<ipython-input> in <module>()
----> 1 bz.Data("test.csv", sep="\t", index_col=False)
/home/[username-hidden]/anaconda3/lib/python3.4/site-packages/blaze/interactive.py in Data(data, dshape, name, fields, columns, schema, **kwargs)
54 if isinstance(data, _strtypes):
55 data = resource(data, schema=schema, dshape=dshape, columns=columns,
---> 56 **kwargs)
57 if (isinstance(data, Iterator) and
58 not isinstance(data, tuple(not_an_iterator))):
/home/[username-hidden]/anaconda3/lib/python3.4/site-packages/odo/regex.py in __call__(self, s, *args, **kwargs)
62
63 def __call__(self, s, *args, **kwargs):
---> 64 return self.dispatch(s)(s, *args, **kwargs)
65
66 @property
/home/[username-hidden]/anaconda3/lib/python3.4/site-packages/odo/backends/csv.py in resource_csv(uri, **kwargs)
276 @resource.register('.+\.(csv|tsv|ssv|data|dat)(\.gz|\.bz2?)?')
277 def resource_csv(uri, **kwargs):
--> 278 return CSV(uri, **kwargs)
279
280
/home/[username-hidden]/anaconda3/lib/python3.4/site-packages/odo/backends/csv.py in __init__(self, path, has_header, encoding, sniff_nbytes, **kwargs)
102 if has_header is None:
103 self.has_header = (not os.path.exists(path) or
--> 104 infer_header(path, sniff_nbytes))
105 else:
106 self.has_header = has_header
/home/[username-hidden]/anaconda3/lib/python3.4/site-packages/odo/backends/csv.py in infer_header(path, nbytes, encoding, **kwargs)
58 with open_file(path, 'rb') as f:
59 raw = f.read(nbytes)
---> 60 return csv.Sniffer().has_header(raw if PY2 else raw.decode(encoding))
61
62
/home/[username-hidden]/anaconda3/lib/python3.4/csv.py in has_header(self, sample)
392 # subtracting from the likelihood of the first row being a header.
393
--> 394 rdr = reader(StringIO(sample), self.sniff(sample))
395
396 header = next(rdr) # assume first row is header
/home/[username-hidden]/anaconda3/lib/python3.4/csv.py in sniff(self, sample, delimiters)
187
188 if not delimiter:
--> 189 raise Error("Could not determine delimiter")
190
191 class dialect(Dialect):
Error: Could not determine delimiter
I am working with Python 2.7.10, dask v0.7.1, blaze v0.8.2 and conda v3.17.0.
conda install dask
conda install blaze
Here is a way you can import the data for use with blaze: parse the data first with pandas and then convert it into blaze. Perhaps this defeats the purpose, but there is no trouble this way.
As a side note: in order to parse the data file correctly, your pandas read statement should be:
from blaze import Data
from pandas import DataFrame, read_csv
data = read_csv("csvdata.dat", sep=r"\s+", index_col=False)  # \s+ (not \s*): split on runs of whitespace
bdata = Data(data)
Now the data is formatted correctly with no errors; bdata shows:
event pdg x y z t px py \
0 3383 11 -161.515 0.000050 -0.000187 0.195413 0.664065 0.126078
1 1694 11 -161.515 -0.000356 0.000263 0.195413 0.511853 -0.523429
2 4228 11 -161.535 0.000007 -0.000033 0.194947 -0.713983 -0.026547
3 4233 11 -161.515 -0.000524 0.000065 0.195413 0.942642 0.331324
pz ekin
0 -0.736968 0.007232
1 0.681196 0.004727
2 -0.699660 0.010868
Here is an alternative: use dask. It can probably do the same chunking and large-scale processing you are looking for, and it certainly makes it easy to load a TSV correctly.
In [17]: import dask.dataframe as dd
In [18]: df = dd.read_csv('tsvdata.txt', sep='\t', index_col=False)
In [19]: df.head()
Out[19]:
event pdg x y z t px py \
0 3383 11 -161.515 0.000050 -0.000187 0.195413 0.664065 0.126078
1 1694 11 -161.515 -0.000356 0.000263 0.195413 0.511853 -0.523429
2 4228 11 -161.535 0.000007 -0.000033 0.194947 -0.713983 -0.026547
3 4233 11 -161.515 -0.000524 0.000065 0.195413 0.942642 0.331324
4 854 11 -161.515 0.000032 0.000418 0.195414 0.675752 0.315671
pz ekin
0 -0.736968 0.007232
1 0.681196 0.004727
2 -0.699660 0.010868
3 0.040638 0.017594
4 -0.666116 0.012641
See also: http://dask.pydata.org/en/latest/array-blaze.html#how-to-use-blaze-with-dask
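Finally, a speculative workaround based only on the odo code visible in the traceback above (CSV.__init__ takes path, has_header, encoding, sniff_nbytes): passing has_header explicitly should bypass the infer_header() call whose csv.Sniffer raises. Untested, and the dialect sniffing may still fail elsewhere:
from blaze import Data

# Speculative: has_header=True skips the header inference shown in the
# traceback; odo may still try to sniff the delimiter for the dialect.
d = Data("test.csv", has_header=True)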