How to fix AttributeError: partially initialized module 'charset_normalizer' has no attribute 'md__mypyc' (most likely due to a circular import) - tensorflow

When I import TensorFlow GPU, I get this error after I type "import tensorflow as tf":
AttributeError: partially initialized module 'charset_normalizer' has no attribute 'md__mypyc' (most likely due to a circular import)
The full traceback is below:
AttributeError Traceback (most recent call last)
Cell In[22], line 1
----> 1 import tensorflow as tf
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\__init__.py:51
49 from ._api.v2 import autograph
50 from ._api.v2 import bitwise
---> 51 from ._api.v2 import compat
52 from ._api.v2 import config
53 from ._api.v2 import data
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\__init__.py:37
3 """Compatibility functions.
4
5 The `tf.compat` module contains two sets of compatibility functions.
(...)
32
33 """
35 import sys as _sys
---> 37 from . import v1
38 from . import v2
39 from tensorflow.python.compat.compat import forward_compatibility_horizon
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v1\__init__.py:30
28 from . import autograph
29 from . import bitwise
---> 30 from . import compat
31 from . import config
32 from . import data
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v1\compat\__init__.py:38
35 import sys as _sys
37 from . import v1
---> 38 from . import v2
39 from tensorflow.python.compat.compat import forward_compatibility_horizon
40 from tensorflow.python.compat.compat import forward_compatible
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v1\compat\v2\__init__.py:28
25 # pylint: disable=g-bad-import-order
27 from . import compat
---> 28 from tensorflow._api.v2.compat.v2 import __internal__
29 from tensorflow._api.v2.compat.v2 import __operators__
30 from tensorflow._api.v2.compat.v2 import audio
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\__init__.py:33
31 from . import autograph
32 from . import bitwise
---> 33 from . import compat
34 from . import config
35 from . import data
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\compat\__init__.py:38
35 import sys as _sys
37 from . import v1
---> 38 from . import v2
39 from tensorflow.python.compat.compat import forward_compatibility_horizon
40 from tensorflow.python.compat.compat import forward_compatible
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\compat\v2\__init__.py:37
35 from tensorflow._api.v2.compat.v2 import data
36 from tensorflow._api.v2.compat.v2 import debugging
---> 37 from tensorflow._api.v2.compat.v2 import distribute
38 from tensorflow._api.v2.compat.v2 import dtypes
39 from tensorflow._api.v2.compat.v2 import errors
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\distribute\__init__.py:182
180 from . import cluster_resolver
181 from . import coordinator
--> 182 from . import experimental
183 from tensorflow.python.distribute.collective_all_reduce_strategy import CollectiveAllReduceStrategy as MultiWorkerMirroredStrategy
184 from tensorflow.python.distribute.cross_device_ops import CrossDeviceOps
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\distribute\experimental\__init__.py:10
8 from . import coordinator
9 from . import partitioners
---> 10 from . import rpc
11 from tensorflow.python.distribute.central_storage_strategy import CentralStorageStrategy
12 from tensorflow.python.distribute.collective_all_reduce_strategy import _CollectiveAllReduceStrategyExperimental as MultiWorkerMirroredStrategy
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\_api\v2\compat\v2\distribute\experimental\rpc\__init__.py:8
3 """Public API for tf.distribute.experimental.rpc namespace.
4 """
6 import sys as _sys
----> 8 from tensorflow.python.distribute.experimental.rpc.rpc_ops import Client
9 from tensorflow.python.distribute.experimental.rpc.rpc_ops import Server
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\distribute\experimental\__init__.py:22
20 from tensorflow.python.distribute import parameter_server_strategy
21 from tensorflow.python.distribute import tpu_strategy
---> 22 from tensorflow.python.distribute.failure_handling import failure_handling
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\distribute\failure_handling\failure_handling.py:33
31 from tensorflow.python.checkpoint import checkpoint_management
32 from tensorflow.python.distribute import multi_worker_util
---> 33 from tensorflow.python.distribute.failure_handling import gce_util
34 from tensorflow.python.eager import context
35 from tensorflow.python.framework import constant_op
File ~\anaconda3\envs\tf_gpu\lib\site-packages\tensorflow\python\distribute\failure_handling\gce_util.py:20
17 import os
18 import sys
---> 20 import requests
22 from six.moves.urllib import request
23 from tensorflow.python.eager import context
File ~\anaconda3\envs\tf_gpu\lib\site-packages\requests\__init__.py:48
45 from .exceptions import RequestsDependencyWarning
47 try:
---> 48 from charset_normalizer import __version__ as charset_normalizer_version
49 except ImportError:
50 charset_normalizer_version = None
File ~\anaconda3\envs\tf_gpu\lib\site-packages\charset_normalizer\__init__.py:23
1 """
2 Charset-Normalizer
3 ~~~~~~~~~~~~~~
(...)
21 :license: MIT, see LICENSE for more details.
22 """
---> 23 from charset_normalizer.api import from_fp, from_path, from_bytes, normalize
24 from charset_normalizer.legacy import detect
25 from charset_normalizer.version import __version__, VERSION
File ~\anaconda3\envs\tf_gpu\lib\site-packages\charset_normalizer\api.py:10
7 PathLike = Union[str, 'os.PathLike[str]'] # type: ignore
9 from charset_normalizer.constant import TOO_SMALL_SEQUENCE, TOO_BIG_SEQUENCE, IANA_SUPPORTED
---> 10 from charset_normalizer.md import mess_ratio
11 from charset_normalizer.models import CharsetMatches, CharsetMatch
12 from warnings import warn
AttributeError: partially initialized module 'charset_normalizer' has no attribute 'md__mypyc' (most likely due to a circular import)
I installed "requests", "chardet", and "openpyxl", but nothing changed.
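One detail worth noting: the try/except shown in requests/__init__.py only catches an ImportError, i.e. a missing charset_normalizer, not one whose mypyc-compiled md module is broken, which is why installing "chardet" changes nothing here. A fix that is commonly reported for this exact error is a forced clean reinstall of charset-normalizer; a minimal sketch, assuming it is run inside the tf_gpu environment shown in the traceback, with a kernel restart afterwards:
!pip install --force-reinstall charset-normalizer
# If the error persists, reinstall requests the same way before restarting:
!pip install --force-reinstall requests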

Related

ModuleNotFoundError: No module named 'numpy.random.bit_generator' while importing sklearn

I installed opencv, tensorflow, and other tools on my MacBook Air M1 by following a tutorial.
After installing, opencv and tensorflow work fine, but when I try to import sklearn the mentioned error occurs. Here is the error:
File ~/miniforge3/envs/ml/lib/python3.8/site-packages/scipy/stats/distributions.py:11, in <module>
8 from ._distn_infrastructure import (rv_discrete, rv_continuous, rv_frozen)
10 from . import _continuous_distns
---> 11 from . import _discrete_distns
13 from ._continuous_distns import *
14 from ._discrete_distns import *
File ~/miniforge3/envs/ml/lib/python3.8/site-packages/scipy/stats/_discrete_distns.py:21, in <module>
17 from ._distn_infrastructure import (
18 rv_discrete, _ncx2_pdf, _ncx2_cdf, get_distribution_names,
19 _check_shape)
20 import scipy.stats._boost as _boost
---> 21 from ._biasedurn import (_PyFishersNCHypergeometric,
22 _PyWalleniusNCHypergeometric,
23 _PyStochasticLib3)
25 class binom_gen(rv_discrete):
26 r"""A binomial discrete random variable.
27
28 %(before_notes)s
(...)
51
52 """
File _biasedurn.pyx:1, in init scipy.stats._biasedurn()
ModuleNotFoundError: No module named 'numpy.random.bit_generator'
The tutorial I was following specified these exact versions of numpy and python. Some of the places I looked for help suggested updating numpy, but I'm not sure whether I should do that, because it may break other libraries like OpenCV.
versions:
python 3.8.6
numpy 1.18.5
scikit-learn 1.1.1
scipy 1.8.1
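For what it's worth, numpy.random.bit_generator only became a public module name in NumPy 1.19 (it was numpy.random._bit_generator before), and the SciPy 1.8.1 wheels are compiled against a NumPy new enough to have it, which is why the import fails under numpy 1.18.5. A hedged sketch of the usual fix, upgrading NumPy into a range SciPy 1.8.1 accepts (the exact upper bound here is an assumption; check what your OpenCV build tolerates):
!pip install --upgrade "numpy>=1.19,<1.23"
# Restart the kernel, then verify that the chain imports cleanly:
import numpy
print(numpy.__version__)   # should now be >= 1.19
import sklearn             # the scipy.stats._biasedurn import should succeed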

I am not able to import statsmodels.api in a Kaggle kernel

I successfully updated the statsmodels package in the Kaggle kernel using:
!pip install statsmodels --upgrade
This gave me version 0.10.0. However, when I try to import statsmodels.api, it gives me an error.
import statsmodels.api as sm
The error I am getting is:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-7-3b8b7e2c2e57> in <module>
8 import matplotlib.pyplot as plt
9 from sklearn.preprocessing import Normalizer
---> 10 import statsmodels.api as sm
11
12 # Input data files are available in the "../input/" directory.
/opt/conda/lib/python3.6/site-packages/statsmodels/api.py in <module>
16 from . import robust
17 from .robust.robust_linear_model import RLM
---> 18 from .discrete.discrete_model import (Poisson, Logit, Probit,
19 MNLogit, NegativeBinomial,
20 GeneralizedPoisson,
/opt/conda/lib/python3.6/site-packages/statsmodels/discrete/discrete_model.py in <module>
43
44 from statsmodels.base.l1_slsqp import fit_l1_slsqp
---> 45 from statsmodels.distributions import genpoisson_p
46
47 try:
/opt/conda/lib/python3.6/site-packages/statsmodels/distributions/__init__.py in <module>
1 from statsmodels.tools._testing import PytestTester
2 from .empirical_distribution import ECDF, monotone_fn_inverter, StepFunction
----> 3 from .edgeworth import ExpandedNormal
4 from .discrete import genpoisson_p, zipoisson, zigenpoisson, zinegbin
5
/opt/conda/lib/python3.6/site-packages/statsmodels/distributions/edgeworth.py in <module>
5 import numpy as np
6 from numpy.polynomial.hermite_e import HermiteE
----> 7 from statsmodels.compat.scipy import factorial
8 from scipy.stats import rv_continuous
9 import scipy.special as special
ImportError: cannot import name 'factorial'
I've upgraded the scipy package, and I am still getting the same error. I am new to Kaggle and Python, and I need the OLS function to do regression analysis. How can I fix this problem? If it can't be fixed, is there any other function I can use to get a regression summary?
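Two details point to a way out: scipy.misc.factorial was removed in SciPy 1.3, and the traceback still shows the old statsmodels code importing it, which usually means the kernel was not restarted after the upgrade, so the pre-upgrade package is still loaded. A minimal sketch of the usual remedies (the scipy pin is a fallback, not the preferred fix):
# After restarting the kernel, confirm the upgrade actually took effect:
import statsmodels
print(statsmodels.__version__)   # should be 0.10.0 or later
import statsmodels.api as sm     # should now import without the factorial error

# Fallback if you must keep an old statsmodels: pin SciPy below 1.3,
# the last series that still ships scipy.misc.factorial
# !pip install "scipy<1.3"
Once the import succeeds, sm.OLS(y, sm.add_constant(X)).fit().summary() gives the regression summary needed for OLS analysis.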

Import plotly.plotly

I am just trying to import plotly.plotly as py, per this tutorial: https://plot.ly/~Dreamshot/9199/import-plotly-plotly-version-/#/ , but I am getting the following error and I'm not sure why:
30 from plotly import exceptions, files, session, tools, utils
---> 31 from plotly.api import v1, v2
32 from plotly.basedatatypes import BaseTraceType, BaseFigure, BaseLayoutType
33 from plotly.plotly import chunked_requests
ImportError: cannot import name 'v1'
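A likely cause: plotly.plotly only exists in plotly 3.x; in plotly 4 and later the online plotting API moved to the separate chart-studio package, and a partially upgraded 3.x install can also leave plotly.api in this broken state. A sketch of the two common fixes, hedged since the traceback does not show which version is installed:
import plotly
print(plotly.__version__)   # decides which fix applies

# plotly >= 4: use chart_studio instead (pip install chart-studio)
# import chart_studio.plotly as py

# plotly 3.x: a clean reinstall usually repairs the broken plotly.api import
# !pip install --force-reinstall "plotly<4"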

Getting ImportError when importing matplotlib.pyplot

So I'm getting the following error when I import matplotlib.pyplot:
ImportError Traceback (most recent call last)
<ipython-input-18-413b0dcce8d2> in <module>()
1 import pandas as pd
----> 2 import matplotlib.pyplot as plt
3 data=pd.read_csv("fifa_countries_audience.csv")
4 del data['country']
5 print(data)
~\Anaconda3\lib\site-packages\matplotlib\pyplot.py in <module>()
30 from cycler import cycler
31 import matplotlib
---> 32 import matplotlib.colorbar
33 from matplotlib import style
34 from matplotlib import _pylab_helpers, interactive
~\Anaconda3\lib\site-packages\matplotlib\colorbar.py in <module>()
30
31 import matplotlib as mpl
---> 32 import matplotlib.artist as martist
33 import matplotlib.cbook as cbook
34 import matplotlib.collections as collections
~\Anaconda3\lib\site-packages\matplotlib\artist.py in <module>()
14 import matplotlib
15 from . import cbook, docstring, rcParams
---> 16 from .path import Path
17 from .transforms import (Bbox, IdentityTransform, Transform, TransformedBbox,
18 TransformedPatchPath, TransformedPath)
~\Anaconda3\lib\site-packages\matplotlib\path.py in <module>()
24
25 from . import _path, rcParams
---> 26 from .cbook import (_to_unmasked_float_array, simple_linear_interpolation,
27 maxdict)
28
ImportError: cannot import name '_to_unmasked_float_array'
Does anyone have an idea what might be the cause? I tried reinstalling matplotlib, updating it, and updating conda, but that didn't solve it.
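One pattern that fits this traceback: cbook._to_unmasked_float_array first appeared in matplotlib 2.1, so a path.py from a newer matplotlib is importing a cbook left behind by an older one, and reinstalling on top of the mixed copy does not clean it up. A sketch of a full removal before reinstalling (the site-packages path is taken from the traceback):
!pip uninstall -y matplotlib
# Repeat the line above until pip reports matplotlib is not installed, then check
# ~\Anaconda3\Lib\site-packages for leftover matplotlib* folders and delete them
# by hand before installing a single fresh copy:
!pip install matplotlib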

Failure to import numpy in Jupyter notebook

I am new to IPython/Jupyter, and my Python skills are limited, but I'm learning. I am trying to import numpy as np and get the following:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-1-4ee716103900> in <module>()
----> 1 import numpy as np
/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/__init__.py in <module>()
166 return loader(*packages, **options)
167
--> 168 from . import add_newdocs
169 __all__ = ['add_newdocs', 'ModuleDeprecationWarning']
170
/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/add_newdocs.py in <module>()
11 from __future__ import division, absolute_import, print_function
12
---> 13 from numpy.lib import add_newdoc
14
15 ###############################################################################
/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/lib/__init__.py in <module>()
6 from numpy.version import version as __version__
7
----> 8 from .type_check import *
9 from .index_tricks import *
10 from .function_base import *
/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/lib/type_check.py in <module>()
9 'common_type']
10
---> 11 import numpy.core.numeric as _nx
12 from numpy.core.numeric import asarray, asanyarray, array, isnan, \
13 obj2sctype, zeros
/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/core/__init__.py in <module>()
4 from numpy.version import version as __version__
5
----> 6 from . import multiarray
7 from . import umath
8 from . import _internal # for freeze programs
ImportError: dlopen(/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/core/multiarray.so, 2): no suitable image found. Did find:
/Users/jmmiii/Library/Enthought/Canopy_32bit/User/lib/python2.7/site-packages/numpy/core/multiarray.so: mach-o, but wrong architecture
I have several Python installs on my Mac, which runs Yosemite, including Canopy and Anaconda. I want my Jupyter notebook to use the Anaconda install, including all the modules, libraries, etc. associated with it. It seems, however, that Jupyter is targeting Canopy instead, so I think my problem might stem from the wrong linkage.
QUESTION 1: Does my conclusion hold water? If not, what might I be missing?
QUESTION 2: How can I direct/link jupyter with Anaconda and not with Canopy so that I import everything from anaconda only?
Thanks for everyone's help!
You can either set your PATH so that python commands resolve to the ~/anaconda/bin directory, by adding the following line to your ~/.bash_profile:
export PATH="/Users/jmmiii/anaconda/bin:$PATH"
Or you can create an alias for the command by editing your ~/.bash_profile and adding:
alias jupyter-notebook="/Users/jmmiii/anaconda/bin/jupyter-notebook"
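Either way, after opening a new terminal you can confirm from inside a notebook which install the kernel is actually using; a quick check:
import sys
print(sys.executable)                            # should point into /Users/jmmiii/anaconda
print([p for p in sys.path if "Canopy" in p])    # should print an empty list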