Cannot load lvis via tfds - tensorflow

I am trying to load the built-in dataset lvis. It turns out that tfds and lvis need to be installed and imported respectively; however, I did everything I could think of, and it still does not work.
import os
import tensorflow as tf
from matplotlib import pyplot as plt
%matplotlib inline
!pip install lvis
!pip install tfds-nightly
import tensorflow_datasets as tfds
train_data, info = tfds.load('lvis', split='train', as_supervised=True, with_info=True)
validation_data = tfds.load('lvis', split='validation', as_supervised=True)
test_data = tfds.load('lvis', split='test', as_supervised=True)
Running the code above in Colab produces some odd output:
NotFoundError Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/utils/py_utils.py in try_reraise(*args, **kwargs)
391 try:
--> 392 yield
393 except Exception as e: # pylint: disable=broad-except
15 frames
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/load.py in builder(name, try_gcs, **builder_kwargs)
167 with py_utils.try_reraise(prefix=f'Failed to construct dataset {name}: '):
--> 168 return cls(**builder_kwargs) # pytype: disable=not-instantiable
169
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/dataset_builder.py in __init__(self, file_format, **kwargs)
917 """
--> 918 super().__init__(**kwargs)
919 self.info.set_file_format(file_format)
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/dataset_builder.py in __init__(self, data_dir, config, version)
184 else: # Use the code version (do not restore data)
--> 185 self.info.initialize_from_bucket()
186
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/utils/py_utils.py in __get__(self, obj, objtype)
145 if cached is None:
--> 146 cached = self.fget(obj) # pytype: disable=attribute-error
147 setattr(obj, attr, cached)
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/dataset_builder.py in info(self)
328 "the restored dataset.")
--> 329 info = self._info()
330 if not isinstance(info, dataset_info.DatasetInfo):
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/object_detection/lvis/lvis.py in _info(self)
94 names_file=tfds.core.tfds_path(
---> 95 'object_detection/lvis/lvis_classes.txt'))
96 return tfds.core.DatasetInfo(
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/features/class_label_feature.py in __init__(self, num_classes, names, names_file)
67 else:
---> 68 self.names = _load_names_from_file(names_file)
69
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/features/class_label_feature.py in _load_names_from_file(names_filepath)
198 name.strip()
--> 199 for name in tf.compat.as_text(f.read()).split("\n")
200 if name.strip() # Filter empty names
/usr/local/lib/python3.7/dist-packages/tensorflow/python/lib/io/file_io.py in read(self, n)
116 """
--> 117 self._preread_check()
118 if n == -1:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/lib/io/file_io.py in _preread_check(self)
79 self._read_buf = _pywrap_file_io.BufferedInputStream(
---> 80 compat.path_to_str(self.__name), 1024 * 512)
81
NotFoundError: /usr/local/lib/python3.7/dist-packages/tensorflow_datasets/object_detection/lvis/lvis_classes.txt; No such file or directory
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
<ipython-input-4-b8c819fe5c62> in <module>()
----> 1 train_data, info = tfds.load('lvis', split='train', as_supervised=True, with_info=True)
2 validation_data = tfds.load('lvis', split='validation', as_supervised=True)
3 test_data = tfds.load('lvis', split='test', as_supervised=True)
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/load.py in load(name, split, data_dir, batch_size, shuffle_files, download, as_supervised, decoders, read_config, with_info, builder_kwargs, download_and_prepare_kwargs, as_dataset_kwargs, try_gcs)
315 builder_kwargs = {}
316
--> 317 dbuilder = builder(name, data_dir=data_dir, try_gcs=try_gcs, **builder_kwargs)
318 if download:
319 download_and_prepare_kwargs = download_and_prepare_kwargs or {}
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/load.py in builder(name, try_gcs, **builder_kwargs)
166 if cls:
167 with py_utils.try_reraise(prefix=f'Failed to construct dataset {name}: '):
--> 168 return cls(**builder_kwargs) # pytype: disable=not-instantiable
169
170 # If neither the code nor the files are found, raise DatasetNotFoundError
/usr/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/utils/py_utils.py in try_reraise(*args, **kwargs)
392 yield
393 except Exception as e: # pylint: disable=broad-except
--> 394 reraise(e, *args, **kwargs)
395
396
/usr/local/lib/python3.7/dist-packages/tensorflow_datasets/core/utils/py_utils.py in reraise(e, prefix, suffix)
359 else:
360 exception = RuntimeError(f'{type(e).__name__}: {msg}')
--> 361 raise exception from e
362 # Otherwise, modify the exception in-place
363 elif len(e.args) <= 1:
RuntimeError: NotFoundError: Failed to construct dataset lvis: /usr/local/lib/python3.7/dist-packages/tensorflow_datasets/object_detection/lvis/lvis_classes.txt; No such file or directory

This is what I did to get it to work in a Colab notebook:
!pip install -q tfds-nightly tensorflow tensorflow-datasets matplotlib lvis pycocotools apache_beam
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
Since the tfds object_detection/lvis folder isn't up to date, I deleted that folder and then re-downloaded it from the tfds GitHub repository.
First, install github-clone so we can download specific repo subfolders:
!pip install github-clone
Then remove the lvis folder and re-download it from GitHub:
!rm -rf ../usr/local/lib/python3.7/dist-packages/tensorflow_datasets/object_detection/lvis
!ghclone https://github.com/tensorflow/datasets/tree/master/tensorflow_datasets/object_detection/lvis
!mv ./lvis ../usr/local/lib/python3.7/dist-packages/tensorflow_datasets/object_detection/
After that I could get it to work; this next chunk of code worked for me:
ds, info = tfds.load('lvis', split='train[:25%]', with_info=True,
                     data_dir='../content/tensorflow_datasets/',
                     decoders=tfds.decode.PartialDecoding({
                         'image': True,
                         'features': tfds.features.FeaturesDict({
                             'image/id': True,
                             'objects': tfds.features.Sequence({
                                 'id': True,
                                 'bbox': True,
                                 'label': tfds.features.ClassLabel(names=['skateboard', 'shoe'])
                             })
                         })
                     }))
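As a quick sanity check (a minimal sketch; the exact keys available depend on the PartialDecoding spec above), you can pull a single example and inspect it:
for example in ds.take(1):
    print(list(example.keys()))   # show which fields were actually decoded
    print(example['image'].shape)  # 'image': True above, so the image tensor is present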

Related

AttributeError: 'Adam' object has no attribute 'get_weights'

I am pretty new to TensorFlow/Keras and there is a problem running cross-validation that I could not fix. It all worked before I installed featurewiz (conda install -c conda-forge featurewiz).
from sklearn.model_selection import KFold, cross_validate, cross_val_score
from scikeras.wrappers import KerasClassifier
estimator = KerasClassifier(model, epochs=500, batch_size=10) #, verbose = 0
kfold = KFold(n_splits=5, shuffle=True)
results = cross_validate(estimator, X, y, cv=kfold, scoring=['accuracy', 'precision_weighted', 'recall_weighted', 'f1_weighted'], return_train_score=True)
print(results)
Error:
WARNING:absl:Found untraced functions such as _update_step_xla while saving (showing 1 of 1). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: ram:///var/folders/c4/ywdtx99d1vl0ptsg1fy494_40000gn/T/tmpsuvxkjb9/assets
INFO:tensorflow:Assets written to: ram:///var/folders/c4/ywdtx99d1vl0ptsg1fy494_40000gn/T/tmpsuvxkjb9/assets
---------------------------------------------------------------------------
Empty Traceback (most recent call last)
File ~/tensorflow-test/env/lib/python3.8/site-packages/joblib/parallel.py:862, in Parallel.dispatch_one_batch(self, iterator)
861 try:
--> 862 tasks = self._ready_batches.get(block=False)
863 except queue.Empty:
864 # slice the iterator n_jobs * batchsize items at a time. If the
865 # slice returns less than that, then the current batchsize puts
(...)
868 # accordingly to distribute evenly the last items between all
869 # workers.
File ~/tensorflow-test/env/lib/python3.8/queue.py:167, in Queue.get(self, block, timeout)
166 if not self._qsize():
--> 167 raise Empty
168 elif timeout is None:
Empty:
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
Cell In[5], line 6
4 estimator = KerasClassifier(model, epochs=500, batch_size=10) #, verbose = 0
5 kfold = KFold(n_splits=5, shuffle=True) # seed so that the shuffle stays the same, random_state=1337
----> 6 results = cross_validate(estimator, X, y, cv=kfold, scoring=['accuracy', 'precision_weighted', 'recall_weighted', 'f1_weighted'], return_train_score=True)
8 print(results)
File ~/tensorflow-test/env/lib/python3.8/site-packages/sklearn/model_selection/_validation.py:266, in cross_validate(estimator, X, y, groups, scoring, cv, n_jobs, verbose, fit_params, pre_dispatch, return_train_score, return_estimator, error_score)
263 # We clone the estimator to make sure that all the folds are
264 # independent, and that it is pickle-able.
265 parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
--> 266 results = parallel(
267 delayed(_fit_and_score)(
268 clone(estimator),
269 X,
270 y,
271 scorers,
272 train,
273 test,
274 verbose,
275 None,
276 fit_params,
277 return_train_score=return_train_score,
278 return_times=True,
279 return_estimator=return_estimator,
280 error_score=error_score,
281 )
282 for train, test in cv.split(X, y, groups)
283 )
285 _warn_or_raise_about_fit_failures(results, error_score)
287 # For callabe scoring, the return type is only know after calling. If the
288 # return type is a dictionary, the error scores can now be inserted with
289 # the correct key.
File ~/tensorflow-test/env/lib/python3.8/site-packages/joblib/parallel.py:1085, in Parallel.__call__(self, iterable)
1076 try:
1077 # Only set self._iterating to True if at least a batch
1078 # was dispatched. In particular this covers the edge
(...)
1082 # was very quick and its callback already dispatched all the
1083 # remaining jobs.
1084 self._iterating = False
-> 1085 if self.dispatch_one_batch(iterator):
1086 self._iterating = self._original_iterator is not None
1088 while self.dispatch_one_batch(iterator):
File ~/tensorflow-test/env/lib/python3.8/site-packages/joblib/parallel.py:873, in Parallel.dispatch_one_batch(self, iterator)
870 n_jobs = self._cached_effective_n_jobs
871 big_batch_size = batch_size * n_jobs
--> 873 islice = list(itertools.islice(iterator, big_batch_size))
874 if len(islice) == 0:
875 return False
File ~/tensorflow-test/env/lib/python3.8/site-packages/sklearn/model_selection/_validation.py:268, in <genexpr>(.0)
263 # We clone the estimator to make sure that all the folds are
264 # independent, and that it is pickle-able.
265 parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)
266 results = parallel(
267 delayed(_fit_and_score)(
--> 268 clone(estimator),
269 X,
270 y,
271 scorers,
272 train,
273 test,
274 verbose,
275 None,
276 fit_params,
277 return_train_score=return_train_score,
278 return_times=True,
279 return_estimator=return_estimator,
280 error_score=error_score,
281 )
282 for train, test in cv.split(X, y, groups)
283 )
285 _warn_or_raise_about_fit_failures(results, error_score)
287 # For callabe scoring, the return type is only know after calling. If the
288 # return type is a dictionary, the error scores can now be inserted with
289 # the correct key.
File ~/tensorflow-test/env/lib/python3.8/site-packages/sklearn/base.py:89, in clone(estimator, safe)
87 new_object_params = estimator.get_params(deep=False)
88 for name, param in new_object_params.items():
---> 89 new_object_params[name] = clone(param, safe=False)
90 new_object = klass(**new_object_params)
91 params_set = new_object.get_params(deep=False)
File ~/tensorflow-test/env/lib/python3.8/site-packages/sklearn/base.py:70, in clone(estimator, safe)
68 elif not hasattr(estimator, "get_params") or isinstance(estimator, type):
69 if not safe:
---> 70 return copy.deepcopy(estimator)
71 else:
72 if isinstance(estimator, type):
File ~/tensorflow-test/env/lib/python3.8/copy.py:153, in deepcopy(x, memo, _nil)
151 copier = getattr(x, "__deepcopy__", None)
152 if copier is not None:
--> 153 y = copier(memo)
154 else:
155 reductor = dispatch_table.get(cls)
File ~/tensorflow-test/env/lib/python3.8/site-packages/scikeras/_saving_utils.py:117, in deepcopy_model(model, memo)
116 def deepcopy_model(model: keras.Model, memo: Dict[Hashable, Any]) -> keras.Model:
--> 117 _, (model_bytes, optimizer_weights) = pack_keras_model(model)
118 new_model = unpack_keras_model(model_bytes, optimizer_weights)
119 memo[model] = new_model
File ~/tensorflow-test/env/lib/python3.8/site-packages/scikeras/_saving_utils.py:108, in pack_keras_model(model)
106 optimizer_weights = None
107 if model.optimizer is not None:
--> 108 optimizer_weights = model.optimizer.get_weights()
109 model_bytes = np.asarray(memoryview(b.read()))
110 return (
111 unpack_keras_model,
112 (model_bytes, optimizer_weights),
113 )
AttributeError: 'Adam' object has no attribute 'get_weights'
I created a TensorFlow environment on my M1 MacBook following https://github.com/mrdbourke/m1-machine-learning-test.
It all worked; I got the following results:
TensorFlow has access to the following devices:
[PhysicalDevice(name='/physical_device:CPU:0', device_type='CPU'), PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
TensorFlow version: 2.11.0
I also installed featurewiz; I am not sure whether there were problems installing it (I used conda install -c conda-forge featurewiz).
SciKeras doesn't work with TensorFlow 2.11. The TensorFlow team released a breaking change in a minor version bump (they removed the get_weights() method). It will be fixed in SciKeras soon: https://github.com/adriangb/scikeras/pull/287
Edit: that PR was merged, so the new version of SciKeras (v0.10.0) should solve this issue.
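Until you can upgrade, a minimal sketch of the two workarounds (the version pins are assumptions based on the above; on an M1 conda environment the TensorFlow package may be named tensorflow-macos, so adjust the package name accordingly):
# Option 1: upgrade SciKeras to a release that includes the fix
pip install --upgrade "scikeras>=0.10.0"
# Option 2: keep SciKeras as-is and pin TensorFlow to a release that still has Optimizer.get_weights()
pip install "tensorflow<2.11"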

Pandas can't read in excel file

Something is wrong with my pandas module. I tried to read in an Excel file using the following code, which works on my classmate's computer, but it gives me an error on my computer:
FFT1=pd.read_excel('FFT1.xlsx', sheet_name='sheet1')
The file named 'FFT1.xlsx' is in the same directory as my jupyter notebook. The error message says:
XLRDError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_7436/2793485739.py in <module>
----> 1 FFT1=pd.read_excel('FFT1.xlsx', sheet_name='sheet1')
D:\Softwares\Anaconda\lib\site-packages\pandas\io\excel\_base.py in read_excel(io, sheet_name, header, names, index_col, usecols, squeeze, dtype, engine, converters, true_values, false_values, skiprows, nrows, na_values, keep_default_na, verbose, parse_dates, date_parser, thousands, comment, skipfooter, convert_float, mangle_dupe_cols, **kwds)
302
303 if not isinstance(io, ExcelFile):
--> 304 io = ExcelFile(io, engine=engine)
305 elif engine and engine != io.engine:
306 raise ValueError(
D:\Softwares\Anaconda\lib\site-packages\pandas\io\excel\_base.py in __init__(self, io, engine)
819 self._io = stringify_path(io)
820
--> 821 self._reader = self._engines[engine](self._io)
822
823 def __fspath__(self):
D:\Softwares\Anaconda\lib\site-packages\pandas\io\excel\_xlrd.py in __init__(self, filepath_or_buffer)
19 err_msg = "Install xlrd >= 1.0.0 for Excel support"
20 import_optional_dependency("xlrd", extra=err_msg)
---> 21 super().__init__(filepath_or_buffer)
22
23 #property
D:\Softwares\Anaconda\lib\site-packages\pandas\io\excel\_base.py in __init__(self, filepath_or_buffer)
351 self.book = self.load_workbook(filepath_or_buffer)
352 elif isinstance(filepath_or_buffer, str):
--> 353 self.book = self.load_workbook(filepath_or_buffer)
354 elif isinstance(filepath_or_buffer, bytes):
355 self.book = self.load_workbook(BytesIO(filepath_or_buffer))
D:\Softwares\Anaconda\lib\site-packages\pandas\io\excel\_xlrd.py in load_workbook(self, filepath_or_buffer)
34 return open_workbook(file_contents=data)
35 else:
---> 36 return open_workbook(filepath_or_buffer)
37
38 #property
D:\Softwares\Anaconda\lib\site-packages\xlrd\__init__.py in open_workbook(filename, logfile, verbosity, use_mmap, file_contents, encoding_override, formatting_info, on_demand, ragged_rows, ignore_workbook_corruption)
168 # files that xlrd can parse don't start with the expected signature.
169 if file_format and file_format != 'xls':
--> 170 raise XLRDError(FILE_FORMAT_DESCRIPTIONS[file_format]+'; not supported')
171
172 bk = open_workbook_xls(
XLRDError: Excel xlsx file; not supported
How should I fix this?
Make sure that you have openpyxl installed; if you don't, try
pip install openpyxl
Change your code to
FFT1=pd.read_excel('FFT1.xlsx', sheet_name='sheet1',engine='openpyxl')
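If it still fails, a quick sanity check (a minimal sketch, assuming the file sits next to the notebook as described) is to confirm openpyxl is importable in the same environment before reading with it explicitly:
import pandas as pd
import openpyxl  # raises ImportError if openpyxl did not install into this environment
print(openpyxl.__version__)
FFT1 = pd.read_excel('FFT1.xlsx', sheet_name='sheet1', engine='openpyxl')
print(FFT1.head())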

hdbscan error when inside rapids container

I am using RAPIDS UMAP in conjunction with HDBSCAN inside a rapidsai Docker container: rapidsai/rapidsai-core:0.18-cuda11.0-runtime-ubuntu18.04-py3.7
import cudf
import cupy
from cuml.manifold import UMAP
import hdbscan
from sklearn.datasets import make_blobs
from cuml.experimental.preprocessing import StandardScaler

blobs, labels = make_blobs(n_samples=100000, n_features=10)
df_gpu = cudf.DataFrame(blobs)
scaler = StandardScaler()
cupy_scaled = scaler.fit_transform(df_gpu.values)
projector = UMAP(n_components=3, n_neighbors=2000)
cupy_projected = projector.fit_transform(cupy_scaled)
numpy_projected = cupy.asnumpy(cupy_projected)
clusterer = hdbscan.HDBSCAN(min_cluster_size=1000, prediction_data=True, gen_min_span_tree=True)  # , core_dist_n_jobs=1
clusterer.fit(numpy_projected)
I get an error which goes away if I use core_dist_n_jobs=1, but that makes the code slower:
---------------------------------------------------------------------------
TerminatedWorkerError Traceback (most recent call last)
in
1 clusterer= hdbscan.HDBSCAN(min_cluster_size=1000, prediction_data=True, gen_min_span_tree=True)
----> 2 clusterer.fit(numpy_projected)
/opt/conda/envs/rapids/lib/python3.7/site-packages/hdbscan/hdbscan_.py in fit(self, X, y)
917 self._condensed_tree,
918 self._single_linkage_tree,
--> 919 self._min_spanning_tree) = hdbscan(X, **kwargs)
920
921 if self.prediction_data:
/opt/conda/envs/rapids/lib/python3.7/site-packages/hdbscan/hdbscan_.py in hdbscan(X, min_cluster_size, min_samples, alpha, cluster_selection_epsilon, metric, p, leaf_size, algorithm, memory, approx_min_span_tree, gen_min_span_tree, core_dist_n_jobs, cluster_selection_method, allow_single_cluster, match_reference_implementation, **kwargs)
613 approx_min_span_tree,
614 gen_min_span_tree,
--> 615 core_dist_n_jobs, **kwargs)
616 else: # Metric is a valid BallTree metric
617 # TO DO: Need heuristic to decide when to go to boruvka;
/opt/conda/envs/rapids/lib/python3.7/site-packages/joblib/memory.py in __call__(self, *args, **kwargs)
350
351 def __call__(self, *args, **kwargs):
--> 352 return self.func(*args, **kwargs)
353
354 def call_and_shelve(self, *args, **kwargs):
/opt/conda/envs/rapids/lib/python3.7/site-packages/hdbscan/hdbscan_.py in _hdbscan_boruvka_kdtree(X, min_samples, alpha, metric, p, leaf_size, approx_min_span_tree, gen_min_span_tree, core_dist_n_jobs, **kwargs)
276 leaf_size=leaf_size // 3,
277 approx_min_span_tree=approx_min_span_tree,
--> 278 n_jobs=core_dist_n_jobs, **kwargs)
279 min_spanning_tree = alg.spanning_tree()
280 # Sort edges of the min_spanning_tree by weight
hdbscan/_hdbscan_boruvka.pyx in hdbscan._hdbscan_boruvka.KDTreeBoruvkaAlgorithm.__init__()
hdbscan/_hdbscan_boruvka.pyx in hdbscan._hdbscan_boruvka.KDTreeBoruvkaAlgorithm._compute_bounds()
/opt/conda/envs/rapids/lib/python3.7/site-packages/joblib/parallel.py in __call__(self, iterable)
1052
1053 with self._backend.retrieval_context():
-> 1054 self.retrieve()
1055 # Make sure that we get a last message telling us we are done
1056 elapsed_time = time.time() - self._start_time
/opt/conda/envs/rapids/lib/python3.7/site-packages/joblib/parallel.py in retrieve(self)
931 try:
932 if getattr(self._backend, 'supports_timeout', False):
--> 933 self._output.extend(job.get(timeout=self.timeout))
934 else:
935 self._output.extend(job.get())
/opt/conda/envs/rapids/lib/python3.7/site-packages/joblib/_parallel_backends.py in wrap_future_result(future, timeout)
540 AsyncResults.get from multiprocessing."""
541 try:
--> 542 return future.result(timeout=timeout)
543 except CfTimeoutError as e:
544 raise TimeoutError from e
/opt/conda/envs/rapids/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
433 raise CancelledError()
434 elif self._state == FINISHED:
--> 435 return self.__get_result()
436 else:
437 raise TimeoutError()
/opt/conda/envs/rapids/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
TerminatedWorkerError: A worker process managed by the executor was unexpectedly terminated. This could be caused by a segmentation fault while calling the function or by an excessive memory usage causing the Operating System to kill the worker.
The exit codes of the workers are {EXIT(1)}
Is there a way to solve this issue while still keeping HDBSCAN fast?
Try setting min_samples to a smallish value.
In https://github.com/scikit-learn-contrib/hdbscan/issues/345#issuecomment-628749332, lmcinnes says that you "may have issues if your min_cluster_size is large and your min_samples is not set. You could try setting min_samples to something smallish and see if that helps." I noticed that you do not have min_samples set in your code.
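For example (a minimal sketch; min_samples=10 is just an arbitrary "smallish" starting point, not a tuned value):
clusterer = hdbscan.HDBSCAN(min_cluster_size=1000, min_samples=10,
                            prediction_data=True, gen_min_span_tree=True)
clusterer.fit(numpy_projected)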

error while loading semantic similarity with BERT model

I want to calculate semantic similarity between sentences using BERT. I found this code on GitHub for an already fine-tuned BERT for semantic similarity:
from semantic_text_similarity.models import WebBertSimilarity
from semantic_text_similarity.models import ClinicalBertSimilarity
web_model = WebBertSimilarity(device='cpu', batch_size=10)
It downloads to 100% and then gives me the following error (this is the final line):
TypeError: init_weights() takes 1 positional argument but 2 were given
I tried to read about this error, but I don't understand where the two positional arguments that were given instead of one come from.
The model I downloaded is from https://github.com/AndriyMulyar/semantic-text-similarity
I'd appreciate any hint on where to look.
Thanks!
-------------------------EDIT QUESTION------------------------------
This is the whole error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-73-97be4030b59e> in <module>()
----> 1 web_model = WebBertSimilarity(device='cpu', batch_size=10) #defaults to GPU prediction
/anaconda3/lib/python3.6/site-packages/semantic_text_similarity/models/bert/web_similarity.py in __init__(self, device, batch_size, model_name)
6 def __init__(self, device='cuda', batch_size=10, model_name="web-bert-similarity"):
7 model_path = get_model_path(model_name)
----> 8 super().__init__(device=device, batch_size=batch_size, bert_model_path=model_path)
/anaconda3/lib/python3.6/site-packages/semantic_text_similarity/models/bert/similarity.py in __init__(self, args, device, bert_model_path, batch_size, learning_rate, weight_decay, additional_features)
80 config.pretrained_config_archive_map['additional_features'] = additional_features
81
---> 82 self.regressor_net = BertSimilarityRegressor.from_pretrained(self.args['bert_model_path'], config=config)
83 self.optimizer = torch.optim.Adam(
84 self.regressor_net.parameters(),
/anaconda3/lib/python3.6/site-packages/pytorch_transformers/modeling_utils.py in from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
534 if hasattr(model, 'tie_weights'):
535 model.tie_weights() # make sure word embedding weights are still tied
--> 536
537 # Set model in evaluation mode to desactivate DropOut modules by default
538 model.eval()
/anaconda3/lib/python3.6/site-packages/semantic_text_similarity/models/bert/similarity.py in __init__(self, bert_model_config)
25 )
26
---> 27 self.apply(self.init_weights)
28
29
/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in apply(self, fn)
291 """
292 for module in self.children():
--> 293 module.apply(fn)
294 fn(self)
295 return self
/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in apply(self, fn)
291 """
292 for module in self.children():
--> 293 module.apply(fn)
294 fn(self)
295 return self
/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in apply(self, fn)
291 """
292 for module in self.children():
--> 293 module.apply(fn)
294 fn(self)
295 return self
/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py in apply(self, fn)
292 for module in self.children():
293 module.apply(fn)
--> 294 fn(self)
295 return self
296
TypeError: init_weights() takes 1 positional argument but 2 were given

tf.estimator.BoostedTreesRegressor SavedModel Restore Issue

I am having an issue restoring a tf.estimator.BoostedTreesRegressor model using tf.SavedModel. When reloading the model from the saved model directory using tf.contrib.predictor.from_saved_model() I receive the following error:
KeyError: "The name 'boosted_trees/QuantileAccumulator/' refers to an
Operation not in the graph."
This error only occurs when using numeric features (e.g. tf.feature_column.numeric_column). Reloading the model works fine when using only categorical columns.
When I'm not saving/restoring, BoostedTreesRegressor evaluates and predicts successfully with all features.
The following estimator save/restore scenarios have worked successfully:
- DNNRegressor with numeric and categorical features
- LinearRegressor with numeric and categorical features
- BoostedTreeRegressor with just categorical features
fc = tf.feature_column
feature_columns = [
    fc.numeric_column('f1', dtype=tf.int64),
    fc.numeric_column('f2', dtype=tf.int64),
    fc.indicator_column(
        fc.categorical_column_with_vocabulary_list('f3', f3)),
    fc.indicator_column(
        fc.categorical_column_with_vocabulary_list('f4', f4))
]
feature_spec = fc.make_parse_example_spec(feature_columns)
params = {
    'feature_columns': feature_columns,
    'n_batches_per_layer': n_batches,
    'n_trees': 200,
    'max_depth': 6,
    'learning_rate': 0.01
}
regressor = tf.estimator.BoostedTreesRegressor(**params)
regressor.train(train_input_fn, max_steps=400)
serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
regressor.export_saved_model('saved_model', serving_input_receiver_fn)
.
.
.
# latest is path to saved model
predict_fn = predictor.from_saved_model(latest[:-4])
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-101-ee20beae4424> in <module>
----> 1 predict_fn = predictor.from_saved_model(latest[:-4])
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/contrib/predictor/predictor_factories.py in from_saved_model(export_dir, signature_def_key, signature_def, input_names, output_names, tags, graph, config)
151 tags=tags,
152 graph=graph,
--> 153 config=config)
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/contrib/predictor/saved_model_predictor.py in __init__(self, export_dir, signature_def_key, signature_def, input_names, output_names, tags, graph, config)
151 with self._graph.as_default():
152 self._session = session.Session(config=config)
--> 153 loader.load(self._session, tags.split(','), export_dir)
154
155 if input_names is None:
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
322 'in a future version' if date is None else ('after %s' % date),
323 instructions)
--> 324 return func(*args, **kwargs)
325 return tf_decorator.make_decorator(
326 func, new_func, 'deprecated',
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/saved_model/loader_impl.py in load(sess, tags, export_dir, import_scope, **saver_kwargs)
267 """
268 loader = SavedModelLoader(export_dir)
--> 269 return loader.load(sess, tags, import_scope, **saver_kwargs)
270
271
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/saved_model/loader_impl.py in load(self, sess, tags, import_scope, **saver_kwargs)
418 with sess.graph.as_default():
419 saver, _ = self.load_graph(sess.graph, tags, import_scope,
--> 420 **saver_kwargs)
421 self.restore_variables(sess, saver, import_scope)
422 self.run_init_ops(sess, tags, import_scope)
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/saved_model/loader_impl.py in load_graph(self, graph, tags, import_scope, **saver_kwargs)
348 with graph.as_default():
349 return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access
--> 350 meta_graph_def, import_scope=import_scope, **saver_kwargs)
351
352 def restore_variables(self, sess, saver, import_scope=None):
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/training/saver.py in _import_meta_graph_with_return_elements(meta_graph_or_file, clear_devices, import_scope, return_elements, **kwargs)
1455 import_scope=import_scope,
1456 return_elements=return_elements,
-> 1457 **kwargs))
1458
1459 saver = _create_saver_from_imported_meta_graph(
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/framework/meta_graph.py in import_scoped_meta_graph_with_return_elements(meta_graph_or_file, clear_devices, graph, import_scope, input_map, unbound_inputs_col_name, restore_collections_predicate, return_elements)
850 for value in field.value:
851 col_op = graph.as_graph_element(
--> 852 ops.prepend_name_scope(value, scope_to_prepend_to_names))
853 graph.add_to_collection(key, col_op)
854 elif kind == "int64_list":
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in as_graph_element(self, obj, allow_tensor, allow_operation)
3476
3477 with self._lock:
-> 3478 return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
3479
3480 def _as_graph_element_locked(self, obj, allow_tensor, allow_operation):
/usr/local/anaconda3/envs/zume/lib/python3.7/site-packages/tensorflow/python/framework/ops.py in _as_graph_element_locked(self, obj, allow_tensor, allow_operation)
3536 if name not in self._nodes_by_name:
3537 raise KeyError("The name %s refers to an Operation not in the "
-> 3538 "graph." % repr(name))
3539 return self._nodes_by_name[name]
3540
KeyError: "The name 'boosted_trees/QuantileAccumulator/' refers to an Operation not in the graph."
If you are using TensorFlow version 1.x (1.14, 1.15), you can use tf.compat.v1.saved_model.load, tf.compat.v1.saved_model.loader.load, or tf.saved_model.loader.load to load a SavedModel.
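A minimal sketch of the TF 1.x route, assuming export_dir is the path returned by export_saved_model (the variable name is hypothetical):
import tensorflow as tf

with tf.compat.v1.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.saved_model.loader.load(sess, ['serve'], export_dir)
    # the graph and variables are now restored into `sess`; look up input/output
    # tensors by name on sess.graph before calling sess.run for predictions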
If you are using TensorFlow version 2, below is code where saving and restoring succeed using tf.estimator.BoostedTreesClassifier:
n_batches = 1
est = tf.estimator.BoostedTreesClassifier(feature_columns,
                                          n_batches_per_layer=n_batches)
# The model will stop training once the specified number of trees is built, not
# based on the number of steps.
est.train(train_input_fn, max_steps=100)
# Eval.
result = est.evaluate(eval_input_fn)
clear_output()
print(pd.Series(result))
feature_spec = fc.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
Exported_Path = est.export_saved_model('saved_model', serving_input_receiver_fn)
imported = tf.saved_model.load(Exported_Path)
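As a quick check that the restore worked (a minimal sketch; 'serving_default' is the usual signature key for an Estimator export, but inspect imported.signatures to confirm):
infer = imported.signatures['serving_default']
print(infer.structured_input_signature)  # what the restored serving function expects
print(infer.structured_outputs)          # what it returns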
For the complete working code using TensorFlow version 2, please see this GitHub Gist.