I can't figure out why this Neuraxle pipeline doesn't work.
I just want to scale the data and apply LinearSVC.
What am I doing wrong?
This is what I am trying to do:
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from neuraxle.hyperparams.distributions import RandInt
from neuraxle.hyperparams.space import HyperparameterSpace
from neuraxle.metaopt.auto_ml import AutoML, InMemoryHyperparamsRepository, \
    ValidationSplitter
from neuraxle.metaopt.callbacks import MetricCallback, ScoringCallback
from neuraxle.pipeline import Pipeline
from neuraxle.steps.sklearn import SKLearnWrapper
DATA_INPUTS = np.random.randint(0, 100, (100, 3))
EXPECTED_OUTPUTS = np.random.randint(0, 3, 100)
p = Pipeline([
    SKLearnWrapper(StandardScaler()),
    SKLearnWrapper(LinearSVC(),
                   HyperparameterSpace({'C': RandInt(0, 10000)})),
])
auto_ml = AutoML(
    p,
    validation_splitter=ValidationSplitter(0.20),
    refit_trial=True,
    n_trials=10,
    epochs=10,
    cache_folder_when_no_handle='cache',
    scoring_callback=ScoringCallback(mean_squared_error,
                                     higher_score_is_better=False),
    callbacks=[MetricCallback('mse', metric_function=mean_squared_error,
                              higher_score_is_better=False)],
    hyperparams_repository=InMemoryHyperparamsRepository(
        cache_folder='cache')
)
random_search = auto_ml.fit(DATA_INPUTS, EXPECTED_OUTPUTS)
Output:
new trial:
{
"SKLearnWrapper_LinearSVC": {
"C": 7794
}
}
trial 1/10
fitting trial 1/10 split 1/1
hyperparams: {
"SKLearnWrapper_LinearSVC__C": 7794,
"SKLearnWrapper_LinearSVC__class_weight": null,
"SKLearnWrapper_LinearSVC__dual": true,
"SKLearnWrapper_LinearSVC__fit_intercept": true,
"SKLearnWrapper_LinearSVC__intercept_scaling": 1,
"SKLearnWrapper_LinearSVC__loss": "squared_hinge",
"SKLearnWrapper_LinearSVC__max_iter": 1000,
"SKLearnWrapper_LinearSVC__multi_class": "ovr",
"SKLearnWrapper_LinearSVC__penalty": "l2",
"SKLearnWrapper_LinearSVC__random_state": null,
"SKLearnWrapper_LinearSVC__tol": 0.0001,
"SKLearnWrapper_LinearSVC__verbose": 0,
"SKLearnWrapper_StandardScaler__copy": true,
"SKLearnWrapper_StandardScaler__with_mean": true,
"SKLearnWrapper_StandardScaler__with_std": true
}
epoch 1/10
main train: 1.475
main validation: 0.9
mse train: 1.475
mse validation: 0.9
epoch 2/10
<neuraxle.metaopt.trial.Trial object at 0x7f764b20e190>
Traceback (most recent call last):
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/auto_ml.py", line 660, in _fit_data_container
repo_trial_split = self._execute_trial(
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/trial.py", line 243, in __exit__
raise exc_val
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/auto_ml.py", line 660, in _fit_data_container
repo_trial_split = self._execute_trial(
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/auto_ml.py", line 725, in _execute_trial
self.print_func('success trial {} score: {}'.format(
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/trial.py", line 489, in __exit__
raise exc_val
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/auto_ml.py", line 716, in _execute_trial
repo_trial_split = self.trainer.fit_trial_split(
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/auto_ml.py", line 484, in fit_trial_split
trial_split = trial_split.fit_trial_split(train_data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/trial.py", line 294, in fit_trial_split
self.pipeline = self.pipeline.handle_fit(train_data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/base.py", line 983, in handle_fit
new_self = self._fit_data_container(data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/pipeline.py", line 173, in _fit_data_container
step, data_container = step.handle_fit_transform(data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/base.py", line 1002, in handle_fit_transform
new_self, data_container = self._fit_transform_data_container(data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/base.py", line 1106, in _fit_transform_data_container
new_self, out = self.fit_transform(data_container.data_inputs, data_container.expected_outputs)
File "/home/alxkolm/projects/Neuraxle/neuraxle/steps/sklearn.py", line 60, in fit_transform
out = self.wrapped_sklearn_predictor.fit_transform(data_inputs, expected_outputs)
File "/home/alxkolm/projects/ttoy/.venv38/lib/python3.8/site-packages/sklearn/base.py", line 556, in fit_transform
return self.fit(X, y, **fit_params).transform(X)
File "/home/alxkolm/projects/ttoy/.venv38/lib/python3.8/site-packages/sklearn/preprocessing/data.py", line 639, in fit
return self.partial_fit(X, y)
File "/home/alxkolm/projects/ttoy/.venv38/lib/python3.8/site-packages/sklearn/preprocessing/data.py", line 661, in partial_fit
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
File "/home/alxkolm/projects/ttoy/.venv38/lib/python3.8/site-packages/sklearn/utils/validation.py", line 517, in check_array
raise ValueError(
ValueError: Expected 2D array, got 1D array instead:
array=[2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.
2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.
2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2. 2.
2. 2. 2. 2. 2. 2. 2. 2.].
Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/alxkolm/projects/ttoy/trainbox/case.py", line 39, in <module>
random_search = auto_ml.fit(DATA_INPUTS, EXPECTED_OUTPUTS)
File "/home/alxkolm/projects/Neuraxle/neuraxle/base.py", line 3144, in fit
new_self = self.handle_fit(data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/base.py", line 983, in handle_fit
new_self = self._fit_data_container(data_container, context)
File "/home/alxkolm/projects/Neuraxle/neuraxle/metaopt/auto_ml.py", line 674, in _fit_data_container
self._get_trial_split_description(repo_trial, repo_trial_split, validation_splits, trial_number)))
UnboundLocalError: local variable 'repo_trial_split' referenced before assignment
I have fixed your issue here: https://github.com/Neuraxio/Neuraxle/pull/333
Basically, the AutoML loop was reusing the same DataContainer object for every epoch, but your pipeline was mutating the data inputs inside that shared reference. I have added a shallow copy before each epoch. I tested your code with a unit test, and it works just fine now.
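The change is essentially a defensive copy at the top of each epoch. A minimal sketch of the idea (the names below are illustrative, not the actual Neuraxle internals):

for epoch in range(n_epochs):
    # Shallow-copy the container so steps that mutate data_inputs in place
    # cannot corrupt what the next epoch sees; the PR does this once per
    # epoch inside the AutoML trainer loop.
    epoch_container = data_container.copy()
    pipeline = pipeline.handle_fit(epoch_container, context)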
Related
I'm very new to this, but I am trying to solve the issue somehow. This is my convert.py file:
import numpy as n
import tensorflow as tf
import coremltools as ct
print(n.__version__)
print(tf.__version__)
print(ct.__version__)
loaded_model = tf.saved_model.load("mobilenet_v2_130_224.h5")
mlmodel = ct.convert(loaded_model, inputs=[ct.ImageType()], classifier_config=ct.ClassifierConfig("labels.txt"), source='tensorflow')
mlmodel.short_description = "My Classifier"
mlmodel.license = "Apache 2.0"
spec = mlmodel.get_spec()
ct.utils.rename_feature(spec, "input_1", "imageInput")
ct.utils.rename_feature(spec, "Identity", "classResult")
mlmodel = ct.models.MLModel(spec)
print(mlmodel)
mlmodel.save("model_299x299.mlmodel")
I downloaded the model from [here][1] and unzipped it on my desktop. I have an M1 iMac. Why do I get the errors below? How can I convert this particular model into an mlmodel? If my convert file is written incorrectly, what would be the best practice?
Traceback (most recent call last):
File "/Users/asduskun/Desktop/convert.py", line 20, in <module>
mlmodel = ct.convert(loaded_model, inputs=[ct.ImageType()], classifier_config=ct.ClassifierConfig("labels.txt"), source='tensorflow')
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/_converters_entry.py", line 444, in convert
mlmodel = mil_convert(
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/converter.py", line 187, in mil_convert
return _mil_convert(model, convert_from, convert_to, ConverterRegistry, MLModel, compute_units, **kwargs)
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/converter.py", line 211, in _mil_convert
proto, mil_program = mil_convert_to_proto(
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/converter.py", line 281, in mil_convert_to_proto
prog = frontend_converter(model, **kwargs)
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/converter.py", line 99, in __call__
return tf2_loader.load()
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/frontend/tensorflow/load.py", line 61, in load
self._graph_def = self._graph_def_from_model(output_names)
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/frontend/tensorflow2/load.py", line 133, in _graph_def_from_model
cfs, graph_def = self._get_concrete_functions_and_graph_def()
File "/Users/asduskun/miniconda3/lib/python3.10/site-packages/coremltools/converters/mil/frontend/tensorflow2/load.py", line 125, in _get_concrete_functions_and_graph_def
raise NotImplementedError(msg.format(self.model))
NotImplementedError: Expected model format: [SavedModel | [concrete_function] | tf.keras.Model | .h5 | GraphDef], got <tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject object at 0x1676455a0>
[1]: https://tfhub.dev/google/imagenet/mobilenet_v2_130_224/classification/5
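One thing worth trying, based on the list of accepted formats in the error message: coremltools can load a SavedModel from a directory path itself, so passing the path of the unzipped folder (a string) instead of the object returned by tf.saved_model.load may avoid the _UserObject issue. A hedged sketch, assuming the unzipped download is a SavedModel directory named mobilenet_v2_130_224:

import coremltools as ct

# Pass the SavedModel directory path directly; ct.convert then loads the
# model itself rather than receiving the unsupported _UserObject wrapper.
mlmodel = ct.convert(
    "mobilenet_v2_130_224",
    inputs=[ct.ImageType()],
    classifier_config=ct.ClassifierConfig("labels.txt"),
    source="tensorflow",
)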
I have some difficulty with tensorflow_datasets when trying to load mnist.
python: 3.7
tensorflow: 2.1.0
tensorflow_datasets has been upgraded to the latest version, 4.6, because the default version of tensorflow_datasets from the tensorflow installation has no attribute 'load'.
But now the problem is that the data cannot be downloaded and extracted successfully
with the following command:
datasets = tfds.load(name="mnist")
the error message is:
Downloading and preparing dataset Unknown size (download: Unknown size, generated: Unknown size, total: Unknown size) to ~\tensorflow_datasets\mnist\3.0.1...
Extraction completed...: 0 file [00:00, ? file/s]██████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 138.37 url/s]
Dl Size...: 100%|██████████████████████████████████████████████████████████████████████████| 11594722/11594722 [00:00<00:00, 373172106.07 MiB/s]
Dl Completed...: 100%|█████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 122.03 url/s]
Traceback (most recent call last):
File "", line 1, in
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\core\load.py", line 327, in load
dbuilder.download_and_prepare(**download_and_prepare_kwargs)
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\core\dataset_builder.py", line 483, in download_and_prepare
download_config=download_config,
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\core\dataset_builder.py", line 1222, in _download_and_prepare
disable_shuffling=self.info.disable_shuffling,
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\core\split_builder.py", line 310, in submit_split_generation
return self._build_from_generator(**build_kwargs)
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\core\split_builder.py", line 376, in _build_from_generator
leave=False,
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tqdm\std.py", line 1195, in iter
for obj in iterable:
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\image_classification\mnist.py", line 151, in _generate_examples
images = _extract_mnist_images(data_path, num_examples)
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_datasets\image_classification\mnist.py", line 350, in _extract_mnist_images
f.read(16) # header
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_core\python\lib\io\file_io.py", line 122, in read
self._preread_check()
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_core\python\lib\io\file_io.py", line 84, in _preread_check
compat.as_bytes(self.__name), 1024 * 512)
File "C:\Users\Wilso\Anaconda3\envs\tfgpu\lib\site-packages\tensorflow_core\python\util\compat.py", line 87, in as_bytes
(bytes_or_text,))
TypeError: Expected binary or unicode string, got WindowsGPath('C:\Users\Wilso\tensorflow_datasets\downloads\extracted\GZIP.cvdf-datasets_mnist_train-images-idx3-ubyteRA_Kv3PMVG-iFHXoHqNwJlYF9WviEKQCTSyo8gNSNgk.gz')
Try:
(ds_train, ds_test), ds_info = tfds.load(
    "mnist",
    split=["train", "test"],
    shuffle_files=True,
    as_supervised=True,  # will return tuple (img, label) otherwise dict
    with_info=True,  # able to get info about dataset
)
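If the download succeeds, the as_supervised=True tuples can be consumed directly. A small follow-up sketch (the normalization step is an assumption about what you want, not part of the original question):

import tensorflow as tf

def normalize_img(image, label):
    # Scale uint8 pixels to floats in [0, 1].
    return tf.cast(image, tf.float32) / 255.0, label

ds_train = ds_train.map(normalize_img).batch(128)
for images, labels in ds_train.take(1):
    print(images.shape, labels.shape)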
I have a pandas dataframe raw_data with two columns: 'T' and 'BP':
T BP
0 -0.500 115.790
1 -0.499 115.441
2 -0.498 115.441
3 -0.497 115.441
4 -0.496 115.790
... ... ...
647163 646.663 105.675
647164 646.664 105.327
647165 646.665 105.327
647166 646.666 105.327
647167 646.667 104.978
[647168 rows x 2 columns]
I want to apply the Hodges-Lehmann mean (it's a robust average) over a rolling window and create a new column. Here's the function:
def hodgesLehmannMean(x):
    m = np.add.outer(x, x)
    ind = np.tril_indices(len(x), 0)
    return 0.5 * np.median(m[ind])
I therefore write:
raw_data[new_col] = raw_data['BP'].rolling(21, min_periods=1, center=True,
                                           win_type=None, axis=0, closed=None).agg(hodgesLehmannMean)
but I get a string of error messages:
Traceback (most recent call last):
File "C:\Users\tkpme\miniconda3\lib\runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\tkpme\miniconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "c:\Users\tkpme\.vscode\extensions\ms-python.python-2020.8.101144\pythonFiles\lib\python\debugpy\__main__.py", line 45, in <module>
cli.main()
File "c:\Users\tkpme\.vscode\extensions\ms-python.python-2020.8.101144\pythonFiles\lib\python\debugpy/..\debugpy\server\cli.py", line 430, in main
run()
File "c:\Users\tkpme\.vscode\extensions\ms-python.python-2020.8.101144\pythonFiles\lib\python\debugpy/..\debugpy\server\cli.py", line 267, in run_file
runpy.run_path(options.target, run_name=compat.force_str("__main__"))
File "C:\Users\tkpme\miniconda3\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\Users\tkpme\miniconda3\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\Users\tkpme\miniconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "c:\Users\tkpme\OneDrive\Documents\Work\CMC\BP Satya and Suresh\Code\Naveen_peak_detect test.py", line 227, in <module>
main()
File "c:\Users\tkpme\OneDrive\Documents\Work\CMC\BP Satya and Suresh\Code\Naveen_peak_detect test.py", line 75, in main
raw_data[new_col] = raw_data['BP'].rolling(FILTER_WINDOW, min_periods=1, center=True, win_type=None,
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 1961, in aggregate
return super().aggregate(func, *args, **kwargs)
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 523, in aggregate
return self.apply(func, raw=False, args=args, kwargs=kwargs)
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 1987, in apply
return super().apply(
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 1300, in apply
return self._apply(
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 507, in _apply
result = calc(values)
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 495, in calc
return func(x, start, end, min_periods)
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\window\rolling.py", line 1326, in apply_func
return window_func(values, begin, end, min_periods)
File "pandas\_libs\window\aggregations.pyx", line 1375, in pandas._libs.window.aggregations.roll_generic_fixed
File "c:\Users\tkpme\OneDrive\Documents\Work\CMC\BP Satya and Suresh\Code\Naveen_peak_detect test.py", line 222, in hodgesLehmannMean
m = np.add.outer(x, x)
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\series.py", line 705, in __array_ufunc__
return construct_return(result)
File "C:\Users\tkpme\miniconda3\lib\site-packages\pandas\core\series.py", line 694, in construct_return
raise NotImplementedError
NotImplementedError
which appears to be driven by the line
m = np.add.outer(x, x)
and points to something not being implemented, or to numpy being missing. But I import numpy right at the beginning, as follows:
import numpy as np
import pandas as pd
The function works perfectly well on its own if I feed it a list or a numpy array, so I'm not sure what the problem is. Interestingly, if I use the median instead of the Hodges-Lehmann mean, it runs like a charm:
raw_data[new_col] = raw_data['BP'].rolling(21, min_periods=1, center=True,
                                           win_type=None, axis=0, closed=None).median()
What is the cause of my problem, and how do I fix it?
Sincerely
Thomas Philips
I've tried your code with a small dataframe and it worked well, so maybe there is something in your dataframe that must be cleaned or transformed.
Solved it. It turns out that
m = np.add.outer(x, x)
requires x to be array-like. When I tested it using lists, numpy arrays, etc., it worked perfectly, just as it did for you. But the .rolling line generates a slice of a dataframe, which is not array-like, and the function fails with a confusing error message. I modified the function to create a numpy array from the input, and it now works as it should.
def hodgesLehmannMean(x):
    x_array = np.array(x)
    m = np.add.outer(x_array, x_array)
    ind = np.tril_indices(len(x_array), 0)
    return 0.5 * np.median(m[ind])
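As a quick sanity check that the modified function now works inside .rolling().agg() (toy data, not the original dataframe):

s = pd.Series([1.0, 2.0, 100.0, 3.0, 4.0])
# Each window arrives as a Series; np.array() inside the function converts
# it before np.add.outer runs, so no NotImplementedError is raised.
print(s.rolling(3, min_periods=1, center=True).agg(hodgesLehmannMean))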
Thanks for looking at it!
I was trying to use this code as-is on TensorFlow 1.13.1. However, it throws the following error:
sherlock#mybox:~/cs273/autocat/bert$ python streaming2.py
Traceback (most recent call last):
File "streaming2.py", line 233, in <module>
tf_f1 = tf_f1_score(t, p)
File "streaming2.py", line 161, in tf_f1_score
f1s[2] = tf.reduce_sum(f1 * weights)
File "/home/sherlock/.virtualenvs/autocat/local/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 812, in binary_op_wrapper
return func(x, y, name=name)
File "/home/sherlock/.virtualenvs/autocat/local/lib/python2.7/site-packages/tensorflow/python/ops/math_ops.py", line 1078, in _mul_dispatch
return gen_math_ops.mul(x, y, name=name)
File "/home/sherlock/.virtualenvs/autocat/local/lib/python2.7/site-packages/tensorflow/python/ops/gen_math_ops.py", line 5860, in mul
"Mul", x=x, y=y, name=name)
File "/home/sherlock/.virtualenvs/autocat/local/lib/python2.7/site-packages/tensorflow/python/framework/op_def_library.py", line 547, in _apply_op_helper
inferred_from[input_arg.type_attr]))
TypeError: Input 'y' of 'Mul' Op has type float64 that does not match type int64 of argument 'x'.
I tried fixing the casts for some time, but failed to find a minimal change that makes the code work. Can anyone please help me with this?
I could reproduce your error: it happens with Python 2 but not 3.
So either switch to Python 3, or change the code with tf.cast:
f1 = tf.cast(f1, tf.float64)
f1s[2] = tf.reduce_sum(f1 * weights)
and maybe in other locations, but that's the idea.
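The same pattern applies anywhere the graph mixes dtypes. A minimal standalone reproduction of the fix, independent of the question's script (TF 1.x style; the type error is raised at graph-construction time, so no session run is needed to see it):

import tensorflow as tf

x = tf.constant([1, 2, 3], dtype=tf.int64)
w = tf.constant([0.5, 0.25, 0.25], dtype=tf.float64)

# Mul requires both operands to share a dtype, so cast the int64 tensor first.
y = tf.reduce_sum(tf.cast(x, tf.float64) * w)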
For the past few days I have been having an issue with serializing data to the tfrecord format and then deserializing it using parse_single_sequence_example. I am attempting to retrieve data for use with a fairly standard RNN model; however, this is my first attempt at using the tfrecords format and the associated pipeline that goes with it.
Here is a toy example to reproduce the issue I am having:
import tensorflow as tf
import tempfile
from IPython import embed
sequences = [[1, 2, 3], [4, 5, 1], [1, 2]]
label_sequences = [[0, 1, 0], [1, 0, 0], [1, 1]]
def make_example(sequence, labels):
    ex = tf.train.SequenceExample()
    sequence_length = len(sequence)
    ex.context.feature["length"].int64_list.value.append(sequence_length)
    fl_tokens = ex.feature_lists.feature_list["tokens"]
    fl_labels = ex.feature_lists.feature_list["labels"]
    for token, label in zip(sequence, labels):
        fl_tokens.feature.add().int64_list.value.append(token)
        fl_labels.feature.add().int64_list.value.append(label)
    return ex
writer = tf.python_io.TFRecordWriter('./test.tfrecords')
for sequence, label_sequence in zip(sequences, label_sequences):
    ex = make_example(sequence, label_sequence)
    writer.write(ex.SerializeToString())
writer.close()
tf.reset_default_graph()
file_name_queue = tf.train.string_input_producer(['./test.tfrecords'], num_epochs=None)
reader = tf.TFRecordReader()
context_features = {
    "length": tf.FixedLenFeature([], dtype=tf.int64)
}
sequence_features = {
    "tokens": tf.FixedLenSequenceFeature([], dtype=tf.int64),
    "labels": tf.FixedLenSequenceFeature([], dtype=tf.int64)
}
ex = reader.read(file_name_queue)
# Parse the example (returns a dictionary of tensors)
context_parsed, sequence_parsed = tf.parse_single_sequence_example(
    serialized=ex,
    context_features=context_features,
    sequence_features=sequence_features
)
context = tf.contrib.learn.run_n(context_parsed, n=1, feed_dict=None)
print(context[0])
sequence = tf.contrib.learn.run_n(sequence_parsed, n=1, feed_dict=None)
print(sequence[0])
The associated stack trace is:
Traceback (most recent call last):
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/common_shapes.py", line 594, in call_cpp_shape_fn
status)
File "/usr/lib/python3.5/contextlib.py", line 66, in exit
next(self.gen)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/errors.py", line 463, in raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors.InvalidArgumentError: Shape must be rank 0 but is rank 1
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "my_test.py", line 51, in
sequence_features=sequence_features
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/parsing_ops.py", line 640, in parse_single_sequence_example
feature_list_dense_defaults, example_name, name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/parsing_ops.py", line 837, in _parse_single_sequence_example_raw
name=name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/gen_parsing_ops.py", line 285, in _parse_single_sequence_example
name=name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/op_def_library.py", line 749, in apply_op
op_def=op_def)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 2382, in create_op
set_shapes_for_outputs(ret)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 1783, in set_shapes_for_outputs
shapes = shape_func(op)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/common_shapes.py", line 596, in call_cpp_shape_fn
raise ValueError(err.message)
ValueError: Shape must be rank 0 but is rank 1
I posted this as a potential issue over on GitHub, though it seems I may just be using it incorrectly: Tensorflow Github Issue
So with the background information out of the way: am I in fact making an error here? Any help in the right direction would be greatly appreciated; it's been a few days and my poking around hasn't panned out. Thanks all!
Got it; it was a bad assumption on my part. tf.TFRecordReader.read(queue, name=None) returns a (key, value) tuple, while I assumed it returned just the value, which I was passing directly into the example parser.
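In code, the fix is just unpacking the tuple before parsing:

# reader.read() returns a (key, value) pair; only the value holds the
# serialized SequenceExample.
key, value = reader.read(file_name_queue)

context_parsed, sequence_parsed = tf.parse_single_sequence_example(
    serialized=value,
    context_features=context_features,
    sequence_features=sequence_features
)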