Feeding example to tf predictor.from_saved_model() for estimator trained with tf hub module - tensorflow

I am trying to export a text classification model built with TF Hub modules, and then run inference on a single string example using predictor.from_saved_model(). I have seen some examples of similar ideas, but I still couldn't make it work for the case where TF Hub modules are used to build the features. Here is what I do:
train_input_fn = tf.estimator.inputs.pandas_input_fn(
    train_df, train_df['label_ids'], num_epochs=None, shuffle=True)

# Prediction on the whole training set.
predict_train_input_fn = tf.estimator.inputs.pandas_input_fn(
    train_df, train_df['label_ids'], shuffle=False)

embedded_text_feature_column = hub.text_embedding_column(
    key='sentence',
    module_spec='https://tfhub.dev/google/nnlm-de-dim128/1')

# Estimator
estimator = tf.estimator.DNNClassifier(
    hidden_units=[500, 100],
    feature_columns=[embedded_text_feature_column],
    n_classes=num_of_class,
    optimizer=tf.train.AdagradOptimizer(learning_rate=0.003))

# Training
estimator.train(input_fn=train_input_fn, steps=1000)

# Evaluation on the training set
train_eval_result = estimator.evaluate(input_fn=predict_train_input_fn)
print('Training set accuracy: {accuracy}'.format(**train_eval_result))

# Export
feature_spec = tf.feature_column.make_parse_example_spec([embedded_text_feature_column])
serving_input_receiver_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec)
export_dir_base = self.cfg['model_path']
servable_model_path = estimator.export_savedmodel(export_dir_base, serving_input_receiver_fn)
# Example message for inference
message = "Was ist denn los"
saved_model_predictor = predictor.from_saved_model(export_dir=servable_model_path)
content_tf_list = tf.train.BytesList(value=[str.encode(message)])
example = tf.train.Example(
    features=tf.train.Features(
        feature={
            'sentence': tf.train.Feature(bytes_list=content_tf_list)
        }
    )
)

# Write the example to disk, then read it back through a TFRecordReader.
with tf.python_io.TFRecordWriter('the_message.tfrecords') as writer:
    writer.write(example.SerializeToString())

reader = tf.TFRecordReader()
data_path = 'the_message.tfrecords'
filename_queue = tf.train.string_input_producer([data_path], num_epochs=1)
_, serialized_example = reader.read(filename_queue)

output_dict = saved_model_predictor({'inputs': [serialized_example]})
And the output:
Traceback (most recent call last):
File "/Users/dimitrs/component-pythia/src/pythia.py", line 321, in _train
model = algo.generate_model(samples, generation_id)
File "/Users/dimitrs/component-pythia/src/algorithm_layer/algorithm.py", line 56, in generate_model
model = self._process_training(samples, generation)
File "/Users/dimitrs/component-pythia/src/algorithm_layer/tf_hub_classifier.py", line 91, in _process_training
output_dict = saved_model_predictor({'inputs': [serialized_example]})
File "/Users/dimitrs/anaconda3/envs/pythia/lib/python3.6/site-packages/tensorflow/contrib/predictor/predictor.py", line 77, in __call__
return self._session.run(fetches=self.fetch_tensors, feed_dict=feed_dict)
File "/Users/dimitrs/anaconda3/envs/pythia/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 900, in run
run_metadata_ptr)
File "/Users/dimitrs/anaconda3/envs/pythia/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1135, in _run
feed_dict_tensor, options, run_metadata)
File "/Users/dimitrs/anaconda3/envs/pythia/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1316, in _do_run
run_metadata)
File "/Users/dimitrs/anaconda3/envs/pythia/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1335, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InternalError: Unable to get element as bytes.
Isn't serialized_example the right input, as suggested by serving_input_receiver_fn?

So, all I needed was serialized_example = example.SerializeToString().
Writing the example to a file and reading it back would require starting a session; simply serializing it is enough:
# Example message for inference
message = "Was ist denn los"
saved_model_predictor = predictor.from_saved_model(export_dir=servable_model_path)
content_tf_list = tf.train.BytesList(value=[message.encode('utf-8')])
sentence = tf.train.Feature(bytes_list=content_tf_list)
sentence_dict = {'sentence': sentence}
features = tf.train.Features(feature=sentence_dict)
example = tf.train.Example(features=features)
serialized_example = example.SerializeToString()
output_dict = saved_model_predictor({'inputs': [serialized_example]})
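For reference, the returned output_dict maps the signature's output names to batched values. The exact key names depend on the export, so rather than assuming them, you can inspect the predictor's feed and fetch tensors (these properties exist on tf.contrib.predictor's Predictor class, as the traceback above also shows):

# Inspect what the loaded signature actually expects and returns.
print(saved_model_predictor.feed_tensors)   # e.g. {'inputs': <tf.Tensor ...>}
print(saved_model_predictor.fetch_tensors)  # for a DNNClassifier classification
                                            # signature, typically classes/scores

output_dict = saved_model_predictor({'inputs': [serialized_example]})
print(output_dict)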

Related

InvalidArgumentError : ConcatOp : Dimensions of inputs should match

TensorFlow 1.7, using dynamic_rnn. It runs fine at first, but at around the 32nd step (the exact step changes between runs) the error appears. When I used a smaller batch, the code seemed to run longer, but the error still popped up. I just can't figure out what's wrong.
from mapping import *

def my_input_fn(features, targets, batch_size=20, shuffle=True, num_epochs=None, sequece_lenth=None):
    ds = tf.data.Dataset.from_tensor_slices(
        (features, targets, sequece_lenth))  # warning: 2GB limit
    ds = ds.batch(batch_size).repeat(num_epochs)
    if shuffle:
        ds = ds.shuffle(10000)
    features, labels, sequence = ds.make_one_shot_iterator().get_next()
    return features, labels, sequence

def lstm_cell(lstm_size=50):
    return tf.contrib.rnn.BasicLSTMCell(lstm_size)

class RnnModel:
    def __init__(self,
                 batch_size,
                 hidden_units,
                 time_steps,
                 num_features):
        self.batch_size = batch_size
        self.hidden_units = hidden_units
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [lstm_cell(i) for i in self.hidden_units])
        self.initial_state = stacked_lstm.zero_state(batch_size, tf.float32)
        self.model = stacked_lstm
        self.state = self.initial_state
        self.time_steps = time_steps
        self.num_features = num_features

    def loss_mean_squre(self, outputs, targets):
        pos = tf.add(outputs, tf.ones(self.batch_size))
        eve = tf.div(pos, 2)
        error = tf.subtract(eve, targets)
        return tf.reduce_mean(tf.square(error))

    def train(self,
              num_steps,
              learningRate,
              input_fn,
              inputs,
              targets,
              sequenceLenth):
        periods = 10
        step_per_periods = int(num_steps / periods)
        input, target, sequence = input_fn(inputs, targets, self.batch_size, shuffle=True, sequece_lenth=sequenceLenth)
        initial_state = self.model.zero_state(self.batch_size, tf.float32)
        outputs, state = tf.nn.dynamic_rnn(self.model, input, initial_state=initial_state)
        loss = self.loss_mean_squre(tf.reshape(outputs, [self.time_steps, self.batch_size])[-1], target)
        optimizer = tf.train.AdamOptimizer(learning_rate=learningRate)
        grads_and_vars = optimizer.compute_gradients(loss, self.model.variables)
        optimizer.apply_gradients(grads_and_vars)
        init_op = tf.global_variables_initializer()
        with tf.Session() as sess:
            for i in range(num_steps):
                sess.run(init_op)
                state2, current_loss = sess.run([state, loss])
                if i % step_per_periods == 0:
                    print("period " + str(int(i / step_per_periods)) + ":" + str(current_loss))
        return self.model, self.state

def processFeature(df):
    df = df.drop('class', 1)
    features = []
    for i in range(len(df["vecs"])):
        features.append(df["vecs"][i])
    aa = pd.Series(features).tolist()  # transform into list
    featuresList = []
    for i in features:
        p1 = []
        for k in i:
            p1.append(list(k))
        featuresList.append(p1)
    return featuresList

def processTargets(df):
    selected_features = df["class"]
    processed_features = selected_features.copy()
    return tf.convert_to_tensor(processed_features.astype(float).tolist())

if __name__ == '__main__':
    dividNumber = 30
    """
    some code here to modify my data to input
    it looks like this:
    inputs before use input function : [fullLenth, charactorLenth, embeddinglenth]
    """
    model = RnnModel(15, [100, 80, 80, 1], time_steps=dividNumber, num_features=25)
    model.train(5000, 0.0001, my_input_fn, training_examples, training_targets, sequenceLenth=trainSequenceL)
And the error is below:
Traceback (most recent call last):
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1330, in _do_call
return fn(*args)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1315, in _run_fn
options, feed_dict, fetch_list, target_list, run_metadata)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1423, in _call_tf_sessionrun
status, run_metadata)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\errors_impl.py", line 516, in __exit__
c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [20,25] vs. shape[1] = [30,100]
[[Node: rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](rnn/while/TensorArrayReadV3, rnn/while/Switch_4:1, rnn/while/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/Const)]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:/programming/mlwords/dnn_gragh.py", line 198, in <module>
model.train(5000, 0.0001, my_input_fn, training_examples, training_targets, sequenceLenth=trainSequenceL)
File "D:/programming/mlwords/dnn_gragh.py", line 124, in train
state2, current_loss, nowAccuracy = sess.run([state, loss, accuracy])
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 908, in run
run_metadata_ptr)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1143, in _run
feed_dict_tensor, options, run_metadata)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1324, in _do_run
run_metadata)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\client\session.py", line 1343, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: ConcatOp : Dimensions of inputs should match: shape[0] = [20,25] vs. shape[1] = [30,100]
[[Node: rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](rnn/while/TensorArrayReadV3, rnn/while/Switch_4:1, rnn/while/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/Const)]]
Caused by op 'rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat', defined at:
File "D:/programming/mlwords/dnn_gragh.py", line 198, in <module>
model.train(5000, 0.0001, my_input_fn, training_examples, training_targets, sequenceLenth=trainSequenceL)
File "D:/programming/mlwords/dnn_gragh.py", line 95, in train
outputs, state = tf.nn.dynamic_rnn(self.model, input, initial_state=initial_state)#,sequence_length=sequence
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 627, in dynamic_rnn
dtype=dtype)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 824, in _dynamic_rnn_loop
swap_memory=swap_memory)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 3205, in while_loop
result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2943, in BuildLoop
pred, body, original_loop_vars, loop_vars, shape_invariants)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 2880, in _BuildLoop
body_result = body(*packed_vars_for_body)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\control_flow_ops.py", line 3181, in <lambda>
body = lambda i, lv: (i + 1, orig_body(*lv))
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 795, in _time_step
(output, new_state) = call_cell()
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn.py", line 781, in <lambda>
call_cell = lambda: cell(input_t, state)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 232, in __call__
return super(RNNCell, self).__call__(inputs, state)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\layers\base.py", line 714, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 1283, in call
cur_inp, new_state = cell(cur_inp, cur_state)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 339, in __call__
*args, **kwargs)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\layers\base.py", line 714, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py", line 620, in call
array_ops.concat([inputs, h], 1), self._kernel)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1181, in concat
return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 1101, in concat_v2
"ConcatV2", values=values, axis=axis, name=name)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\ops.py", line 3309, in create_op
op_def=op_def)
File "D:\Anaconda3\envs\tensorflow-cpu\lib\site-packages\tensorflow\python\framework\ops.py", line 1669, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): ConcatOp : Dimensions of inputs should match: shape[0] = [20,25] vs. shape[1] = [30,100]
[[Node: rnn/while/rnn/multi_rnn_cell/cell_0/basic_lstm_cell/concat = ConcatV2[N=2, T=DT_FLOAT, Tidx=DT_INT32, _device="/job:localhost/replica:0/task:0/device:CPU:0"](rnn/while/TensorArrayReadV3, rnn/while/Switch_4:1, rnn/while/rnn/multi_rnn_cell/cell_3/basic_lstm_cell/Const)]]
This is the code I used to check my input:
def checkData(inputs, targets, sequencelence):
    batch_size = 20
    features, target, sequece = my_input_fn(inputs, targets, batch_size=batch_size, shuffle=True, num_epochs=None,
                                            sequece_lenth=sequencelence)
    with tf.Session() as sess:
        for i in range(1000):
            features1, target1, sequece1 = sess.run([features, target, sequece])
            assert len(features1) == batch_size
            for sentence in features1:
                assert len(sentence) == 30
                for word in sentence:
                    assert len(word) == 25
            assert len(target1) == batch_size
            assert len(sequece1) == batch_size
            print(target1)
    print("OK")
The error is coming from the LSTMCell's call method. There we are trying to tf.concat([inputs, h], 1), meaning that we want to concatenate the next input with the current hidden state before matmul'ing with the kernel variables matrix. The error is saying that you can't do it because the batch (0th) dimensions don't match up: your input is shaped [20,25] and your hidden state is shaped [30,100].
For some reason, on your 32nd iteration (or whenever you see the error) the input is not batched to 30 but only to 20. This usually happens at the end of your training data, when the total number of training examples does not evenly divide your batch size. This hypothesis is also consistent with your "when I used a smaller batch, it seems the code can run longer" observation.
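One way to test that hypothesis is to drop the final, smaller batch in my_input_fn. A minimal sketch using the TF 1.x contrib API (newer versions can use ds.batch(batch_size, drop_remainder=True) instead):

def my_input_fn(features, targets, batch_size=20, shuffle=True, num_epochs=None, sequece_lenth=None):
    ds = tf.data.Dataset.from_tensor_slices(
        (features, targets, sequece_lenth))  # warning: 2GB limit
    # Drop the last partial batch so every batch matches the size that
    # zero_state was built with.
    ds = ds.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
    ds = ds.repeat(num_epochs)
    if shuffle:
        ds = ds.shuffle(10000)
    features, labels, sequence = ds.make_one_shot_iterator().get_next()
    return features, labels, sequence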
I had the same issue. When I corrected the image input size to match the input shape, it ran without errors.

OutOfRangeError when creating batch from tfrecord file

I'm writing a script that saves certain features of my data to tfrecord. The features are numpy arrays (float32). When I read the tfrecord file I get the following error:
OutOfRangeError (see above for traceback): RandomShuffleQueue '_1_shuffle_batch/random_shuffle_queue' is closed and has insufficient elements (requested 20, current size 0)
[[Node: shuffle_batch = QueueDequeueManyV2[component_types=[DT_UINT8, DT_UINT8], timeout_ms=-1, _device="/job:localhost/replica:0/task:0/device:CPU:0"](shuffle_batch/random_shuffle_queue, shuffle_batch/n)]]
I searched a lot, and apparently this error can be caused by different things. So far, I was not able to fix it. I recreated the problem with the following minimal code:
Saving the toy data:

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

writer = tf.python_io.TFRecordWriter('stuff.tfrecords')
for i in range(100):
    seq = np.random.uniform(size=(500, 300)).astype(np.float32)
    lbl = np.random.uniform(size=(90, 1)).astype(np.float32)
    feature = {'train/lbl': _bytes_feature(tf.compat.as_bytes(lbl.tostring())),
               'train/seq': _bytes_feature(tf.compat.as_bytes(seq.tostring()))}
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    writer.write(example.SerializeToString())
writer.close()
sys.stdout.flush()
Reading the data:
def read_and_decode_single_example(filename):
    filename_queue = tf.train.string_input_producer([filename], num_epochs=1)
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)
    f = {'train/lbl': tf.FixedLenFeature([], tf.string),
         'train/seq': tf.FixedLenFeature([], tf.string)}
    features = tf.parse_single_example(serialized_example, features=f)
    seq = tf.decode_raw(features['train/seq'], tf.float32)
    lbl = tf.decode_raw(features['train/lbl'], tf.float32)
    seq = tf.reshape(seq, [500, 300])
    lbl = tf.reshape(lbl, [90, 1])
    sbatch, lbatch = tf.train.shuffle_batch([seq, lbl],
                                            batch_size=batch_size,
                                            capacity=3 * batch_size,
                                            min_after_dequeue=batch_size)
    return sbatch, lbatch

sbatch, lbatch = read_and_decode_single_example("stuff.tfrecords")

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    s, l = sess.run([sbatch, lbatch])
    coord.request_stop()
    coord.join(threads)
I'm using Tensorflow-GPU v. 1.4.0.
Here is some more of the error output, which may be informative:
Caused by op 'shuffle_batch', defined at:
File "teststuff.py", line 59, in <module>
sbatch, lbatch = read_and_decode_single_example("stuff.tfrecords" )
File "teststuff.py", line 54, in read_and_decode_single_example
min_after_dequeue=batch_size)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/training/input.py", line 1225, in shuffle_batch
name=name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/training/input.py", line 796, in _shuffle_batch
dequeued = queue.dequeue_many(batch_size, name=name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/data_flow_ops.py", line 464, in dequeue_many
self._queue_ref, n=n, component_types=self._dtypes, name=name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/gen_data_flow_ops.py", line 2418, in _queue_dequeue_many_v2
component_types=component_types, timeout_ms=timeout_ms, name=name)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 2956, in create_op
op_def=op_def)
File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/ops.py", line 1470, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
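In case it helps: one plausible cause (an assumption from reading the snippet, not verified against the original setup) is the order of operations in the session block. With num_epochs=1, string_input_producer keeps its epoch counter in a local variable, so starting the queue runners before running the initializers can close the queue immediately and produce exactly this OutOfRangeError. A sketch with the initializers moved first:

with tf.Session() as sess:
    # Initialize variables *before* starting the input threads; the epoch
    # counter used by string_input_producer is a local variable.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    s, l = sess.run([sbatch, lbatch])
    coord.request_stop()
    coord.join(threads)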

InvalidArgumentError: Retval[0] does not have value is Thrown when Using tflearn Trainer

I followed this example on using tflearn trainer and coded this:
image_paths, labels = dataset_utils.read_dataset_list('../test/dummy_labels_file.txt')
data_dir = "../test/dummy_data/"
images = dataset_utils.read_images(data_dir=data_dir, image_paths=image_paths, image_extension='png')
print('Done reading images')
images = dataset_utils.resize(images, (1596, 48))
images = dataset_utils.transpose(images)
labels = dataset_utils.encode(labels)
x_train, x_test, y_train, y_test = dataset_utils.split(features=images, test_size=0.5, labels=labels)
...  # parameters initialized here
with tf.Graph().as_default():
    X = tf.placeholder(tf.float32, [None, None, num_features])
    Y = tf.placeholder(tf.int32)
    sparse_Y = network_utils.dense_to_sparse(Y, num_classes)
    seq_lens = tf.placeholder(tf.int32, [None])

    def dnn(x):
        layer = network_utils.bidirectional_grid_lstm(inputs=x, num_hidden=num_hidden_units)
        layer = network_utils.get_time_major(inputs=layer, batch_size=network_utils.get_shape(x)[0],
                                             num_classes=num_classes, num_hidden_units=num_hidden_units * 2)
        return layer

    net = dnn(X)
    cost = network_utils.cost(network_utils.ctc_loss(inputs=net, labels=sparse_Y, sequence_length=seq_lens))
    optimizer = network_utils.get_optimizer(learning_rate=learning_rate, optimizer_name=optimizer_name)
    train_op = tflearn.TrainOp(loss=cost, optimizer=optimizer)
    trainer = tflearn.Trainer(train_ops=train_op)
    trainer.fit(feed_dicts={X: x_train, Y: y_train, seq_lens: dataset_utils.get_seq_lens(x_train)},
                val_feed_dicts={X: x_test, Y: y_test, seq_lens: dataset_utils.get_seq_lens(x_test)},
                n_epoch=1)  # error happens here
The training starts when I run it but I encounter this error:
Traceback (most recent call last):
File ".../Optimized_OCR/main/train_using_tflearn_trainer.py", line 53, in <module>
tf.app.run(main=main)
File "...\tensorflow\python\platform\app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File ".../Optimized_OCR/main/train_using_tflearn_trainer.py", line 49, in main
n_epoch=1)
File "...\tflearn\helpers\trainer.py", line 338, in fit
show_metric)
File "...\tflearn\helpers\trainer.py", line 817, in _train
feed_batch)
File "...\tensorflow\python\client\session.py", line 889, in run
run_metadata_ptr)
File "...\tensorflow\python\client\session.py", line 1120, in _run
feed_dict_tensor, options, run_metadata)
File "...\tensorflow\python\client\session.py", line 1317, in _do_run
options, run_metadata)
File "...\tensorflow\python\client\session.py", line 1336, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Retval[0] does not have value
Has anyone else encountered this? How do I fix it? I want to use the tflearn trainer to have an easier time training and testing my OCR models, and I think this is the only thing I need to fix to be able to use it.

String input to categorical column via dataset

I'm trying to learn how to use the Estimator API, using input_fn to provide Dataset-backed input to a feature_column-generated input layer.
My code looks like:
import tensorflow as tf
import random

tf.logging.set_verbosity(tf.logging.DEBUG)

def input_fn():
    def gen():
        for i in range(100000):
            for j in range(10):
                yield {"in": str(j)}, [str(j + 1)]
    data = tf.data.Dataset.from_generator(gen, ({"in": tf.string}, tf.string))
    data = data.batch(10)
    iterator = data.make_one_shot_iterator()
    return iterator.get_next()

vocabulary_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(
    key="in",
    vocabulary_list=map(lambda i: str(i), range(11)))

embedding_column = tf.feature_column.embedding_column(
    categorical_column=vocabulary_feature_column,
    dimension=2)

with tf.Session() as sess:
    print(sess.run(input_fn()))

classifier = tf.estimator.DNNClassifier(
    feature_columns=[embedding_column],
    hidden_units=[5, 5],
    n_classes=11,
    model_dir='/tmp/predict/snap')

classifier.train(input_fn=input_fn)
but running it I get
Traceback (most recent call last):
File "predict.py", line 33, in
input_fn=input_fn)
File "/usr/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 302, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "/usr/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 711, in _train_model
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "/usr/lib/python2.7/site-packages/tensorflow/python/estimator/estimator.py", line 694, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "/usr/lib/python2.7/site-packages/tensorflow/python/estimator/canned/dnn.py", line 334, in _model_fn
config=config)
File "/usr/lib/python2.7/site-packages/tensorflow/python/estimator/canned/dnn.py", line 190, in _dnn_model_fn
logits = logit_fn(features=features, mode=mode)
File "/usr/lib/python2.7/site-packages/tensorflow/python/estimator/canned/dnn.py", line 89, in dnn_logit_fn
features=features, feature_columns=feature_columns)
File "/usr/lib/python2.7/site-packages/tensorflow/python/feature_column/feature_column.py", line 230, in input_layer
trainable=trainable)
File "/usr/lib/python2.7/site-packages/tensorflow/python/feature_column/feature_column.py", line 1837, in _get_dense_tensor
inputs, weight_collections=weight_collections, trainable=trainable)
File "/usr/lib/python2.7/site-packages/tensorflow/python/feature_column/feature_column.py", line 2123, in _get_sparse_tensors
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
File "/usr/lib/python2.7/site-packages/tensorflow/python/feature_column/feature_column.py", line 1533, in get
transformed = column._transform_feature(self) # pylint: disable=protected-access
File "/usr/lib/python2.7/site-packages/tensorflow/python/feature_column/feature_column.py", line 2091, in _transform_feature
input_tensor = _to_sparse_input(inputs.get(self.key))
File "/usr/lib/python2.7/site-packages/tensorflow/python/feature_column/feature_column.py", line 1631, in _to_sparse_input
raise ValueError('Undefined input_tensor shape.')
ValueError: Undefined input_tensor shape.
Looking at the tf sources, I get the impression that categorical_column_with_vocabulary_list expects a tensor rather than a plain string, but I have a hard time understanding how to make my input_fn provide that in the right way.
Does anyone have any idea what I'm doing wrong here?
As a comparison, the following code works perfectly fine: https://pastebin.com/28QUNAjA
EDIT
I noticed that replacing tf.data.Dataset.from_generator with tf.data.Dataset.from_tensor_slices makes the code run.
I.e. the following actually works:
import tensorflow as tf
import random

tf.logging.set_verbosity(tf.logging.DEBUG)

def input_fn():
    data = tf.data.Dataset.from_tensor_slices(({"in": map(lambda i: str(i), range(10))}, range(1, 11)))
    data = data.repeat(1000)
    data = data.batch(10)
    iterator = data.make_one_shot_iterator()
    return iterator.get_next()

vocabulary_feature_column = tf.feature_column.categorical_column_with_vocabulary_list(
    key="in",
    vocabulary_list=map(lambda i: str(i), range(11)))

embedding_column = tf.feature_column.embedding_column(
    categorical_column=vocabulary_feature_column,
    dimension=2)

with tf.Session() as sess:
    print(sess.run(input_fn()))

classifier = tf.estimator.DNNClassifier(
    feature_columns=[embedding_column],
    hidden_units=[5, 5],
    n_classes=11,
    model_dir='/usr/local/google/home/zond/tmp/predict/snap')

classifier.train(input_fn=input_fn)
This ought to be a bug, so I created https://github.com/tensorflow/tensorflow/issues/15178.
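For what it's worth, a possible workaround that keeps from_generator (a sketch, assuming the failure comes from the generator's undefined output shapes rather than from from_generator itself) is to pass explicit output_shapes, so the feature column sees a fully defined input shape:

def input_fn():
    def gen():
        for i in range(100000):
            for j in range(10):
                yield {"in": str(j)}, [str(j + 1)]
    # Declaring output_shapes gives the "in" tensor a known scalar shape,
    # which the feature column's sparse conversion needs.
    data = tf.data.Dataset.from_generator(
        gen,
        ({"in": tf.string}, tf.string),
        output_shapes=({"in": tf.TensorShape([])}, tf.TensorShape([None])))
    data = data.batch(10)
    return data.make_one_shot_iterator().get_next()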

TensorFlow creating an AI, error: You must feed a value for placeholder tensor 'input_1/X'

I am currently working on an AI for OpenAI Gym. I am trying to use randomly collected data to build a neural network model, then use that model to create new data. When I try to make another model using the newly trained data, it won't let me create a new model and gives an
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input_1/X' with dtype float
[[Node: input_1/X = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]].
my code:
import gym
import random
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
from statistics import median, mean
from collections import Counter
import matplotlib.pyplot as plt

env = gym.make("CartPole-v1")
env.reset()  # restarts the environment

epoch = 5
LR = 2e-4
max_score = 500
number_of_training_games = 100
generations = 3
training_scores = []
random_gen_score = []

def create_random_training_data():
    x = 0
    accepted_training_data = []
    scores_and_data = []
    array_of_scores = []
    for i in range(number_of_training_games):
        score = 0
        prev_observation = []
        training_data = []
        for _ in range(max_score):
            action = env.action_space.sample()
            observation, reward, done, info = env.step(action)
            if len(prev_observation) > 0:
                training_data.append([prev_observation, action])
            prev_observation = observation
            score += reward
            if done:
                array_of_scores.append(score)
                break
        for i in training_data:
            scores_and_data.append([score, i[0], i[1]])
        # reset environment
        env.reset()
    training_scores = array_of_scores
    for data in scores_and_data:
        if data[0] > median(array_of_scores):
            if data[2] == 1:
                output = [0, 1]
            elif data[2] == 0:
                output = [1, 0]
            accepted_training_data.append([data[1], output])
    return accepted_training_data

def training_model(sample_data):
    inputs = np.array([i[0] for i in sample_data]).reshape(-1, 4, 1)
    correct_output = [i[1] for i in sample_data]
    model = neural_network(input_size=len(inputs[0]))
    model.fit({'input': inputs}, {'targets': correct_output}, n_epoch=epoch, snapshot_step=500, show_metric=True, run_id='openai_learning')
    print(input)
    return model

def neural_network(input_size):
    # this is where our observation data will go
    network = input_data(shape=[None, input_size, 1], name='input')
    # our neural networks
    network = fully_connected(network, 128, activation='relu')
    # dropout is used to drop random nodes in order to reduce overfitting
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 512, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='relu')
    network = dropout(network, 0.8)
    network = fully_connected(network, 128, activation='relu')
    network = dropout(network, 0.8)
    # this is the output
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(network, tensorboard_dir='log')
    return model

def run_generation():
    random_sample_data = []
    trained_data = []
    for i in range(generations):
        if len(random_sample_data) == 0:
            random_sample_data = create_random_training_data()
            model1 = training_model(random_sample_data)
        else:
            trained_data = one_generation(model1)
            model2 = training_model(trained_data)
    return model2

def one_generation(model):
    accepted_training_data = []
    scores_and_data = []
    array_of_scores = []
    for i in range(number_of_training_games):
        score = 0
        prev_observation = []
        training_data = []
        for _ in range(max_score):
            if len(prev_observation) == 0:
                action = env.action_space.sample()
            else:
                action = np.argmax(model.predict(prev_observation.reshape(-1, len(prev_observation), 1))[0])
            observation, reward, done, info = env.step(action)
            if len(prev_observation) > 0:
                training_data.append([prev_observation, action])
            prev_observation = observation
            score += reward
            if done:
                array_of_scores.append(score)
                break
        for i in training_data:
            scores_and_data.append([score, i[0], i[1]])
        # reset environment
        env.reset()
    for data in scores_and_data:
        if data[0] > median(array_of_scores):
            if data[2] == 1:
                output = [0, 1]
            elif data[2] == 0:
                output = [1, 0]
            accepted_training_data.append([data[1], output])
    return accepted_training_data

def testing():
    scores = []
    model = run_generation()
    for _ in range(100):
        score = 0
        game_memory = []
        prev_obs = []
        env.reset()
        for _ in range(max_score):
            env.render()
            # first move is going to be random
            if len(prev_obs) == 0:
                action = random.randrange(0, 2)
            else:
                action = np.argmax(model.predict(prev_obs.reshape(-1, len(prev_obs), 1))[0])
            # records actions
            new_observation, reward, done, info = env.step(action)
            prev_obs = new_observation
            game_memory.append([new_observation, action])
            score += reward
            if done:
                break
        scores.append(score)
    # print('Average training Score:', sum(training_scores)/len(training_scores))
    print('Average Score:', sum(scores)/len(scores))
    print(scores)

testing()
error:
Traceback (most recent call last):
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1039, in _do_call
return fn(*args)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1021, in _run_fn
status, run_metadata)
File "/anaconda/lib/python3.6/contextlib.py", line 89, in __exit__
next(self.gen)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 466, in raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'input_1/X' with dtype float
[[Node: input_1/X = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 259, in <module>
testing()
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 219, in testing
model = run_generation()
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 148, in run_generation
model2 = training_model(trained_data)
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 92, in training_model
model.fit({'input': inputs}, {'targets': correct_output}, n_epoch=epoch , snapshot_step=500, show_metric=True, run_id='openai_learning')
File "/anaconda/lib/python3.6/site-packages/tflearn/models/dnn.py", line 215, in fit
callbacks=callbacks)
File "/anaconda/lib/python3.6/site-packages/tflearn/helpers/trainer.py", line 336, in fit
show_metric)
File "/anaconda/lib/python3.6/site-packages/tflearn/helpers/trainer.py", line 777, in _train
feed_batch)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 778, in run
run_metadata_ptr)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 982, in _run
feed_dict_string, options, run_metadata)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1032, in _do_run
target_list, options, run_metadata)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1052, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: You must feed a value for placeholder tensor 'input_1/X' with dtype float
[[Node: input_1/X = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'input_1/X', defined at:
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 259, in <module>
testing()
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 219, in testing
model = run_generation()
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 148, in run_generation
model2 = training_model(trained_data)
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 90, in training_model
model = neural_network(input_size = len(inputs[0]))
File "/Users/Duncan/Desktop/ai projects/evolutionDNN.py", line 100, in neural_network
network = input_data(shape=[None, input_size, 1], name = 'input')
File "/anaconda/lib/python3.6/site-packages/tflearn/layers/core.py", line 81, in input_data
placeholder = tf.placeholder(shape=shape, dtype=dtype, name="X")
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1507, in placeholder
name=name)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 1997, in _placeholder
name=name)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 768, in apply_op
op_def=op_def)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 2336, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/anaconda/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1228, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input_1/X' with dtype float
[[Node: input_1/X = Placeholder[dtype=DT_FLOAT, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
This line:
network = input_data(shape=[None, input_size, 1], name='input')
should be:
network = input_data(shape=[None, input_size, input_size, 1], name='input')
The shape should have four entries; the first (None) is taken as the batch placeholder dimension. Try this.
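If the error persists, another thing worth checking (an assumption based on the code structure, not verified against the original setup) is that run_generation builds a second tflearn network in the same default graph, which is why the second input placeholder is named input_1/X, matching the tensor in the error. A minimal sketch of resetting the graph before building each new model:

import tensorflow as tf

def training_model(sample_data):
    # Start from a fresh graph so the new network's placeholders do not
    # collide with (or dangle from) the previously built model's graph.
    tf.reset_default_graph()
    inputs = np.array([i[0] for i in sample_data]).reshape(-1, 4, 1)
    correct_output = [i[1] for i in sample_data]
    model = neural_network(input_size=len(inputs[0]))
    model.fit({'input': inputs}, {'targets': correct_output}, n_epoch=epoch,
              snapshot_step=500, show_metric=True, run_id='openai_learning')
    return model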