TensorForestEstimator ValueError when fitting the training data - TensorFlow

Code:
from sklearn import cross_validation as cv
import numpy as np
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.tensor_forest.client import random_forest  # provides TensorForestEstimator
from tensorflow.contrib.tensor_forest.python import tensor_forest

X = np.array([[  74., 166., 331., 161., 159., 181., 180.],
              [ 437., 427., 371., 361., 393., 465., 464.],
              [ 546., 564., 588., 595., 536., 537., 520.],
              [  89.,  89.,  87.,  87., 108., 113., 111.],
              [  75.,  90.,  74.,  89., 130., 140., 135.]])
Y = np.array([[ 51., 43., 29., 43., 43., 41., 42.],
              [ 22., 23., 26., 27., 25., 19., 19.],
              [  7.,  7.,  5.,  5.,  9.,  8., 10.],
              [ 55., 54., 55., 53., 51., 51., 51.],
              [ 58., 57., 57., 58., 55., 55., 55.]])

train_X, test_X, train_Y, test_Y = cv.train_test_split(
    X, Y, test_size=0.50, random_state=42)

def build_estimator():
    params = tensor_forest.ForestHParams(num_classes=7, num_features=7,
                                         num_trees=30, max_nodes=100)
    graph_builder_class = tensor_forest.RandomForestGraphs
    graph_builder_class = tensor_forest.TrainingLossForest
    return estimator.SKCompat(random_forest.TensorForestEstimator(
        params, graph_builder_class=graph_builder_class,
        model_dir=None))

est = build_estimator()
train_X = train_X.astype(dtype=np.float32)
train_Y = train_Y.astype(dtype=np.float32)
est = est.fit(x=train_X, y=train_Y, batch_size=100)
Both my input and output have shape [number_of_samples, 7]. The same data runs perfectly with scikit-learn's random forest classifier, but with tf.learn I get the following error when fitting the estimator:
INFO:tensorflow:Constructing forest with params =
INFO:tensorflow:{'valid_leaf_threshold': 1, 'split_after_samples':
250, 'num_output_columns': 8, 'feature_bagging_fraction': 1.0,
'split_initializations_per_input': 1, 'bagged_features': None,
'min_split_samples': 5, 'max_nodes': 100, 'num_features': 7,
'num_trees': 30, 'num_splits_to_consider': 7, 'base_random_seed': 0,
'num_outputs': 1, 'dominate_fraction': 0.99, 'max_fertile_nodes': 50,
'bagged_num_features': 7, 'dominate_method': 'bootstrap',
'bagging_fraction': 1.0, 'regression': False, 'num_classes': 7}
ValueError Traceback (most recent call last)
in ()
----> 1 est = est.fit(x=train_X, y=train_Y, batch_size = 100)
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc
in fit(self, x, y, batch_size, steps, max_steps, monitors)
1351 steps=steps,
1352 max_steps=max_steps,
--> 1353 monitors=all_monitors)
1354 return self
1355
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/client/random_forest.pyc
in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
262 elif input is not None:
263 self._estimator.fit(input_fn=input_fn, steps=steps, monitors=monitors,
--> 264 max_steps=max_steps)
265 else:
266 raise ValueError('fit: Must provide either both x and y or input_fn.')
/usr/local/lib/python2.7/dist-packages/tensorflow/python/util/deprecation.pyc
in new_func(*args, **kwargs)
278 _call_location(), decorator_utils.get_qualified_name(func),
279 func.__module__, arg_name, date, instructions)
--> 280 return func(*args, **kwargs)
281 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
282 func.__doc__, date, instructions)
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc
in fit(self, x, y, input_fn, steps, batch_size, monitors, max_steps)
424 hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
425
--> 426 loss = self._train_model(input_fn=input_fn, hooks=hooks)
427 logging.info('Loss for final step: %s.', loss)
428 return self
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc
in _train_model(self, input_fn, hooks)
932 features, labels = input_fn()
933 self._check_inputs(features, labels)
--> 934 model_fn_ops = self._call_legacy_get_train_ops(features, labels)
935 ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
936 all_hooks.extend([
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc
in _call_legacy_get_train_ops(self, features, labels)
1001
1002 def _call_legacy_get_train_ops(self, features, labels):
--> 1003 train_ops = self._get_train_ops(features, labels)
1004 if isinstance(train_ops, model_fn_lib.ModelFnOps):  # Default signature
1005 return train_ops
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc
in _get_train_ops(self, features, labels)
1160 ModelFnOps object.
1161 """
--> 1162 return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
1163
1164 def _get_eval_ops(self, features, labels, metrics):
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.pyc
in _call_model_fn(self, features, labels, mode)
1131 if 'model_dir' in model_fn_args:
1132 kwargs['model_dir'] = self.model_dir
--> 1133 model_fn_results = self._model_fn(features, labels, **kwargs)
1134
1135 if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/client/random_forest.pyc
in _model_fn(features, labels)
128 if labels is not None:
129 training_loss = graph_builder.training_loss(
--> 130 features, labels, name=LOSS_NAME)
131 training_graph = control_flow_ops.group(
132 graph_builder.training_graph(
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc
in training_loss(self, features, labels, name)
559
560 def training_loss(self, features, labels, name='training_loss'):
--> 561 return array_ops.identity(self._get_loss(features, labels), name=name)
562
563
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc
in _get_loss(self, features, labels)
548 self._loss = control_flow_ops.cond(
549 self.average_size() > 0, _average_loss,
--> 550 lambda: constant_op.constant(sys.maxsize, dtype=dtypes.float32))
551
552 return self._loss
/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc
in cond(pred, fn1, fn2, name)
1757 context_t = CondContext(pred, pivot_1, branch=1)
1758 context_t.Enter()
--> 1759 orig_res, res_t = context_t.BuildCondBranch(fn1)
1760 context_t.ExitResult(res_t)
1761 context_t.Exit()
/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.pyc
in BuildCondBranch(self, fn)
1658 def BuildCondBranch(self, fn):
1659 """Add the subgraph defined by fn() to the graph."""
--> 1660 r = fn()
1661 original_r = r
1662 result = []
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc
in _average_loss()
544 probs = self.inference_graph(features)
545 return math_ops.reduce_sum(self.loss_fn(
--> 546 probs, labels)) / math_ops.to_float(array_ops.shape(labels)[0])
547
548 self._loss = control_flow_ops.cond(
/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/tensor_forest/python/tensor_forest.pyc
in _loss(probs, targets)
508 def _loss(probs, targets):
509 if targets.get_shape().ndims > 1:
--> 510 targets = array_ops.squeeze(targets, squeeze_dims=[1])
511 one_hot_labels = array_ops.one_hot(
512 math_ops.to_int32(targets),
/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.pyc
in squeeze(input, axis, name, squeeze_dims)
2270 if np.isscalar(axis):
2271 axis = [axis]
--> 2272 return gen_array_ops._squeeze(input, axis, name)
2273
2274
/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.pyc
in _squeeze(input, squeeze_dims, name)
3496 """
3497 result = _op_def_lib.apply_op("Squeeze", input=input,
--> 3498 squeeze_dims=squeeze_dims, name=name)
3499 return result
3500
/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.pyc
in apply_op(self, op_type_name, name, **keywords)
761 op = g.create_op(op_type_name, inputs, output_types, name=scope,
762 input_types=input_types, attrs=attr_protos,
--> 763 op_def=op_def)
764 if output_structure:
765 outputs = op.outputs
/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc
in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
2395 original_op=self._default_original_op, op_def=op_def)
2396 if compute_shapes:
--> 2397 set_shapes_for_outputs(ret)
2398 self._add_op(ret)
2399 self._record_op_seen_by_control_dependencies(ret)
/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc
in set_shapes_for_outputs(op)
1755 shape_func = _call_cpp_shape_fn_and_require_op
1756
--> 1757 shapes = shape_func(op)
1758 if shapes is None:
1759 raise RuntimeError(
/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.pyc
in call_with_requiring(op)
1705
1706 def call_with_requiring(op):
--> 1707 return call_cpp_shape_fn(op, require_shape_fn=True)
1708
1709 _call_cpp_shape_fn_and_require_op = call_with_requiring
/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/common_shapes.pyc
in call_cpp_shape_fn(op, input_tensors_needed,
input_tensors_as_shapes_needed, debug_python_shape_fn,
require_shape_fn)
608 res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
609 input_tensors_as_shapes_needed,
--> 610 debug_python_shape_fn, require_shape_fn)
611 if not isinstance(res, dict):
612 # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/common_shapes.pyc
in _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed, debug_python_shape_fn,
require_shape_fn)
673 missing_shape_fn = True
674 else:
--> 675 raise ValueError(err.message)
676
677 if missing_shape_fn:
ValueError: Can not squeeze dim[1], expected a dimension of 1, got 7
for 'cond/Squeeze' (op: 'Squeeze') with input shapes: [?,7].

Right now, TrainingLossForest assumes a classification problem. I will fix this internally, but for now you can work around it with something like:
from tensorflow.contrib.losses.python.losses import loss_ops

def _loss_fn(values, targets):
    return loss_ops.mean_squared_error(values, targets)

def _builder_class(params, **kwargs):
    return tensor_forest.TrainingLossForest(
        params, loss_fn=_loss_fn, **kwargs)

TensorForestEstimator(..., graph_builder_class=_builder_class)
Or use whatever loss function you want (MSE here is just an example). Also, simply using graph_builder_class = tensor_forest.RandomForestGraphs should suffice, but then the reported loss is the number of nodes, so training stops when the forest stops growing or reaches max_nodes, which may not be what you want.
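For concreteness, here is roughly how that workaround could be slotted into the question's build_estimator(); this is only a sketch based on the answer above (whether ForestHParams also needs regression=True for the 7-column float targets is my assumption, not something the answer states):
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.contrib.tensor_forest.python import tensor_forest

def _mse_loss_fn(values, targets):
    # Mean squared error between the forest's per-example outputs and the float targets.
    return loss_ops.mean_squared_error(values, targets)

def _builder_class(params, **kwargs):
    # Train the forest against the custom loss instead of the default classification loss.
    return tensor_forest.TrainingLossForest(params, loss_fn=_mse_loss_fn, **kwargs)

def build_estimator():
    params = tensor_forest.ForestHParams(
        num_classes=7, num_features=7,   # 7 target columns, 7 input features
        num_trees=30, max_nodes=100)     # regression=True may also be needed here (assumption)
    return estimator.SKCompat(random_forest.TensorForestEstimator(
        params, graph_builder_class=_builder_class, model_dir=None))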

Related

UnimplementedError while training on TPU in Colab

When trying to train my model on a TPU in Colab with
model.fit(train_dataset,
          steps_per_epoch=len(df_train) // config.BATCH_SIZE,
          validation_data=valid_dataset,
          epochs=config.EPOCHS)
I got this error, with the full traceback:
UnimplementedError Traceback (most recent call last)
<ipython-input-37-92afbe2b5ae5> in <module>()
2 steps_per_epoch = len(df_train) // config.BATCH_SIZE,
3 validation_data = valid_dataset,
----> 4 epochs = config.EPOCHS)
13 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1186 logs = tmp_logs # No error, now safe to assign to logs.
1187 end_step = step + data_handler.step_increment
-> 1188 callbacks.on_train_batch_end(end_step, logs)
1189 if self.stop_training:
1190 break
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in on_train_batch_end(self, batch, logs)
455 """
456 if self._should_call_train_batch_hooks:
--> 457 self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
458
459 def on_test_batch_begin(self, batch, logs=None):
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_hook(self, mode, hook, batch, logs)
315 self._call_batch_begin_hook(mode, batch, logs)
316 elif hook == 'end':
--> 317 self._call_batch_end_hook(mode, batch, logs)
318 else:
319 raise ValueError('Unrecognized hook: {}'.format(hook))
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_end_hook(self, mode, batch, logs)
335 self._batch_times.append(batch_time)
336
--> 337 self._call_batch_hook_helper(hook_name, batch, logs)
338
339 if len(self._batch_times) >= self._num_batches_for_timing_check:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_hook_helper(self, hook_name, batch, logs)
373 for callback in self.callbacks:
374 hook = getattr(callback, hook_name)
--> 375 hook(batch, logs)
376
377 if self._check_timing:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in on_train_batch_end(self, batch, logs)
1027
1028 def on_train_batch_end(self, batch, logs=None):
-> 1029 self._batch_update_progbar(batch, logs)
1030
1031 def on_test_batch_end(self, batch, logs=None):
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/callbacks.py in _batch_update_progbar(self, batch, logs)
1099 if self.verbose == 1:
1100 # Only block async when verbose = 1.
-> 1101 logs = tf_utils.sync_to_numpy_or_python_type(logs)
1102 self.progbar.update(self.seen, list(logs.items()), finalize=False)
1103
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/tf_utils.py in sync_to_numpy_or_python_type(tensors)
517 return t # Don't turn ragged or sparse tensors to NumPy.
518
--> 519 return nest.map_structure(_to_single_numpy_or_python_type, tensors)
520
521
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
865
866 return pack_sequence_as(
--> 867 structure[0], [func(*x) for x in entries],
868 expand_composites=expand_composites)
869
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
865
866 return pack_sequence_as(
--> 867 structure[0], [func(*x) for x in entries],
868 expand_composites=expand_composites)
869
/usr/local/lib/python3.7/dist-packages/tensorflow/python/keras/utils/tf_utils.py in _to_single_numpy_or_python_type(t)
513 def _to_single_numpy_or_python_type(t):
514 if isinstance(t, ops.Tensor):
--> 515 x = t.numpy()
516 return x.item() if np.ndim(x) == 0 else x
517 return t # Don't turn ragged or sparse tensors to NumPy.
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in numpy(self)
1092 """
1093 # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
-> 1094 maybe_arr = self._numpy() # pylint: disable=protected-access
1095 return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
1096
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in _numpy(self)
1060 return self._numpy_internal()
1061 except core._NotOkStatusException as e: # pylint: disable=protected-access
-> 1062 six.raise_from(core._status_to_exception(e.code, e.message), None) # pylint: disable=protected-access
1063
1064 #property
/usr/local/lib/python3.7/dist-packages/six.py in raise_from(value, from_value)
UnimplementedError: 9 root error(s) found.
(0) Unimplemented: {{function_node __inference_train_function_88574}} Asked to propagate a dynamic dimension from hlo convolution.24975#{}#2 to hlo %all-reduce.24980 = f32[3,3,<=3,32]{3,2,1,0} all-reduce(f32[3,3,<=3,32]{3,2,1,0} %convolution.24975), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=%sum.24976, metadata={op_type="CrossReplicaSum" op_name="while/body/_1/while/Adam/CrossReplicaSum"}, which is not implemented.
[[{{node TPUReplicate/_compile/_18168620323984915962/_4}}]]
[[while/body/_1/while/strided_slice_1/_253]]
(1) Unimplemented: {{function_node __inference_train_function_88574}} Asked to propagate a dynamic dimension from hlo convolution.24975#{}#2 to hlo %all-reduce.24980 = f32[3,3,<=3,32]{3,2,1,0} all-reduce(f32[3,3,<=3,32]{3,2,1,0} %convolution.24975), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=%sum.24976, metadata={op_type="CrossReplicaSum" op_name="while/body/_1/while/Adam/CrossReplicaSum"}, which is not implemented.
[[{{node TPUReplicate/_compile/_18168620323984915962/_4}}]]
[[TPUReplicate/_compile/_18168620323984915962/_4/_243]]
(2) Unimplemented: {{function_node __inference_train_function_88574}} Asked to propagate a dynamic dimension from hlo convolution.24975#{}#2 to hlo %all-reduce.24980 = f32[3,3,<=3,32]{3,2,1,0} all-reduce(f32[3,3,<=3,32]{3,2,1,0} %convolution.24975), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=%sum.24976, metadata={op_type="CrossReplicaSum" op_name="while/body/_1/while/Adam/CrossReplicaSum"}, which is not implemented.[truncated]
Things that I have checked:
My data is in a GCS bucket and can be retrieved using the dataset object I created.
My model definition:
with strategy.scope():
    base_model = efn.EfficientNetB0(include_top=False)
    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(config.IMG_SIZE, config.IMG_SIZE, 3)),
        base_model,
        tf.keras.layers.GlobalAveragePooling2D(),
        tf.keras.layers.Dense(5, activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=config.LR),
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
                  steps_per_execution=32)
Any idea why this is happening? It says that a dynamic dimension was asked to propagate, but I don't think that should be the case, considering the model worked in a GPU setting (with the data present in the current session).
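For context on the "dynamic dimension" message: XLA on TPU compiles the graph with static shapes, and dimensions usually become dynamic inside the tf.data pipeline (decoded images without a fixed height/width/channels, or a partial final batch). The sketch below only illustrates how shapes are typically pinned in such a pipeline; the parse function and file list are assumptions, since the question does not show how train_dataset is built:
import tensorflow as tf

AUTOTUNE = tf.data.experimental.AUTOTUNE

def parse_example(serialized):  # hypothetical TFRecord parser, not from the question
    feats = tf.io.parse_single_example(serialized, {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64)})
    img = tf.image.decode_jpeg(feats['image'], channels=3)         # force 3 channels
    img = tf.image.resize(img, [config.IMG_SIZE, config.IMG_SIZE])
    img.set_shape([config.IMG_SIZE, config.IMG_SIZE, 3])           # fully static shape
    return img, feats['label']

train_dataset = (tf.data.TFRecordDataset(train_filenames)          # hypothetical file list
                 .map(parse_example, num_parallel_calls=AUTOTUNE)
                 .batch(config.BATCH_SIZE, drop_remainder=True)    # static batch dimension for XLA
                 .prefetch(AUTOTUNE))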

TF Dataset error: "Error reported to Coordinator: No gradients provided for any variable" (TensorFlow 2.2.0)

I get the error "INFO:tensorflow:Error reported to Coordinator: No gradients provided for any variable" when I run the code below.
You'll need the following to run the code
Tensorflow 2.2.0
efficientnet
keras_bert (https://pypi.org/project/keras-bert/)
numpy
pandas
You'll also need to download pre-trained BERT model weights from https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-768_A-12.zip
The code uses tensorflow dataset API to generate data on the fly.
import tensorflow
print('TensorFlow version =', tensorflow.__version__)
AUTO = tensorflow.data.experimental.AUTOTUNE
import efficientnet.tfkeras as efn
from efficientnet.tfkeras import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout, Input, Embedding, LSTM, Add
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.image import img_to_array as img_to_array
from tensorflow.keras.preprocessing.image import load_img as load_img
from tensorflow.keras.optimizers import SGD
import codecs
from keras_bert import load_trained_model_from_checkpoint
import ast
import pandas as pd
from keras_bert import Tokenizer
import numpy as np
import os

TF_KERAS = 1

pretrained_path = '../Data/BERT/uncased_L-12_H-768_A-12'
config_path = os.path.join(pretrained_path, 'bert_config.json')
checkpoint_path = os.path.join(pretrained_path, 'bert_model.ckpt')
vocab_path = os.path.join(pretrained_path, 'vocab.txt')
SEQ_LEN = 128

token_dict = {}
with codecs.open(vocab_path, 'r', 'utf8') as reader:
    for line in reader:
        token = line.strip()
        token_dict[token] = len(token_dict)
tokenizer = Tokenizer(token_dict)

EPOCHS = 5
NUM_CLASSES = 10

def get_model(base_model, bert_model, NUM_CLASSES, emdedding_size=768):
    # add a global spatial average pooling layer
    x = base_model.output
    x = Dropout(0.05)(x)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dense(emdedding_size, activation='relu')(x)
    # sequence model
    dense = bert_model.get_layer('NSP-Dense').output
    # decoder model
    decoder1 = Add()([x, dense])
    decoder2 = Dense(emdedding_size, activation='relu')(decoder1)
    output = Dense(NUM_CLASSES, activation='softmax', name='output')(decoder2)
    # tie it together
    model = Model(inputs={'input_1': base_model.input,
                          'Input-Token': bert_model.inputs[0],
                          'Input-Segment': bert_model.inputs[1]},
                  outputs={'output': output})
    return model

gpus = tensorflow.config.list_physical_devices('GPU'); print(gpus)
if len(gpus)==1: strategy = tensorflow.distribute.OneDeviceStrategy(device="/gpu:0")
else: strategy = tensorflow.distribute.MirroredStrategy()

max_length = 20
DIM = 224

with strategy.scope():
    base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, input_shape=(DIM, DIM, 3))  # or weights='noisy-student'
    bert_model = load_trained_model_from_checkpoint(
        config_path,
        checkpoint_path,
        training=True,
        trainable=True,
        seq_len=SEQ_LEN,
    )
    model = get_model(base_model, bert_model, NUM_CLASSES)
    model.compile(optimizer=SGD(lr=.00001, momentum=0.9), loss='categorical_crossentropy', metrics=['categorical_accuracy'])

def doaugmentation(img, rand_num=None):
    if rand_num == None:
        rand_num = random.randint(0, 2)
    if rand_num == 0:
        return img
    elif rand_num == 1:  # brightness
        return tensorflow.image.random_brightness(img, 0.4, seed=1)
    else:
        return img

def get_dataset(csv_path, mode, batch_size, data_path, debug=False):
    if debug: print('[+] Inside the data function')
    df = pd.read_csv(csv_path)
    if debug: print('[+] Read the csv file; shape=', df.shape)
    image_paths = df.apply(lambda x: os.path.join(data_path, x['image_name']), axis=1).tolist()
    if debug: print('[+] Image paths received')
    descriptions = df['text'].apply(lambda x: x.lower()).tolist()
    if debug: print('[+] Descriptions lower cased')
    if mode != 'test':  ## output
        if debug: print('[+] Mode= {}'.format(mode))
        output = df['output'].apply(lambda x: ast.literal_eval(x)).tolist()
        if debug: print('[+] All Ids received')
        dataset = tensorflow.data.Dataset.from_tensor_slices((image_paths, descriptions, output))
        if debug: print('[+] Tensor to slices done')
        dataset = dataset.shuffle(len(df))
        if debug: print('[+] Dataset shuffled')
    else:
        dataset = tensorflow.data.Dataset.from_tensor_slices((image_paths, descriptions, [None]*len(image_paths)))
    dataset = dataset.batch(batch_size)
    if debug: print('[+] Batch generated')
    dataset = dataset.map(lambda img_path, description, output: tensorflow.py_function(process_data,
                          [img_path, description, output],
                          [tensorflow.float32, tensorflow.float32, tensorflow.float32, tensorflow.int32]), num_parallel_calls=AUTO)
    if debug: print('[+] Final Map done')
    dataset = dataset.map(split, num_parallel_calls=AUTO)
    if debug: print('[+] Prefetching now...')
    dataset = dataset.prefetch(AUTO)
    return dataset

def split(image, description, description_like, output):
    return {'input_2': image, 'Input-Token': description, 'Input-Segment': description_like, 'output': output}

def process_data(img_paths, descriptions, output):
    global DIM
    images = [process_image(img_path, DIM) for img_path in img_paths.numpy()]
    desription, desription_like = [process_text(description)[0] for description in descriptions], [process_text(description)[1] for description in descriptions]
    if output[0].numpy().any() == None:
        return images, desription, desription_like
    return images, desription, desription_like, output

def process_image(img_path, im_size):
    image_string = tensorflow.io.read_file(img_path)
    image = tensorflow.image.decode_jpeg(image_string, channels=3)
    image = tensorflow.image.convert_image_dtype(image, tensorflow.float32)
    image = tensorflow.image.resize(image, [im_size, im_size])
    return image

def process_text(text):
    global tokenizer, SEQ_LEN
    desription = tokenizer.encode(tensorflow.compat.as_str_any(text.numpy()), max_len=SEQ_LEN)[0]
    desription_like = np.zeros_like(desription)
    return desription, desription_like

batch_size = 1

dataset_train = get_dataset(file_name, 'train', batch_size, dir_path, True)
dataset_val = get_dataset(file_name, 'val', batch_size, dir_path, True)

H = model.fit(x=dataset_train,
              validation_data=dataset_val,
              verbose=1,
              epochs=1)
You'll also need this file to be in the same directory
text,image_name,output
Honeywell MN Series Portable Air Conditioner with Dehumidifier & Fan for Rooms Up To 450 Sq. Ft.,picture1.jpeg,"[1, 0, 0, 0, 0, 0, 0]"
"TCL 10,000 BTU White Window Air Conditioner with Wi-Fi",picture2.png,"[1, 0, 0, 0, 0, 0, 0]"
Honeywell MN Series Portable Air Conditioner with Dehumidifier & Fan for Rooms Up To 450 Sq. Ft.,picture1.jpeg,"[1, 0, 0, 0, 0, 0, 0]"
"TCL 10,000 BTU White Window Air Conditioner with Wi-Fi",picture2.png,"[1, 0, 0, 0, 0, 0, 0]"
Honeywell MN Series Portable Air Conditioner with Dehumidifier & Fan for Rooms Up To 450 Sq. Ft.,picture1.jpeg,"[1, 0, 0, 0, 0, 0, 0]"
"TCL 10,000 BTU White Window Air Conditioner with Wi-Fi",picture2.png,"[1, 0, 0, 0, 0, 0, 0]"
Honeywell MN Series Portable Air Conditioner with Dehumidifier & Fan for Rooms Up To 450 Sq. Ft.,picture1.jpeg,"[1, 0, 0, 0, 0, 0, 0]"
"TCL 10,000 BTU White Window Air Conditioner with Wi-Fi",picture2.png,"[1, 0, 0, 0, 0, 0, 0]"
and these pictures
I get the following error
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-31-f9c0025d2202> in <module>
2 validation_data=dataset_val,
3 verbose=1,
----> 4 epochs=1)
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
846 batch_size=batch_size):
847 callbacks.on_train_batch_begin(step)
--> 848 tmp_logs = train_function(iterator)
849 # Catch OutOfRangeError for Datasets of unknown size.
850 # This blocks until the batch has finished executing.
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\def_function.py in __call__(self, *args, **kwds)
578 xla_context.Exit()
579 else:
--> 580 result = self._call(*args, **kwds)
581
582 if tracing_count == self._get_tracing_count():
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\def_function.py in _call(self, *args, **kwds)
625 # This is the first call of __call__, so we have to initialize.
626 initializers = []
--> 627 self._initialize(args, kwds, add_initializers_to=initializers)
628 finally:
629 # At this point we know that the initialization is complete (or less
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\def_function.py in _initialize(self, args, kwds, add_initializers_to)
504 self._concrete_stateful_fn = (
505 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 506 *args, **kwds))
507
508 def invalid_creator_scope(*unused_args, **unused_kwds):
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2444 args, kwargs = None, None
2445 with self._lock:
-> 2446 graph_function, _, _ = self._maybe_define_function(args, kwargs)
2447 return graph_function
2448
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\function.py in _maybe_define_function(self, args, kwargs)
2775
2776 self._function_cache.missed.add(call_context_key)
-> 2777 graph_function = self._create_graph_function(args, kwargs)
2778 self._function_cache.primary[cache_key] = graph_function
2779 return graph_function, args, kwargs
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
2665 arg_names=arg_names,
2666 override_flat_arg_shapes=override_flat_arg_shapes,
-> 2667 capture_by_value=self._capture_by_value),
2668 self._function_attributes,
2669 # Tell the ConcreteFunction to clean up its graph once it goes out of
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
979 _, original_func = tf_decorator.unwrap(python_func)
980
--> 981 func_outputs = python_func(*func_args, **func_kwargs)
982
983 # invariant: `func_outputs` contains only Tensors, CompositeTensors,
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\eager\def_function.py in wrapped_fn(*args, **kwds)
439 # __wrapped__ allows AutoGraph to swap in a converted function. We give
440 # the function a weak reference to itself to avoid a reference cycle.
--> 441 return weak_wrapped_fn().__wrapped__(*args, **kwds)
442 weak_wrapped_fn = weakref.ref(wrapped_fn)
443
~\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\training.py:571 train_function *
outputs = self.distribute_strategy.run(
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\distribute\distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\distribute\distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\distribute\mirrored_strategy.py:770 _call_for_each_replica
fn, args, kwargs)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\distribute\mirrored_strategy.py:201 _call_for_each_replica
coord.join(threads)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\training\coordinator.py:389 join
six.reraise(*self._exc_info_to_raise)
C:\Users\i24009\Anaconda3\envs\py36TF2x1\lib\site-packages\six.py:703 reraise
raise value
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\training\coordinator.py:297 stop_on_exception
yield
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\distribute\mirrored_strategy.py:998 run
self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\training.py:541 train_step **
self.trainable_variables)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\engine\training.py:1804 _minimize
trainable_variables))
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:521 _aggregate_gradients
filtered_grads_and_vars = _filter_grads(grads_and_vars)
C:\Users\i24009\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\keras\optimizer_v2\optimizer_v2.py:1219 _filter_grads
([v.name for _, v in grads_and_vars],))
Full error here https://pastebin.com/c25x2uxu
I'd like to seek the community's guidance on the following:
What am I doing wrong in the above code?
Is there a better way to do this, given that I am dealing with millions of training/validation images? (I looked at the data generator from the TF Dataset API but haven't used it yet.)
Any answers or suggestions would be highly appreciated.

How to get a Keras layer to include `None` in its return tuple

I am trying to make a Keras layer that returns None in its tuple.
class transformer_IO(tf.keras.layers.Layer):
    def call(self, input):
        return (input, None, None, None)
However, when I try to build and compile a model with this layer, I get
AttributeError: 'NoneType' object has no attribute 'shape'
Here is an example:
!pip install transformers

from transformers import TFBertModel
import tensorflow as tf
from copy import deepcopy

class transformer_IO(tf.keras.layers.Layer):
    def call(self, input):
        return (input, None, None, None)

def get_functional_model_protoFix():
    bioRoberta_f = TFBertModel.from_pretrained('bert-base-uncased', from_pt=True)
    Q_Tlayer0_f = deepcopy(bioRoberta_f.layers[0].encoder.layer[8])
    Q_Tlayer0_f._name = Q_Tlayer0_f._name + 'Query_f'
    Q_Tlayer1_f = deepcopy(bioRoberta_f.layers[0].encoder.layer[9])
    Q_Tlayer1_f._name = Q_Tlayer1_f._name + 'Query_f'
    transIO = transformer_IO()
    inputIds = tf.keras.Input(shape=(None,), dtype=tf.int32, name='input_Q')
    Q_outputs = bioRoberta_f(inputIds)[0]
    Q_outputs = transIO(Q_outputs)
    Q_outputs = Q_Tlayer0_f(Q_outputs)[0]
    Q_outputs = transIO(Q_outputs)
    Q_outputs = Q_Tlayer1_f(Q_outputs)[0]
    modelNew = tf.keras.Model(inputs=inputIds, outputs=Q_outputs)
    return modelNew

model_functional = get_functional_model_protoFix()
model_functional.compile(loss=loss_fn,
                         optimizer=tfa.optimizers.AdamW(weight_decay=1e-4, learning_rate=1e-5,
                                                        epsilon=1e-06))
Full error message
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-35-a029c18cecf9> in <module>()
----> 1 model_functional_new = get_functional_model_protoFix()
2 model_functional_new.compile(loss=loss_fn,
3 optimizer=tfa.optimizers.AdamW(weight_decay=1e-4, learning_rate=1e-5,
4 epsilon=1e-06))
7 frames
<ipython-input-34-693ee085f848> in get_functional_model_protoFix()
13
14 Q_outputs = bioRoberta_f(inputIds)[0]
---> 15 Q_outputs = transIO(Q_outputs)
16 Q_outputs = Q_Tlayer0_f(Q_outputs)[0]
17 Q_outputs = transIO(Q_outputs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
952 kwargs.pop('mask')
953 inputs, outputs = self._set_connectivity_metadata_(
--> 954 inputs, outputs, args, kwargs)
955 self._handle_activity_regularization(inputs, outputs)
956 self._set_mask_metadata(inputs, outputs, input_masks)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in _set_connectivity_metadata_(self, inputs, outputs, args, kwargs)
2312 # This updates the layer history of the output tensor(s).
2313 self._add_inbound_node(
-> 2314 input_tensors=inputs, output_tensors=outputs, arguments=arguments)
2315 return inputs, outputs
2316
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in _add_inbound_node(self, input_tensors, output_tensors, arguments)
2342 input_tensors=input_tensors,
2343 output_tensors=output_tensors,
-> 2344 arguments=arguments)
2345
2346 # Update tensor history metadata.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/node.py in __init__(self, outbound_layer, inbound_layers, node_indices, tensor_indices, input_tensors, output_tensors, arguments)
108 self.input_shapes = nest.map_structure(backend.int_shape, input_tensors)
109 # Nested structure of shape tuples, shapes of output_tensors.
--> 110 self.output_shapes = nest.map_structure(backend.int_shape, output_tensors)
111
112 # Optional keyword arguments to layer's `call`.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
615
616 return pack_sequence_as(
--> 617 structure[0], [func(*x) for x in entries],
618 expand_composites=expand_composites)
619
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
615
616 return pack_sequence_as(
--> 617 structure[0], [func(*x) for x in entries],
618 expand_composites=expand_composites)
619
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py in int_shape(x)
1201 """
1202 try:
-> 1203 shape = x.shape
1204 if not isinstance(shape, tuple):
1205 shape = tuple(shape.as_list())
AttributeError: 'NoneType' object has no attribute 'shape'

Using tf.data.Dataset with TF Hub modules

How do I feed a tf.keras model, that includes a 1D input TF Hub module, with a tf.data.Dataset?
(Ultimately, the aim is to use a single tf.data.Dataset with a multi-input, multi-output Keras functional API model.)
Tried this:
import tensorflow as tf
import tensorflow_hub as hub

embed = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embed, output_shape=[20], input_shape=[],
                           dtype=tf.string, trainable=True, name='hub_layer')
# From tf hub webpage: "The module takes a batch of sentences in a 1-D tensor of strings as input."
input_tensor = tf.keras.Input(shape=(), dtype=tf.string)
hub_tensor = hub_layer(input_tensor)
x = tf.keras.layers.Dense(16, activation='relu')(hub_tensor)  # (x)
main_output = tf.keras.layers.Dense(units=4, activation='softmax', name='main_output')(x)
model = tf.keras.models.Model(inputs=[input_tensor], outputs=[main_output])

# This works as expected.
X_tensor = tf.constant(['Hello World', 'The Quick Brown Fox'])
model(X_tensor)

# This fails
X_ds = tf.data.Dataset.from_tensors(X_tensor)
X_ds.element_spec
model(X_ds)
My expectation was that the 1D tensor in the dataset would be automatically extracted and consumed by the model.
Error message:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
in
21 X_ds = tf.data.Dataset.from_tensors(X_tensor)
22 X_ds.element_spec
---> 23 model(X_ds)
24
25
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
966 with base_layer_utils.autocast_context_manager(
967 self._compute_dtype):
--> 968 outputs = self.call(cast_inputs, *args, **kwargs)
969 self._handle_activity_regularization(inputs, outputs)
970 self._set_mask_metadata(inputs, outputs, input_masks)
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py in call(self, inputs, training, mask)
717 return self._run_internal_graph(
718 inputs, training=training, mask=mask,
--> 719 convert_kwargs_to_constants=base_layer_utils.call_context().saving)
720
721 def compute_output_shape(self, input_shape):
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py in _run_internal_graph(self, inputs, training, mask, convert_kwargs_to_constants)
835 tensor_dict = {}
836 for x, y in zip(self.inputs, inputs):
--> 837 y = self._conform_to_reference_input(y, ref_input=x)
838 x_id = str(id(x))
839 tensor_dict[x_id] = [y] * self._tensor_usage_count[x_id]
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/keras/engine/network.py in _conform_to_reference_input(self, tensor, ref_input)
959 # Dtype handling.
960 if isinstance(ref_input, (ops.Tensor, composite_tensor.CompositeTensor)):
--> 961 tensor = math_ops.cast(tensor, dtype=ref_input.dtype)
962
963 return tensor
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py in cast(x, dtype, name)
785 # allows some conversions that cast() can't do, e.g. casting numbers to
786 # strings.
--> 787 x = ops.convert_to_tensor(x, name="x")
788 if x.dtype.base_dtype != base_type:
789 x = gen_math_ops.cast(x, base_type, name=name)
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1339
1340 if ret is None:
-> 1341 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1342
1343 if ret is NotImplemented:
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
319 as_ref=False):
320 _ = as_ref
--> 321 return constant(v, dtype=dtype, name=name)
322
323
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
260 """
261 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 262 allow_broadcast=True)
263
264
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
268 ctx = context.context()
269 if ctx.executing_eagerly():
--> 270 t = convert_to_eager_tensor(value, ctx, dtype)
271 if shape is None:
272 return t
~/projects/email_analysis/email_venv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
94 dtype = dtypes.as_dtype(dtype).as_datatype_enum
95 ctx.ensure_initialized()
---> 96 return ops.EagerTensor(value, ctx.device_name, dtype)
97
98
ValueError: Attempt to convert a value () with an unsupported type () to a Tensor.
The point of a dataset is to provide a sequence of tensors, like here:
all_data = tf.constant([['Hello', 'World'], ['Brown Fox', 'lazy dog']])
ds = tf.data.Dataset.from_tensor_slices(all_data)
for tensor in ds:
    print(tensor)
which outputs
tf.Tensor([b'Hello' b'World'], shape=(2,), dtype=string)
tf.Tensor([b'Brown Fox' b'lazy dog'], shape=(2,), dtype=string)
Instead of just printing tensor, you can compute with it:
for tensor in ds:
    print(hub_layer(tensor))
which outputs 2 tensors of shape (2,20) each.
For more, see https://www.tensorflow.org/guide/data.
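To close the loop on the original failure (model(X_ds)): Keras will pull tensors out of a tf.data.Dataset for you if you go through predict/fit rather than calling the model directly. A minimal sketch, assuming the model and X_ds built in the question:
# predict() accepts a dataset that yields batches of inputs.
preds = model.predict(X_ds)
print(preds.shape)  # (2, 4): one softmax vector per sentence

# fit() needs the dataset to yield (features, labels) pairs, e.g. with made-up labels:
labels = tf.constant([0, 3])
train_ds = tf.data.Dataset.from_tensor_slices((X_tensor, labels)).batch(2)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
model.fit(train_ds, epochs=1)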

TensorFlow beginner: using an estimator for prediction after running an Experiment

I am following this guide by Google (https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/tensorflow/d_experiment.ipynb) to build a simple linear regression model.
The notebook uses the Experiment class and learn_runner (a class for which I cannot find any documentation) to train the model. I am now trying to use the model for prediction. I tried the following but got an error. Would you please let me know the correct way to do it? Thanks.
Code added to the bottom:
# load the saved model
estimator = tflearn.LinearRegressor(feature_columns=feature_cols, model_dir='taxi_trained')
estimator.predict(input_fn=get_test)
The error I got:
INFO:tensorflow:Using default config.
INFO:tensorflow:Using config: {'_is_chief': True, '_model_dir': None, '_save_checkpoints_secs': 600, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x00000218611630F0>, '_master': '', '_task_id': 0, '_keep_checkpoint_every_n_hours': 10000, '_evaluation_master': '', '_environment': 'local', '_num_worker_replicas': 0, '_tf_random_seed': None, '_tf_config': gpu_options {
per_process_gpu_memory_fraction: 1
}
, '_save_checkpoints_steps': None, '_keep_checkpoint_max': 5, '_task_type': None, '_num_ps_replicas': 0, '_save_summary_steps': 100}
WARNING:tensorflow:From c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py:335: calling LinearRegressor.predict (from tensorflow.contrib.learn.python.learn.estimators.linear) with outputs=None is deprecated and will be removed after 2017-03-01.
Instructions for updating:
Please switch to predict_scores, or set `outputs` argument.
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-5-7f1903437174> in <module>()
1 with tf.Session() as sess:
2 estimator = tflearn.LinearRegressor(feature_columns=feature_cols, model_dir='taxi_trained')
----> 3 estimator.predict(input_fn=get_test)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
333 _call_location(), decorator_utils.get_qualified_name(func),
334 func.__module__, arg_name, arg_value, date, instructions)
--> 335 return func(*args, **kwargs)
336 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
337 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
333 _call_location(), decorator_utils.get_qualified_name(func),
334 func.__module__, arg_name, arg_value, date, instructions)
--> 335 return func(*args, **kwargs)
336 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
337 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\linear.py in predict(self, x, input_fn, batch_size, outputs, as_iterable)
755 input_fn=input_fn,
756 batch_size=batch_size,
--> 757 as_iterable=as_iterable)
758 return super(LinearRegressor, self).predict(
759 x=x,
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
333 _call_location(), decorator_utils.get_qualified_name(func),
334 func.__module__, arg_name, arg_value, date, instructions)
--> 335 return func(*args, **kwargs)
336 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
337 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\linear.py in predict_scores(self, x, input_fn, batch_size, as_iterable)
790 batch_size=batch_size,
791 outputs=[key],
--> 792 as_iterable=as_iterable)
793 if as_iterable:
794 return _as_iterable(preds, output=key)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\python\util\deprecation.py in new_func(*args, **kwargs)
279 _call_location(), decorator_utils.get_qualified_name(func),
280 func.__module__, arg_name, date, instructions)
--> 281 return func(*args, **kwargs)
282 new_func.__doc__ = _add_deprecated_arg_notice_to_docstring(
283 func.__doc__, date, instructions)
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in predict(self, x, input_fn, batch_size, outputs, as_iterable)
563 feed_fn=feed_fn,
564 outputs=outputs,
--> 565 as_iterable=as_iterable)
566
567 def get_variable_value(self, name):
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _infer_model(self, input_fn, feed_fn, outputs, as_iterable, iterate_batches)
855 contrib_framework.create_global_step(g)
856 features = self._get_features_from_input_fn(input_fn)
--> 857 infer_ops = self._get_predict_ops(features)
858 predictions = self._filter_predictions(infer_ops.predictions, outputs)
859 mon_sess = monitored_session.MonitoredSession(
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _get_predict_ops(self, features)
1186 labels = tensor_signature.create_placeholders_from_signatures(
1187 self._labels_info)
-> 1188 return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
1189
1190 def export_savedmodel(
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\estimator.py in _call_model_fn(self, features, labels, mode)
1101 if 'model_dir' in model_fn_args:
1102 kwargs['model_dir'] = self.model_dir
-> 1103 model_fn_results = self._model_fn(features, labels, **kwargs)
1104
1105 if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\learn\python\learn\estimators\linear.py in _linear_model_fn(features, labels, mode, params, config)
159 num_outputs=head.logits_dimension,
160 weight_collections=[parent_scope],
--> 161 scope=scope)
162
163 def _train_op_fn(loss):
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\layers\python\layers\feature_column_ops.py in weighted_sum_from_feature_columns(columns_to_tensors, feature_columns, num_outputs, weight_collections, trainable, scope)
529 # pylint: disable=protected-access
530 for column in sorted(set(feature_columns), key=lambda x: x.key):
--> 531 transformed_tensor = transformer.transform(column)
532 try:
533 embedding_lookup_arguments = column._wide_embedding_lookup_arguments(
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\layers\python\layers\feature_column_ops.py in transform(self, feature_column)
880 return self._columns_to_tensors[feature_column]
881
--> 882 feature_column.insert_transformed_feature(self._columns_to_tensors)
883
884 if feature_column not in self._columns_to_tensors:
c:\users\tommy\appdata\local\programs\python\python35\lib\site-packages\tensorflow\contrib\layers\python\layers\feature_column.py in insert_transformed_feature(self, columns_to_tensors)
1406 """
1407 # Transform the input tensor according to the normalizer function.
-> 1408 input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
1409 columns_to_tensors[self] = math_ops.to_float(input_tensor)
1410
KeyError: 'dropofflat'
I am using TensorFlow 1.1 with Python 3.5 on Windows 10. GPU enabled.
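For reference, predict(input_fn=...) on these contrib estimators expects the input_fn to return a features dict whose keys match the trained feature columns, and the KeyError on 'dropofflat' indicates that key is missing from whatever get_test returns. A minimal sketch of such an input_fn; the column list and CSV file name are assumptions loosely based on the taxi example, not code from the question:
import pandas as pd
import tensorflow as tf

# Hypothetical feature columns; they must match the names used to define feature_cols.
FEATURES = ['pickuplon', 'pickuplat', 'dropofflon', 'dropofflat', 'passengers']

def get_test():
    df = pd.read_csv('taxi-test.csv')  # hypothetical test file
    # One tensor per feature column, keyed exactly like the feature columns.
    features = {k: tf.constant(df[k].values, dtype=tf.float32) for k in FEATURES}
    return features  # labels are not needed for prediction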