TypeError when trying to use EarlyStopping with f1-metric as stopping criterion - tensorflow

I want to train a CNN with early stopping, using the F1 metric as the stopping criterion.
When I compile the CNN model, I get a TypeError.
I'm still using TensorFlow 1.4 and would like to avoid an upgrade to 2.0, because I suspect my previous code would no longer work.
The error message is as follows:
TypeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in _num_samples(x)
158 try:
--> 159 return len(x)
160 except TypeError:
14 frames
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in __len__(self)
740 "Please call `x.shape` rather than `len(x)` for "
--> 741 "shape information.".format(self.name))
742
TypeError: len is not well defined for symbolic Tensors. (dense_16_target:0) Please call `x.shape` rather than `len(x)` for shape information.
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-44-cd3da16e057c> in <module>()
----> 1 model = model_cnn(False,False, False,True,6, 0.2, 0.5)
2 X_train, X_val, y_train, y_val = split_data(X_train, y_train,1)
3 cnn, ep = train_model_es(model, X_train, y_train, X_val, y_val, X_test, y_test, 50, 500,1)
<ipython-input-42-d275d9c69c03> in model_cnn(spat, extra_pool, avg_pool, cw, numb_conv, drop_conv, drop_dense)
36 if cw == True:
37 print("sparse categorical crossentropy")
---> 38 model.compile(loss="sparse_categorical_crossentropy", optimizer=Adam(), metrics=['accuracy', f1_metric])
39 #model.compile(loss="sparse_categorical_crossentropy", optimizer=Adam(), metrics=['accuracy'])
40 print("nothing")
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, **kwargs)
452 output_metrics = nested_metrics[i]
453 output_weighted_metrics = nested_weighted_metrics[i]
--> 454 handle_metrics(output_metrics)
455 handle_metrics(output_weighted_metrics, weights=weights)
456
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in handle_metrics(metrics, weights)
421 metric_result = weighted_metric_fn(y_true, y_pred,
422 weights=weights,
--> 423 mask=masks[i])
424
425 # Append to self.metrics_names, self.metric_tensors,
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in weighted(y_true, y_pred, weights, mask)
426 """
427 # score_array has ndim >= 2
--> 428 score_array = fn(y_true, y_pred)
429 if mask is not None:
430 # Cast the mask to floatX to avoid float64 upcasting in Theano
<ipython-input-9-b21dc3bd89a6> in f1_metric(y_test, y_pred)
1 def f1_metric(y_test, y_pred):
----> 2 f1 = f1_score(y_test, y_pred, average='macro')
3 return f1
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in f1_score(y_true, y_pred, labels, pos_label, average, sample_weight, zero_division)
1097 pos_label=pos_label, average=average,
1098 sample_weight=sample_weight,
-> 1099 zero_division=zero_division)
1100
1101
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in fbeta_score(y_true, y_pred, beta, labels, pos_label, average, sample_weight, zero_division)
1224 warn_for=('f-score',),
1225 sample_weight=sample_weight,
-> 1226 zero_division=zero_division)
1227 return f
1228
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in precision_recall_fscore_support(y_true, y_pred, beta, labels, pos_label, average, warn_for, sample_weight, zero_division)
1482 raise ValueError("beta should be >=0 in the F-beta score")
1483 labels = _check_set_wise_labels(y_true, y_pred, average, labels,
-> 1484 pos_label)
1485
1486 # Calculate tp_sum, pred_sum, true_sum ###
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in _check_set_wise_labels(y_true, y_pred, average, labels, pos_label)
1299 str(average_options))
1300
-> 1301 y_type, y_true, y_pred = _check_targets(y_true, y_pred)
1302 present_labels = unique_labels(y_true, y_pred)
1303 if average == 'binary':
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py in _check_targets(y_true, y_pred)
78 y_pred : array or indicator matrix
79 """
---> 80 check_consistent_length(y_true, y_pred)
81 type_true = type_of_target(y_true)
82 type_pred = type_of_target(y_pred)
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in check_consistent_length(*arrays)
206 """
207
--> 208 lengths = [_num_samples(X) for X in arrays if X is not None]
209 uniques = np.unique(lengths)
210 if len(uniques) > 1:
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in <listcomp>(.0)
206 """
207
--> 208 lengths = [_num_samples(X) for X in arrays if X is not None]
209 uniques = np.unique(lengths)
210 if len(uniques) > 1:
/usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py in _num_samples(x)
159 return len(x)
160 except TypeError:
--> 161 raise TypeError(message)
162
163
TypeError: Expected sequence or array-like, got <class 'tensorflow.python.framework.ops.Tensor'>
And here is the relevant code:
def f1_metric(y_test, y_pred):
    f1 = f1_score(y_test, y_pred, average='macro')
    return f1

def train_model_es(model, X, y, X_val, y_val, X_test, y_test):
    es = EarlyStopping(monitor='f1_metric', mode='max', patience=20, restore_best_weights=True)
    y = np.argmax(y, axis=1)
    y_val = np.argmax(y_val, axis=1)
    y_test = np.argmax(y_test, axis=1)
    class_weights = class_weight.compute_class_weight('balanced', np.unique(y), y)
    class_weights = dict(enumerate(class_weights))
    history = model.fit(X, y, class_weight=class_weights, batch_size=32, epochs=20, verbose=1,
                        validation_data=(X_val, y_val), callbacks=[es])
def model_cnn():
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3,3), input_shape=(28,28,1), padding='same'))
    model.add(BatchNormalization())
    model.add(ELU())
    model.add(Conv2D(32, kernel_size=(3,3), padding='same'))
    model.add(BatchNormalization())
    model.add(ELU())
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(256))
    model.add(BatchNormalization())
    model.add(ELU())
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss="sparse_categorical_crossentropy", optimizer=Adam(), metrics=["accuracy", f1_metric])
    return model
Does anyone have a tip on how to fix this error?
Many thanks for every hint.

As the error message suggests:
Error 1) You are calling len() on a symbolic tensor; that operation is not supported for symbolic tensors. You can find the difference between a variable tensor and a symbolic tensor here.
Error 2) You are passing tensors to an operation (sklearn's f1_score) that expects arrays as input. Convert y_true and y_pred from tensors to arrays before using them in f1_score and the other operations.
Example - converting a tensor to an array:
%tensorflow_version 1.x
import tensorflow as tf
import numpy as np

print(tf.__version__)

x = tf.constant([1, 2, 3, 4, 5, 6])
print("Type of x:", x)
with tf.Session() as sess:
    # eval() materializes the symbolic tensor into a numpy array
    y = np.array(x.eval())
    print("Type of y:", y.shape, y)
Output:
1.15.2
Type of x: Tensor("Const_24:0", shape=(6,), dtype=int32)
Type of y: (6,) [1 2 3 4 5 6]
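Note that inside model.compile the metric runs on symbolic tensors, so converting to arrays is not always practical there. As an alternative, here is a minimal sketch (my own, not from the question) that builds the F1 score from Keras backend ops so it stays symbolic; it assumes sparse integer labels and computes a per-batch macro F1, which only approximates sklearn's epoch-level score:
from keras import backend as K

def f1_metric(y_true, y_pred):
    # y_true: sparse integer labels; y_pred: softmax probabilities
    num_classes = K.int_shape(y_pred)[-1]
    y_true = K.one_hot(K.cast(K.flatten(y_true), 'int32'), num_classes)
    y_pred = K.one_hot(K.argmax(y_pred, axis=-1), num_classes)
    tp = K.sum(y_true * y_pred, axis=0)
    fp = K.sum((1 - y_true) * y_pred, axis=0)
    fn = K.sum(y_true * (1 - y_pred), axis=0)
    precision = tp / (tp + fp + K.epsilon())
    recall = tp / (tp + fn + K.epsilon())
    # macro average: mean of the per-class F1 scores
    return K.mean(2 * precision * recall / (precision + recall + K.epsilon()))
Also, since validation_data is passed to fit, Keras logs the metric as val_f1_metric, so the callback should monitor that: EarlyStopping(monitor='val_f1_metric', mode='max', ...).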

Related

Tensorflow mixed_precision error `x` and `y` must have the same dtype, got tf.float16 != tf.float32

mixed_precision.set_global_policy(policy="mixed_float16") gives an error when I add this line.
The error is:
TypeError Traceback (most recent call last)
<ipython-input-...> in <module>()
5 #mixed_precision.set_global_policy(policy="float32")
6 input_shape = (224, 224, 3)
----> 7 base_model = tf.keras.applications.EfficientNetB0(include_top=False)
8 base_model.trainable = False # freeze base model layers
9
4 frames
/usr/local/lib/python3.7/dist-packages/keras/applications/efficientnet.py in EfficientNetB0(include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation, **kwargs)
559 classes=classes,
560 classifier_activation=classifier_activation,
--> 561 **kwargs)
562
563
/usr/local/lib/python3.7/dist-packages/keras/applications/efficientnet.py in EfficientNet(width_coefficient, depth_coefficient, default_size, dropout_rate, drop_connect_rate, depth_divisor, activation, blocks_args, model_name, include_top, weights, input_tensor, input_shape, pooling, classes, classifier_activation)
332 # original implementation.
333 # See https://github.com/tensorflow/tensorflow/issues/49930 for more details
--> 334 x = x / tf.math.sqrt(IMAGENET_STDDEV_RGB)
335
336 x = layers.ZeroPadding2D(
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
/usr/local/lib/python3.7/dist-packages/keras/layers/core/tf_op_layer.py in handle(self, op, args, kwargs)
105 isinstance(x, keras_tensor.KerasTensor)
106 for x in tf.nest.flatten([args, kwargs])):
--> 107 return TFOpLambda(op)(*args, **kwargs)
108 else:
109 return self.NOT_SUPPORTED
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
TypeError: Exception encountered when calling layer "tf.math.truediv_3" (type TFOpLambda).
`x` and `y` must have the same dtype, got tf.float16 != tf.float32.
Call arguments received by layer "tf.math.truediv_3" (type TFOpLambda):
  • x=tf.Tensor(shape=(None, None, None, 3), dtype=float16)
  • y=tf.Tensor(shape=(3,), dtype=float32)
  • name=None
Here is the code:
import tensorflow as tf
from tensorflow.keras import layers, mixed_precision
# Create base model
mixed_precision.set_global_policy(policy="mixed_float16")
input_shape = (224, 224, 3)
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False # freeze base model layers
# Create Functional model
inputs = layers.Input(shape=input_shape, name="input_layer")
# Note: EfficientNetBX models have rescaling built-in but if your model didn't you could have a layer like below
# x = layers.Rescaling(1./255)(x)
x = base_model(inputs, training=False) # set base_model to inference mode only
x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
x = layers.Dense(len(class_names))(x) # want one output neuron per class
# Separate activation of output layer so we can output float32 activations
outputs = layers.Activation("softmax", dtype=tf.float32, name="softmax_float32")(x)
model = tf.keras.Model(inputs, outputs)
# Compile the model
model.compile(loss="sparse_categorical_crossentropy", # Use sparse_categorical_crossentropy when labels are *not* one-hot
optimizer=tf.keras.optimizers.Adam(),
metrics=["accuracy"])
When I change this line to use float32 instead of mixed_float16, i.e. mixed_precision.set_global_policy(policy="float32"), the error goes away. I want to use mixed precision; how can I do it?
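The traceback points at EfficientNet's built-in normalization, x = x / tf.math.sqrt(IMAGENET_STDDEV_RGB), where the constant stays float32 while x is float16 under the mixed policy (see the GitHub issue referenced in the traceback). Upgrading to a newer TensorFlow/Keras release, where this normalization was reworked, is the cleaner fix. Failing that, here is a workaround sketch, based on the assumption that the global policy only affects layers created after it is set: build the backbone under the default float32 policy, then switch to mixed_float16 for the head.
import tensorflow as tf
from tensorflow.keras import layers, mixed_precision

# Build the backbone while the default float32 policy is active, so its
# internal normalization constants and activations agree in dtype.
base_model = tf.keras.applications.EfficientNetB0(include_top=False)
base_model.trainable = False

# Only layers constructed after this point use float16 compute.
mixed_precision.set_global_policy("mixed_float16")

inputs = layers.Input(shape=(224, 224, 3), name="input_layer")
x = base_model(inputs, training=False)
x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
x = layers.Dense(len(class_names))(x)  # class_names as in the question
outputs = layers.Activation("softmax", dtype=tf.float32, name="softmax_float32")(x)
model = tf.keras.Model(inputs, outputs)
The trade-off is that the frozen backbone then runs in float32, so the float16 speed and memory savings apply only to the new head layers.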

Tensorflow - SGD with momentum optimizer update fails for variable with dynamic shape

I am trying to create a variable with a dynamic shape and update it using SGD. Without momentum, the following code works:
import tensorflow as tf
x = tf.Variable(tf.random.normal((32,3)), shape=[None,3])
with tf.GradientTape() as tape:
    x.assign(tf.random.normal((20,3)))
    y = tf.reduce_sum(x)
grads = tape.gradient(y, x)
opt = tf.keras.optimizers.SGD(0.01)
opt.apply_gradients([[grads, x]])
But replacing the line opt = tf.keras.optimizers.SGD(0.01) with opt = tf.keras.optimizers.SGD(0.01, momentum=0.9) throws an error:
<ipython-input-6-66726ccd04f3> in <module>()
9 grads = tape.gradient(y, x)
10 opt = tf.keras.optimizers.SGD(0.01, momentum=0.9)
---> 11 opt.apply_gradients([[grads, x]])
5 frames
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py in apply_gradients(self, grads_and_vars, name, experimental_aggregate_gradients)
637 # Create iteration if necessary.
638 with tf.init_scope():
--> 639 self._create_all_weights(var_list)
640
641 if not grads_and_vars:
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py in _create_all_weights(self, var_list)
823 _ = self.iterations
824 self._create_hypers()
--> 825 self._create_slots(var_list)
826
827 def __getattribute__(self, name):
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/gradient_descent.py in _create_slots(self, var_list)
117 if self._momentum:
118 for var in var_list:
--> 119 self.add_slot(var, "momentum")
120
121 def _prepare_local(self, var_device, var_dtype, apply_state):
/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py in add_slot(self, var, slot_name, initializer, shape)
913 dtype=var.dtype,
914 trainable=False,
--> 915 initial_value=initial_value)
916 backend.track_variable(weight)
917 slot_dict[slot_name] = weight
/usr/local/lib/python3.7/dist-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs)
151 except Exception as e:
152 filtered_tb = _process_traceback_frames(e.__traceback__)
--> 153 raise e.with_traceback(filtered_tb) from None
154 finally:
155 del filtered_tb
/usr/local/lib/python3.7/dist-packages/keras/initializers/initializers_v2.py in __call__(self, shape, dtype, **kwargs)
143 if _PARTITION_SHAPE in kwargs:
144 shape = kwargs[_PARTITION_SHAPE]
--> 145 return tf.zeros(shape, dtype)
146
147
ValueError: Cannot convert a partially known TensorShape (None, 3) to a Tensor.
How can I resolve this?
Instead of assigning a (20,3) tensor inside tf.GradientTape, you can give the variable the shape (20,3) when initializing it, so its static shape is fully defined:
import tensorflow as tf
x = tf.Variable(tf.random.normal((20,3)))
with tf.GradientTape() as tape:
    # x.assign(tf.random.normal((20,3)))
    y = tf.reduce_sum(x)
grads = tape.gradient(y, x)
opt = tf.keras.optimizers.SGD(0.01)
opt.apply_gradients([[grads, x]])
The output of the above code is: <tf.Variable 'UnreadVariable' shape=() dtype=int64, numpy=1>
The same works with momentum enabled:
import tensorflow as tf
x = tf.Variable(tf.random.normal((20,3)))
with tf.GradientTape() as tape:
    # x.assign(tf.random.normal((20,3)))
    y = tf.reduce_sum(x)
grads = tape.gradient(y, x)
opt = tf.keras.optimizers.SGD(0.01,momentum=0.9)
opt.apply_gradients([[grads, x]])
The output of the above code is: <tf.Variable 'UnreadVariable' shape=() dtype=int64, numpy=1>
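Why momentum makes the difference: SGD with momentum creates a per-variable "momentum" slot, and, as the traceback shows, the slot's zeros initializer needs the variable's static shape. For a variable declared with shape=[None, 3] that shape is only partially known, which is exactly what tf.zeros rejects. A small reproduction of the root cause (an illustration, not the optimizer's exact code path):
import tensorflow as tf

x = tf.Variable(tf.random.normal((32, 3)), shape=[None, 3])
print(x.shape)  # (None, 3) -- only partially known

try:
    # the momentum slot initializer effectively calls tf.zeros(var.shape)
    tf.zeros(x.shape)
except ValueError as e:
    print(e)  # Cannot convert a partially known TensorShape (None, 3) to a Tensor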

(CRNN OCR) Error while training! Invalid Argument: sequence_length(0) <= 18 node ctc/CTCLoss

I use a CRNN (CNN + RNN + CTC loss) model for OCR, built with TensorFlow Keras.
Here's my code, starting from the CTC loss:
labels = Input(name='the_labels', shape=[max_label_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
def ctc_lambda_func(args):
    y_pred, labels, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([outputs, labels, input_length, label_length])
#model to be used at training time
model = Model(inputs=[inputs, labels, input_length, label_length], outputs=loss_out)
model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer = 'adam')
filepath="best_model.hdf5"
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto')
callbacks_list = [checkpoint]
training_img = np.array(training_img)
train_input_length = np.array(train_input_length)
train_label_length = np.array(train_label_length)
valid_img = np.array(valid_img)
valid_input_length = np.array(valid_input_length)
valid_label_length = np.array(valid_label_length)
The error occurs while training:
batch_size = 256
epochs = 10
model.fit(x=[training_img, train_padded_txt, train_input_length, train_label_length], y=np.zeros(len(training_img)),
batch_size=batch_size, epochs = epochs,
validation_data = ([valid_img, valid_padded_txt, valid_input_length, valid_label_length], [np.zeros(len(valid_img))]),
verbose = 1, callbacks = callbacks_list)
ERROR RESULT:
Train on 448 samples, validate on 49 samples
Epoch 1/10
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-15-1322212af569> in <module>()
4 batch_size=batch_size, epochs = epochs,
5 validation_data = ([valid_img, valid_padded_txt, valid_input_length, valid_label_length], [np.zeros(len(valid_img))]),
----> 6 verbose = 1, callbacks = callbacks_list)
7 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: sequence_length(0) <= 18
[[node ctc/CTCLoss (defined at /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3009) ]] [Op:__inference_keras_scratch_graph_12073]
Function call stack:
keras_scratch_graph
My CRNN architecture is inspired by VGG-16: I'm using 13 conv layers and 3 bidirectional LSTM layers, with CTC loss, and that is where I get the error.
My data is 1000 text images containing 4-8 words each (700 for training & validation, 300 for testing).
If you want to view my code, here it is on Google Colab:
https://colab.research.google.com/drive/1nMRNUsLDNrpgeTxPFQ4mhobnFdpbmwUx
I fixed this error. It was caused by the input_length values I was passing to the CTC loss.
Before:
# split the 700 data into validation and training dataset as 10% and 90% respectively
if i % 10 == 0:
    valid_orig_txt.append(txt)
    valid_label_length.append(len(txt))
    valid_input_length.append(31)
    valid_img.append(img)
    valid_txt.append(encode_to_labels(txt))
else:
    orig_txt.append(txt)
    train_label_length.append(len(txt))
    train_input_length.append(31)
    training_img.append(img)
    training_txt.append(encode_to_labels(txt))
After:
# split the 700 data into validation and training dataset as 10% and 90% respectively
if i % 10 == 0:
    valid_orig_txt.append(txt)
    valid_label_length.append(len(txt))
    valid_input_length.append(18)
    valid_img.append(img)
    valid_txt.append(encode_to_labels(txt))
else:
    orig_txt.append(txt)
    train_label_length.append(len(txt))
    train_input_length.append(18)
    training_img.append(img)
    training_txt.append(encode_to_labels(txt))
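For context, the input_length fed to ctc_batch_cost must not exceed the time dimension of the prediction sequence the network emits, which is 18 for this architecture; the earlier value of 31 violated sequence_length(0) <= 18. Rather than hard-coding the number, it can be derived from the model output. A small sketch, assuming outputs is the softmax sequence fed to the CTC Lambda above:
# time steps available to the CTC loss = second dimension of the predictions
time_steps = int(outputs.shape[1])  # 18 for this architecture

valid_input_length.append(time_steps)
train_input_length.append(time_steps)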

Keras Layer Concatenation

I'm trying to see how I can create a model in Keras with multiple Embedding layers and other inputs. Here's how my model is structured (E = Embedding layer, [....] = Input layer):
E   E   [V V V]
 \  |  /
  \ | /
  Dense
    |
  Dense
Here is my code so far:
model_a = Sequential()
model_a.add(Embedding(...))
model_b = Sequential()
model_b.add(Embedding(...))
model_c = Sequential()
model_c.add(Embedding(...))
model_values = Sequential()
model_values.add(Input(...))
classification_model = Sequential()
classification_layers = [
Concatenate([model_a,model_b,model_c, model_values]),
Dense(...),
Dense(...),
Dense(2, activation='softmax')
]
for layer in classification_layers:
classification_model.add(layer)
classification_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
classification_model.fit(train_data,one_hot_labels, epochs=1, validation_split=0.2)
However I get the following error:
ValueError: A `Concatenate` layer should be called on a list of at least 2 inputs
I am at a loss as to what I'm doing wrong here. Here's a little more detail from the error log:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-37-d5ab23b17e9d> in <module>()
----> 1 classification_model.fit(train_data,one_hot_labels, epochs=1, validation_split=0.2)
/usr/local/lib/python3.5/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
953 sample_weight=sample_weight,
954 class_weight=class_weight,
--> 955 batch_size=batch_size)
956 # Prepare validation data.
957 do_validation = False
/usr/local/lib/python3.5/dist-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
674 # to match the value shapes.
675 if not self.inputs:
--> 676 self._set_inputs(x)
677
678 if y is not None:
/usr/local/lib/python3.5/dist-packages/keras/engine/training.py in _set_inputs(self, inputs, outputs, training)
574 assert len(inputs) == 1
575 inputs = inputs[0]
--> 576 self.build(input_shape=(None,) + inputs.shape[1:])
577 return
578
/usr/local/lib/python3.5/dist-packages/keras/engine/sequential.py in build(self, input_shape)
225 self.inputs = [x]
226 for layer in self._layers:
--> 227 x = layer(x)
228 self.outputs = [x]
229
/usr/local/lib/python3.5/dist-packages/keras/engine/base_layer.py in __call__(self, inputs, **kwargs)
430 '`layer.build(batch_input_shape)`')
431 if len(input_shapes) == 1:
--> 432 self.build(input_shapes[0])
433 else:
434 self.build(input_shapes)
/usr/local/lib/python3.5/dist-packages/keras/layers/merge.py in build(self, input_shape)
339 # Used purely for shape validation.
340 if not isinstance(input_shape, list) or len(input_shape) < 2:
--> 341 raise ValueError('A `Concatenate` layer should be called '
342 'on a list of at least 2 inputs')
343 if all([shape is None for shape in input_shape]):
ValueError: A `Concatenate` layer should be called on a list of at least 2 inputs
input1 = Input(shape=...)
input2 = Input(...)
input3 = Input(...)
values = Input(...)
out1 = Embedding(...)(input1)
out2 = Embedding(...)(input2)
out3 = Embedding(...)(input3)
#make sure values has a shape compatible with the embedding outputs.
#usually it should have shape (equal_samples, equal_length, features)
joinedInput = Concatenate()([out1,out2,out3,values])
out = Dense(...)(joinedInput)
out = Dense(...)(out)
out = Dense(2, activation='softmax')(out)
model = Model([input1,input2,input3,values], out)
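A usage note on the functional model above: multi-input models take a list of arrays, one per Input and in the same order. The array names below are hypothetical:
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit([data_a, data_b, data_c, data_values], one_hot_labels,
          epochs=1, validation_split=0.2)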
You are missing the 'axis' parameter in the call. If you are concatenating on the last dimension (it's unclear what the dimensions of these embeddings and input tensors are), use:
concatenate([model_a,model_b,model_c, model_values], axis=-1)

How to use tensorflow nce_loss in keras?

I am trying to do a large multiclass classification(Actually a translation).
I am trying to use tensorflow nce_loss in keras, but not able to make it work. Any help here?
I am not sure how can I pass weights,num_class and bias from previous layer to nce_loss.
I am getting following error:
import tensorflow as tf
from attention_decoder import AttentionDecoder
from keras.models import Sequential
from keras.layers import Dropout, Masking, Embedding, LSTM

def keras_nce_loss(tgt, pred):
    return tf.nn.nce_loss(labels=tgt, inputs=pred, num_sampled=100)
model2 = Sequential()
model2.add(Embedding(input_features, input_embed_dimension, input_length=n_timesteps_in,mask_zero=True))
model2.add(Dropout(0.2))
model2.add(LSTM(LSTM_Unitsize,return_sequences=True,activation='relu'))
model2.add(Masking(mask_value=0.))
model2.add(AttentionDecoder(LSTM_Unitsize, n_features))
model2.compile(loss=keras_nce_loss, optimizer='adam', metrics=['acc'])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-157-0d76d4053a42> in <module>()
11 model2.add(Masking(mask_value=0.))
12 model2.add(AttentionDecoder(LSTM_Unitsize, n_features))
---> 13 model2.compile(loss=keras_nce_loss, optimizer='adam', metrics=['acc'])
14 #model2.save("model2_compiled.hd5")
/usr/local/lib/python3.6/dist-packages/keras/models.py in compile(self, optimizer, loss, metrics, sample_weight_mode, **kwargs)
786 metrics=metrics,
787 sample_weight_mode=sample_weight_mode,
--> 788 **kwargs)
789 self.optimizer = self.model.optimizer
790 self.loss = self.model.loss
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, **kwargs)
909 loss_weight = loss_weights_list[i]
910 output_loss = weighted_loss(y_true, y_pred,
--> 911 sample_weight, mask)
912 if len(self.outputs) > 1:
913 self.metrics_tensors.append(output_loss)
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in weighted(y_true, y_pred, weights, mask)
434 """
435 # score_array has ndim >= 2
--> 436 score_array = fn(y_true, y_pred)
437 if mask is not None:
438 # Cast the mask to floatX to avoid float64 upcasting in theano
<ipython-input-155-ec20de882530> in keras_nce_loss(tgt, pred)
2
3 def keras_nce_loss(tgt, pred):
----> 4 return tf.nn.nce_loss(labels=tgt,inputs=pred,num_sampled=100)
TypeError: nce_loss() missing 3 required positional arguments: 'weights', 'biases', and 'num_classes'
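As the final TypeError says, tf.nn.nce_loss also needs weights, biases, and num_classes; these belong to the loss itself (they replace the final projection layer) rather than coming from the previous layer. Here is a minimal sketch of how they might be wired up, with hypothetical sizes, and assuming one embedding vector per sample (the sequence-returning decoder above would need reshaping first):
import tensorflow as tf

num_classes = 10000  # hypothetical vocabulary size
embed_dim = 128      # hypothetical decoder output width

# NCE keeps its own output projection; these replace a final Dense layer.
nce_weights = tf.Variable(tf.random.truncated_normal([num_classes, embed_dim], stddev=0.05))
nce_biases = tf.Variable(tf.zeros([num_classes]))

def keras_nce_loss(y_true, y_pred):
    # y_true: integer class ids with shape (batch, 1); y_pred: (batch, embed_dim)
    return tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=tf.cast(y_true, tf.int64),
                       inputs=y_pred,
                       num_sampled=100,
                       num_classes=num_classes))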