'numpy.ndarray' object has no attribute 'lower' - numpy

I am fairly new to ML and am trying to fit some data with my Naive Bayes classifier.
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
# Naïve Bayes:
text_clf_nb = Pipeline([('tfidf', TfidfVectorizer()),
                        ('clf', MultinomialNB()),
                        ])
# Linear SVC:
text_clf_lsvc = Pipeline([('tfidf', TfidfVectorizer()),
                          ('clf', LinearSVC()),
                          ])
Code for fitting the data:
text_clf_nb.fit(X_train, y_train)
The shape of my training & test data is
X_train.shape, X_test.shape, y_train.shape, y_test.shape : ((169, 1), (84,), (169, 1), (84,))
But I keep getting: 'numpy.ndarray' object has no attribute 'lower'
Here is the full traceback:
AttributeError Traceback (most recent call last)
<ipython-input-57-139757126594> in <module>
----> 1 text_clf_nb.fit(X_train, y_train)
~\miniconda3\envs\nlp_course\lib\site-packages\sklearn\pipeline.py in fit(self, X, y, **fit_params)
263 This estimator
264 """
--> 265 Xt, fit_params = self._fit(X, y, **fit_params)
266 if self._final_estimator is not None:
267 self._final_estimator.fit(Xt, y, **fit_params)
~\miniconda3\envs\nlp_course\lib\site-packages\sklearn\pipeline.py in _fit(self, X, y, **fit_params)
228 Xt, fitted_transformer = fit_transform_one_cached(
229 cloned_transformer, Xt, y, None,
--> 230 **fit_params_steps[name])
231 # Replace the transformer of the step with the fitted
232 # transformer. This is necessary when loading the transformer
~\miniconda3\envs\nlp_course\lib\site-packages\sklearn\externals\joblib\memory.py in __call__(self, *args, **kwargs)
340
341 def __call__(self, *args, **kwargs):
--> 342 return self.func(*args, **kwargs)
343
344 def call_and_shelve(self, *args, **kwargs):

You have checked the shape of the arrays, but have you tried something like:
data = vectorizer.fit_transform(array.ravel())
This should do the trick for you.
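To spell out why this works (my reading of the traceback, not something the original answer states): TfidfVectorizer expects a 1-D sequence of strings, and with a (169, 1) array each "document" it receives is itself a length-1 ndarray, which has no .lower(). A minimal sketch of the fix applied to the pipeline above:
# Hedged sketch: flatten the (169, 1) column to shape (169,) so each element
# handed to TfidfVectorizer is a plain string; y_train.ravel() likewise avoids
# scikit-learn's column-vector warning. An (n, 1) shape often comes from
# df[['text']] rather than df['text'] (hypothetical column name).
text_clf_nb.fit(X_train.ravel(), y_train.ravel())
predictions = text_clf_nb.predict(X_test)  # X_test is already 1-D, shape (84,)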

Related

How to fix "pop from empty list" error while using Keras tuner search method with TPU in google colab?

I was previously able to run the Keras Tuner search method on my model with the GPU runtime of Google Colab, but when I switched to the TPU runtime I got the error below. I haven't been able to work out how to give the TPU runtime access to a Google Cloud Storage bucket for the checkpoint folder that Keras Tuner saves model checkpoints in. Please help me resolve this issue.
My code:
def post_se(hp):
    ip = Input(shape=(6, 128))
    x = Masking()(ip)
    x = LSTM(units=hp.Choice('lstm_1', values=[8, 16, 32, 64, 128, 256, 512]), return_sequences=True)(x)
    x = Dropout(hp.Choice(name='Dropout', values=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]))(x)
    x = LSTM(units=hp.Choice('lstm_2', values=[8, 16, 32, 64, 128, 256, 512]))(x)
    x = Dropout(hp.Choice(name='Dropout_2', values=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]))(x)
    y = Permute((2, 1))(ip)
    y = Conv1D(hp.Choice('conv_1_filter', values=[32, 64, 128, 256, 512]), hp.Choice(name='conv_1_filter_size', values=[3, 5, 7, 8, 9]), padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)
    y = Conv1D(hp.Choice('conv_2_filter', values=[32, 64, 128, 256, 512]), hp.Choice(name='conv_2_filter_size', values=[3, 5, 7, 8, 9]), padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = squeeze_excite_block(y)
    y = Conv1D(hp.Choice('conv_3_filter', values=[32, 64, 128, 256, 512]), hp.Choice(name='conv_3_filter_size', values=[3, 5, 7, 8, 9]), padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = GlobalAveragePooling1D()(y)
    x = concatenate([x, y])
    # batch_size = hp.Choice('batch_size', values=[32, 64, 128, 256, 512, 1024, 2048, 4096])
    out = Dense(num_classes, activation='softmax')(x)
    model = Model(ip, out)
    if gpu:
        opt = keras.optimizers.Adam(learning_rate=0.001)
    if tpu:
        opt = keras.optimizers.Adam(learning_rate=8 * 0.001)
    model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
    # model.summary()
    return model

if gpu:
    tuner = kt.tuners.BayesianOptimization(post_se,
                                           objective='val_accuracy',
                                           max_trials=30,
                                           seed=42,
                                           project_name='Model_gpu')
    # Will stop training if the "val_loss" hasn't improved in 30 epochs.
    tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True,
                 callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
if tpu:
    print("TPU")
    with strategy.scope():
        tuner = kt.tuners.BayesianOptimization(post_se,
                                               objective='val_accuracy',
                                               max_trials=30,
                                               seed=42,
                                               project_name='Model_tpu')
    # Will stop training if the "val_loss" hasn't improved in 30 epochs.
    tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True,
                 callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
The error log
---------------------------------------------------------------------------
UnimplementedError Traceback (most recent call last)
/usr/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
10 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in resource_creator_scope(resource_type, resource_creator)
2957 resource_creator):
-> 2958 yield
2959
<ipython-input-15-24c1e1bb603d> in <module>()
17 # Will stop training if the "val_loss" hasn't improved in 30 epochs.
---> 18 tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True, callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/base_tuner.py in search(self, *fit_args, **fit_kwargs)
178 self.on_trial_begin(trial)
--> 179 results = self.run_trial(trial, *fit_args, **fit_kwargs)
180 # `results` is None indicates user updated oracle in `run_trial()`.
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/tuner.py in run_trial(self, trial, *args, **kwargs)
303 copied_kwargs["callbacks"] = callbacks
--> 304 obj_value = self._build_and_fit_model(trial, *args, **copied_kwargs)
305
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/tuner.py in _build_and_fit_model(self, trial, *args, **kwargs)
233 model = self._try_build(hp)
--> 234 return self.hypermodel.fit(hp, model, *args, **kwargs)
235
/usr/local/lib/python3.7/dist-packages/keras_tuner/engine/hypermodel.py in fit(self, hp, model, *args, **kwargs)
136 """
--> 137 return model.fit(*args, **kwargs)
138
/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py in error_handler(*args, **kwargs)
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/ops.py in _numpy(self)
1116 except core._NotOkStatusException as e: # pylint: disable=protected-access
-> 1117 raise core._status_to_exception(e) from None # pylint: disable=protected-access
1118
UnimplementedError: File system scheme '[local]' not implemented (file: './untitled_project/trial_78ed6883514d67dc6222064095c134cb/checkpoints/epoch_0/checkpoint_temp/part-00000-of-00001')
Encountered when executing an operation using EagerExecutor. This error cancels all future operations and poisons their output tensors.
During handling of the above exception, another exception occurred:
IndexError Traceback (most recent call last)
<ipython-input-15-24c1e1bb603d> in <module>()
16 seed=42)
17 # Will stop training if the "val_loss" hasn't improved in 30 epochs.
---> 18 tuner.search(X_train, train_label, epochs=200, validation_split=0.1, shuffle=True, callbacks=[tensorflow.keras.callbacks.EarlyStopping('val_loss', patience=30)])
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribute_lib.py in __exit__(self, exception_type, exception_value, traceback)
454 "tf.distribute.set_strategy() out of `with` scope."),
455 e)
--> 456 _pop_per_thread_mode()
457
458
/usr/local/lib/python3.7/dist-packages/tensorflow/python/distribute/distribution_strategy_context.py in _pop_per_thread_mode()
64
65 def _pop_per_thread_mode():
---> 66 ops.get_default_graph()._distribution_strategy_stack.pop(-1) # pylint: disable=protected-access
67
68
IndexError: pop from empty list
For some extra info, I am attaching my code in this post.
This is your error:
UnimplementedError: File system scheme '[local]' not implemented (file: './untitled_project/trial_78ed6883514d67dc6222064095c134cb/checkpoints/epoch_0/checkpoint_temp/part-00000-of-00001')
See https://stackoverflow.com/a/62881833/14043558 for a solution.
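In short (a sketch of the linked fix, with a placeholder bucket name): on a Colab TPU, Keras Tuner's checkpoints have to live in Google Cloud Storage, because the TPU workers cannot read or write the VM-local ./untitled_project directory named in the error. Pointing the tuner's directory argument at a GCS path the session can write to avoids the UnimplementedError:
# Hedged sketch: 'gs://your-bucket/keras-tuner' is a placeholder for a GCS
# bucket this Colab session is authorized to write to.
with strategy.scope():
    tuner = kt.tuners.BayesianOptimization(post_se,
                                           objective='val_accuracy',
                                           max_trials=30,
                                           seed=42,
                                           directory='gs://your-bucket/keras-tuner',
                                           project_name='Model_tpu')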

AttributeError: 'numpy.ndarray' object has no attribute 'op'

I have time-series data and am trying to build and train an LSTM model on it. My model has one input and one output, and I am trying to build a many-to-many model where the input length is exactly equal to the output length.
The shape of my input is
print(np.shape(X))
(1700,70,401)
#(examples, Timestep, Features)
Shape of my output is
print(np.shape(Y_1))
(1700,70,3)
#(examples, Timestep, Features)
When I approach this problem via the Sequential API, everything runs fine.
model = Sequential()
model.add(LSTM(32, input_shape=(70,401), return_sequences=True))
model.add(Dense(3,activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),loss=tf.keras.losses.CategoricalCrossentropy())
model.fit(X, Y_1, epochs=2,verbose=1)
But when I approach it with the functional API, it throws the error
AttributeError: 'numpy.ndarray' object has no attribute 'op'
input_layer = Input(shape=(70,401))
hidden = LSTM(32,return_sequences=True)(input_layer)
output_1 = Dense(3, activation='softmax')(hidden)
# output_2 = Dense(np.shape(Y_2)[2], activation='softmax')(hidden)
model_lstm = Model(inputs=X, outputs = Y_1)
My question is: how do I resolve this error?
I cannot use the Sequential API here because I want to train with multiple outputs, i.e. I have 2 different outputs to train on (but for the scope of this question, let's assume I have one set of inputs and one set of outputs).
The entire error I am getting is:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-66-df3a5a1656f0> in <module>
----> 1 model_lstm = Model(X, Y_1)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in __init__(self, *args, **kwargs)
144
145 def __init__(self, *args, **kwargs):
--> 146 super(Model, self).__init__(*args, **kwargs)
147 _keras_api_gauge.get_cell('model').set(True)
148 # initializing _distribution_strategy here since it is possible to call
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in __init__(self, *args, **kwargs)
165 'inputs' in kwargs and 'outputs' in kwargs):
166 # Graph network
--> 167 self._init_graph_network(*args, **kwargs)
168 else:
169 # Subclassed network
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in _init_graph_network(self, inputs, outputs, name, **kwargs)
268
269 if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
--> 270 base_layer_utils.create_keras_history(self._nested_outputs)
271
272 self._base_init(name=name, **kwargs)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in create_keras_history(tensors)
182 keras_tensors: The Tensors found that came from a Keras Layer.
183 """
--> 184 _, created_layers = _create_keras_history_helper(tensors, set(), [])
185 return created_layers
186
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in _create_keras_history_helper(tensors, processed_ops, created_layers)
208 if getattr(tensor, '_keras_history', None) is not None:
209 continue
--> 210 op = tensor.op # The Op that created this Tensor.
211 if op not in processed_ops:
212 # Recursively set `_keras_history`.
AttributeError: 'numpy.ndarray' object has no attribute 'op'
Update
I tried casting X and Y_1 to tensor objects, as suggested in the comments. It works perfectly with the Sequential API but still fails with the functional API.
X_tensor = tf.convert_to_tensor(X, dtype=tf.float32)
y_tensor=tf.convert_to_tensor(Y_1, dtype=tf.int32)
model_lstm = Model(X_tensor, y_tensor)
Error
AttributeError: Tensor.op is meaningless when eager execution is enabled.
AttributeError Traceback (most recent call last)
<ipython-input-100-d090ea2b5a90> in <module>
----> 1 model_lstm = Model(X_tensor, y_tensor)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py in __init__(self, *args, **kwargs)
144
145 def __init__(self, *args, **kwargs):
--> 146 super(Model, self).__init__(*args, **kwargs)
147 _keras_api_gauge.get_cell('model').set(True)
148 # initializing _distribution_strategy here since it is possible to call
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in __init__(self, *args, **kwargs)
165 'inputs' in kwargs and 'outputs' in kwargs):
166 # Graph network
--> 167 self._init_graph_network(*args, **kwargs)
168 else:
169 # Subclassed network
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
455 self._self_setattr_tracking = False # pylint: disable=protected-access
456 try:
--> 457 result = method(self, *args, **kwargs)
458 finally:
459 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py in _init_graph_network(self, inputs, outputs, name, **kwargs)
268
269 if any(not hasattr(tensor, '_keras_history') for tensor in self.outputs):
--> 270 base_layer_utils.create_keras_history(self._nested_outputs)
271
272 self._base_init(name=name, **kwargs)
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in create_keras_history(tensors)
182 keras_tensors: The Tensors found that came from a Keras Layer.
183 """
--> 184 _, created_layers = _create_keras_history_helper(tensors, set(), [])
185 return created_layers
186
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py in _create_keras_history_helper(tensors, processed_ops, created_layers)
208 if getattr(tensor, '_keras_history', None) is not None:
209 continue
--> 210 op = tensor.op # The Op that created this Tensor.
211 if op not in processed_ops:
212 # Recursively set `_keras_history`.
/root/anaconda3/envs/TensorPy36/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py in op(self)
1078 def op(self):
1079 raise AttributeError(
-> 1080 "Tensor.op is meaningless when eager execution is enabled.")
1081
1082 @property
AttributeError: Tensor.op is meaningless when eager execution is enabled.
I made a mistake in the functional-API version when constructing the Model:
model_lstm = Model(inputs=X, outputs = Y_1)
I passed the data arrays here, but this part of the code only defines what the model is going to be: it expects the symbolic input and output layers, not the training data. In my code the input layer is input_layer and the output layer is output_1, so the line should be
model_lstm = Model(inputs=input_layer, outputs = output_1)
and after that we can call
model_lstm.fit(X,Y_1)
which now works fine.
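Putting the whole functional-API version together, a minimal sketch assembled only from the code above (a second head would become outputs=[output_1, output_2] with a matching list of targets):
input_layer = Input(shape=(70, 401))
hidden = LSTM(32, return_sequences=True)(input_layer)
output_1 = Dense(3, activation='softmax')(hidden)

# The Model is wired from symbolic layers; the NumPy arrays X and Y_1 only
# appear in fit().
model_lstm = Model(inputs=input_layer, outputs=output_1)
model_lstm.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
                   loss=tf.keras.losses.CategoricalCrossentropy())
model_lstm.fit(X, Y_1, epochs=2, verbose=1)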

Colab TPU error - InvalidArgumentError: Unsupported data type for TPU: string, caused by output cond_8/Identity_1:0

I get the above error in a Colab TPU session from the code below. The original model had epochs, steps_per_epoch and batch size set, but I removed those while debugging. I am not sure what the issue is, as I do not see a string anywhere.
The non-TPU version of the code works. Most of the code is stock code with some modifications; I tested it to ensure the images load properly.
import tensorflow as tf
from tensorflow.keras import backend as K
import os
import PIL
import csv
import shutil
import numpy as np
import sys
from PIL import Image
import gc
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.layers import Dense, Activation, Flatten
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
# This is the TPU initialization code that has to be at the beginning.
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)
list_ds = tf.data.Dataset.list_files(str(gcs_pattern))

# Reads an image from a file, decodes it into a dense tensor, and resizes it
# to a fixed shape.
def parse_image(filename):
    parts = tf.strings.split(filename, os.sep)
    label = parts[-2]
    image = tf.io.read_file(filename)
    image = tf.image.decode_jpeg(image)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, [400, 400])
    return image, label

list_ds = list_ds.map(parse_image)
def create_model():
    pre_trained_model = InceptionV3(input_shape=(400, 400, 3), include_top=False, weights='imagenet')
    for layer in pre_trained_model.layers:
        if layer.name == 'mixed1':
            break
        layer.trainable = False
    last_layer = pre_trained_model.get_layer('mixed7')
    last_output = last_layer.output
    from tensorflow.keras.optimizers import RMSprop
    from tensorflow.keras import regularizers
    x = Flatten()(last_output)
    x = layers.Dense(1024, activation='relu')(x)
    x = layers.Dropout(.2)(x)
    x = layers.Dense(4, activation='softmax')(x)
    modelin = Model(pre_trained_model.input, x)
    return modelin

def get_callbacks(name_weights, patience_lr):
    mcp_save = ModelCheckpoint(name_weights, save_best_only=True, monitor='val_acc', mode='max')
    # reduce_lr_loss = ReduceLROnPlateau(monitor='loss', factor=0.1, patience=patience_lr, verbose=1, epsilon=1e-4, mode='min')
    return [mcp_save]  # , reduce_lr_loss]
batch_size = 16 * strategy.num_replicas_in_sync

for i in range(5):
    dataset = list_ds.shuffle(buffer_size=2280)
    dataset = dataset.cache()
    val = dataset.skip(i*456).take(456).batch(batch_size, drop_remainder=True).prefetch(4)
    train = dataset.skip(i*456 + 456).take(1824).concatenate(dataset.take(456*i)).batch(batch_size, drop_remainder=True).prefetch(15)
    name_weights = "/content/drive/My Drive/Plant/final_model_fold_D512_I400_mix_1_7_" + str(i) + ".{epoch:02d}-{val_acc:.2f}.h5"
    # callbacks = get_callbacks(name_weights=name_weights, patience_lr=10)
    with strategy.scope():
        modelinc = create_model()
        modelinc.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
    modelinc.fit(train, epochs=5)
    print(modelinc.evaluate(val))
    K.clear_session()
    del name_weights
    # del callbacks  # callbacks is never defined while get_callbacks is commented out
    gc.collect()
Error:
Epoch 1/5
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-4-bbe01274450b> in <module>()
31 modelinc.fit(
32 train,
---> 33 epochs=5)
34
35
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
64 def _method_wrapper(self, *args, **kwargs):
65 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
---> 66 return method(self, *args, **kwargs)
67
68 # Running inside `run_distribute_coordinator` already.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
853 context.async_wait()
854 logs = tmp_logs # No error, now safe to assign to logs.
--> 855 callbacks.on_train_batch_end(step, logs)
856 epoch_logs = copy.copy(logs)
857
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/callbacks.py in on_train_batch_end(self, batch, logs)
387 """
388 if self._should_call_train_batch_hooks:
--> 389 logs = self._process_logs(logs)
390 self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
391
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/callbacks.py in _process_logs(self, logs)
263 """Turns tensors into numpy arrays or Python scalars."""
264 if logs:
--> 265 return tf_utils.to_numpy_or_python_type(logs)
266 return {}
267
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/tf_utils.py in to_numpy_or_python_type(tensors)
521 return t # Don't turn ragged or sparse tensors to NumPy.
522
--> 523 return nest.map_structure(_to_single_numpy_or_python_type, tensors)
524
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
615
616 return pack_sequence_as(
--> 617 structure[0], [func(*x) for x in entries],
618 expand_composites=expand_composites)
619
/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
615
616 return pack_sequence_as(
--> 617 structure[0], [func(*x) for x in entries],
618 expand_composites=expand_composites)
619
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/utils/tf_utils.py in _to_single_numpy_or_python_type(t)
517 def _to_single_numpy_or_python_type(t):
518 if isinstance(t, ops.Tensor):
--> 519 x = t.numpy()
520 return x.item() if np.ndim(x) == 0 else x
521 return t # Don't turn ragged or sparse tensors to NumPy.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in numpy(self)
959 """
960 # TODO(slebedev): Consider avoiding a copy for non-CPU or remote tensors.
--> 961 maybe_arr = self._numpy() # pylint: disable=protected-access
962 return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr
963
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py in _numpy(self)
927 return self._numpy_internal()
928 except core._NotOkStatusException as e:
--> 929 six.raise_from(core._status_to_exception(e.code, e.message), None)
930
931 @property
/usr/local/lib/python3.6/dist-packages/six.py in raise_from(value, from_value)
InvalidArgumentError: Unsupported data type for TPU: string, caused by output cond_8/Identity_1:0
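A possible culprit, judging only from the code and trace above rather than a confirmed answer: the label returned by parse_image is a tf.string tensor (parts[-2] is a path component), and it reaches the TPU through the input pipeline even though no string is visible in the model itself. A minimal sketch of mapping the string labels to the integer ids that sparse_categorical_crossentropy expects (the class list is hypothetical):
# Hedged sketch: replace the raw string label with an integer class id.
class_names = tf.constant(['class_a', 'class_b', 'class_c', 'class_d'])

def parse_image_int_label(filename):
    parts = tf.strings.split(filename, os.sep)
    # Index of the matching class name, as an int64 scalar.
    label = tf.argmax(tf.cast(tf.equal(class_names, parts[-2]), tf.int64))
    image = tf.io.read_file(filename)
    image = tf.image.decode_jpeg(image)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize(image, [400, 400])
    return image, label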

AttributeError: 'Tensor' object has no attribute 'assign' in an attention model

I am trying to build a document classification model with attention in Keras (part of the model in the paper Hierarchical Attention Networks for Document Classification). The code below is my test code. I create a bidirectional RNN and a custom attention layer, following https://github.com/person-lee/LSTM_ATTENTION_CLASSIFY/blob/master/utils.py and https://github.com/richliao/textClassifier/blob/master/textClassifierHATT.py, but I get an error (details below).
The code is:
from keras.models import Model
from keras.layers import Input
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import GRU
from keras.layers.wrappers import Bidirectional, TimeDistributed
from keras.layers.core import Dropout, Dense, Lambda, Masking
from keras.layers import merge
from keras.engine.topology import Layer
from keras import backend as K
from keras import initializers
import keras
class AttentionLayer(Layer):
    '''
    Attention layer.
    '''
    def __init__(self, init='glorot_uniform', **kwargs):
        super(AttentionLayer, self).__init__(**kwargs)
        self.supports_masking = True
        self.init = initializers.get(init)

    def build(self, input_shape):
        input_dim = input_shape[-1]
        self.Uw = self.init((input_dim,))
        self.trainable_weights = [self.Uw]
        super(AttentionLayer, self).build(input_shape)

    def compute_mask(self, input, mask):
        return mask

    def call(self, x, mask=None):
        eij = K.tanh(K.squeeze(K.dot(x, K.expand_dims(self.Uw)), axis=-1))
        ai = K.exp(eij)
        weights = ai / K.expand_dims(K.sum(ai, axis=1), 1)
        weighted_input = x * K.expand_dims(weights, 2)
        return K.sum(weighted_input, axis=1)

    def get_output_shape_for(self, input_shape):
        newShape = list(input_shape)
        newShape[-1] = 1
        return tuple(newShape)

sentence_input = Input(shape=(None, 5))
# embedded_sequences = embedding_layer(sentence_input)
l_lstm = Bidirectional(GRU(10, return_sequences=True), merge_mode='concat')(sentence_input)
# l_dense = TimeDistributed(Dense(200))(l_lstm)
l_att = AttentionLayer()(l_lstm)
cls = Dense(10, activation='softmax')(l_att)
sentEncoder = Model(sentence_input, cls)
sentEncoder.compile(loss='categorical_crossentropy',
                    optimizer='rmsprop',
                    metrics=['acc'])

import numpy as np
x_train = np.array([[1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5],
                    [1, 2, 3, 4, 5]])
y_train = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
y_train = keras.utils.to_categorical(y_train, 10)
x_train = np.expand_dims(x_train, 0)
y_train = np.expand_dims(y_train, 0)
sentEncoder.fit(x=x_train, y=y_train, validation_split=0.1)
and got the following error:
AttributeError Traceback (most recent call last)
<ipython-input-13-3f6bb30d8618> in <module>()
----> 1 sentEncoder.fit(x=x_train,y=y_train,validation_split=0.1)
~/.conda/envs/21/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1011 else:
1012 ins = x + y + sample_weights
-> 1013 self._make_train_function()
1014 f = self.train_function
1015
~/.conda/envs/21/lib/python3.6/site-packages/keras/engine/training.py in _make_train_function(self)
495 training_updates = self.optimizer.get_updates(
496 params=self._collected_trainable_weights,
--> 497 loss=self.total_loss)
498 updates = (self.updates +
499 training_updates +
~/.conda/envs/21/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~/.conda/envs/21/lib/python3.6/site-packages/keras/optimizers.py in get_updates(self, loss, params)
262 new_p = p.constraint(new_p)
263
--> 264 self.updates.append(K.update(p, new_p))
265 return self.updates
266
~/.conda/envs/21/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in update(x, new_x)
968 The variable `x` updated.
969 """
--> 970 return tf.assign(x, new_x)
971
972
~/.conda/envs/21/lib/python3.6/site-packages/tensorflow/python/ops/state_ops.py in assign(ref, value, validate_shape, use_locking, name)
282 ref, value, use_locking=use_locking, name=name,
283 validate_shape=validate_shape)
--> 284 return ref.assign(value, name=name)
285
286
AttributeError: 'Tensor' object has no attribute 'assign'
I have no idea what is wrong. I googled and asked people who know this area but did not figure it out. Is it because of the Bidirectional wrapper? Does anybody know what is going wrong?
I guess it is a problem with the shapes of the dataset and labels.
I met the same problem and solved it. The cause is K.update(p, new_p): p must not be a plain Tensor. When you call K.update(p, new_p), p should be a tf.Variable and new_p should be a Tensor. I hope this solves your problem.
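Concretely, a minimal sketch of a build() that follows this advice, assuming the Keras 2 Layer API used above: registering the weight through add_weight() creates a tf.Variable the optimizer can assign to, instead of the plain Tensor produced by calling the initializer directly.
def build(self, input_shape):
    input_dim = input_shape[-1]
    # add_weight registers Uw as a trainable tf.Variable, so the optimizer's
    # K.update(p, new_p) receives a Variable rather than a Tensor.
    self.Uw = self.add_weight(name='Uw',
                              shape=(input_dim,),
                              initializer=self.init,
                              trainable=True)
    super(AttentionLayer, self).build(input_shape)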

How to use tensorflow nce_loss in keras?

I am trying to do a large multiclass classification (actually a translation).
I am trying to use TensorFlow's nce_loss in Keras, but I am not able to make it work. Any help here?
I am not sure how I can pass the weights, num_classes and biases from the previous layer to nce_loss.
I am getting the following error with the code below:
import tensorflow as tf
from attention_decoder import AttentionDecoder
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Masking, Embedding

def keras_nce_loss(tgt, pred):
    return tf.nn.nce_loss(labels=tgt, inputs=pred, num_sampled=100)

model2 = Sequential()
model2.add(Embedding(input_features, input_embed_dimension, input_length=n_timesteps_in, mask_zero=True))
model2.add(Dropout(0.2))
model2.add(LSTM(LSTM_Unitsize, return_sequences=True, activation='relu'))
model2.add(Masking(mask_value=0.))
model2.add(AttentionDecoder(LSTM_Unitsize, n_features))
model2.compile(loss=keras_nce_loss, optimizer='adam', metrics=['acc'])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-157-0d76d4053a42> in <module>()
11 model2.add(Masking(mask_value=0.))
12 model2.add(AttentionDecoder(LSTM_Unitsize, n_features))
---> 13 model2.compile(loss=keras_nce_loss, optimizer='adam', metrics=['acc'])
14 #model2.save("model2_compiled.hd5")
/usr/local/lib/python3.6/dist-packages/keras/models.py in compile(self, optimizer, loss, metrics, sample_weight_mode, **kwargs)
786 metrics=metrics,
787 sample_weight_mode=sample_weight_mode,
--> 788 **kwargs)
789 self.optimizer = self.model.optimizer
790 self.loss = self.model.loss
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, **kwargs)
909 loss_weight = loss_weights_list[i]
910 output_loss = weighted_loss(y_true, y_pred,
--> 911 sample_weight, mask)
912 if len(self.outputs) > 1:
913 self.metrics_tensors.append(output_loss)
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in weighted(y_true, y_pred, weights, mask)
434 """
435 # score_array has ndim >= 2
--> 436 score_array = fn(y_true, y_pred)
437 if mask is not None:
438 # Cast the mask to floatX to avoid float64 upcasting in theano
<ipython-input-155-ec20de882530> in keras_nce_loss(tgt, pred)
2
3 def keras_nce_loss(tgt, pred):
----> 4 return tf.nn.nce_loss(labels=tgt,inputs=pred,num_sampled=100)
TypeError: nce_loss() missing 3 required positional arguments: 'weights', 'biases', and 'num_classes'
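For reference, a minimal sketch of how nce_loss's required arguments can be supplied through a closure; all sizes are hypothetical, and this is not a drop-in fix for the sequence-output model above. nce_loss manages its own output weight matrix and bias, so they are created as variables alongside the model rather than taken from the previous layer:
import tensorflow as tf
from keras import backend as K

num_classes = 10000  # hypothetical target vocabulary size
dim = 128            # hypothetical width of the layer feeding the loss

nce_weights = tf.Variable(tf.random_normal([num_classes, dim], stddev=0.05))
nce_biases = tf.Variable(tf.zeros([num_classes]))

def keras_nce_loss(tgt, pred):
    # tgt: integer class ids, shape (batch, num_true);
    # pred: activations of shape (batch, dim).
    return tf.nn.nce_loss(weights=nce_weights,
                          biases=nce_biases,
                          labels=K.cast(tgt, 'int64'),
                          inputs=pred,
                          num_sampled=100,
                          num_classes=num_classes)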