I am trying to implement a Deep Dream model using ResNet, and several problems keep appearing. One of them is an error saying that the object being unpacked is of type "NoneType", i.e. it has the value None; in Python you cannot unpack a NoneType object because it is not iterable. At other times the error says that the value being converted is of type "NoneType" and that this type is unsupported, which means TensorFlow cannot create an EagerTensor from a value of None.
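To illustrate the mechanism (a minimal sketch, not the code from the question): tf.GradientTape.gradient returns None when the loss does not depend on a tensor the tape is watching, and unpacking that None with "grads, = ..." raises exactly this TypeError.

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])            # a plain constant: the tape does not watch it
with tf.GradientTape() as tape:
    loss = tf.reduce_mean(x * x)
print(tape.gradient(loss, x))               # -> None (x was never watched)
# grads, = tape.gradient(loss, x)           # -> TypeError: cannot unpack non-iterable NoneType

x_var = tf.Variable([1.0, 2.0, 3.0])        # Variables are watched automatically
with tf.GradientTape() as tape:
    loss = tf.reduce_mean(x_var * x_var)
print(tape.gradient(loss, x_var))           # -> a real tensor, 2*x/3 = [0.667, 1.333, 2.0]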
Below is a piece of code I tried in order to work around the error, but it failed. After it comes the full code.
def deep_dream(input_image, model, steps=100, step_size=0.01):
    # Define the loss and the optimizer
    loss, intermediate_layer_model = calc_loss(input_image, model)
    optimizer = tf.optimizers.SGD(learning_rate=step_size)
    # Keep a list to hold the evolution of the image
    image_list = []
    # Run the optimization
    for i in range(steps):
        with tf.GradientTape() as tape:
            tape.watch(input_image)
            loss = calc_loss(input_image, model)[0]
            grads, = tape.gradient(loss, input_image)
            grads = tape.gradient(loss, input_image)
            if grads is None:
                return
        optimizer.apply_gradients([(grads, input_image)])
        image_list.append(input_image.numpy().copy())
    # Return the final image
    return input_image
# Load the ResNet50 model
#model = ResNet50(weights='imagenet')
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input

model = keras.applications.ResNet50(weights='imagenet', include_top=False)
# Iterate over the layers in the ResNet50 model
for layer in model.layers:
    print(f'{layer.name}---> {layer.output_shape}')
import cv2
# Function to calculate the loss
def calc_loss(input_image, model):
    input_image_batch = tf.expand_dims(input_image, axis=0)
    preprocessed_input = preprocess_input(input_image_batch.numpy().copy())
    # Get the activations of a specific layer
    layer_name = "conv5_block2_1_bn"
    intermediate_layer_model = tf.keras.models.Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
    intermediate_output = intermediate_layer_model(preprocessed_input)
    # Define the loss
    loss = tf.math.reduce_mean(intermediate_output)
    return loss, intermediate_layer_model
def deep_dream(input_image, model, steps=100, step_size=0.01):
    for i in range(steps):
        with tf.GradientTape() as tape:
            # Compute the loss
            loss, model_ = calc_loss(input_image, model)
        # Get the gradients of the input image with respect to the loss
        grads = tape.gradient(loss, input_image)
        # Normalize the gradients
        grads /= tf.math.reduce_std(grads) + 1e-8
        # Update the input image
        input_image += grads * step_size
        input_image = tf.clip_by_value(input_image, 0, 255)
    return input_image
# Load an image
#img_path = '/content/drive/MyDrive/Baghdad images/market.png'
#img = cv2.imread(img_path)
#img = cv2.resize(img, (224, 224))
#img = np.array(img, dtype=float)
# Preprocess the image
#original_image = np.copy(img)
#img = preprocess_input(np.expand_dims(img, axis=0))
img = cv2.resize(cv2.imread('/content/drive/MyDrive/Baghdad images/market.png'), (224, 224))
#img = image.load_img(img_path, target_size=(224, 224))
img = image.img_to_array(img)
# Preprocess the image
original_image = np.copy(img)
#img = preprocess_input(np.expand_dims(img, axis=0))
#input_image.set_shape([1,224,224,3])
#input_image = tf.constant(img, dtype=tf.float32)
#input_image = tf.expand_dims(input_image, axis=0)
#input_image = tf.squeeze(input_image, axis=0)
# Convert the image to a Tensor
input_image = tf.constant(img, dtype=tf.float32)
# Run the deep dream algorithm
dream_img = deep_dream(input_image, model)
# Deprocess the image
dream_img = tf.clip_by_value(dream_img[0], 0, 255).numpy().astype('uint8')
# Plot the original and dream images
plt.figure(figsize=(10,10))
plt.subplot(121)
plt.imshow(original_image.astype('uint8'))
plt.axis('off')
plt.title('Original Image')
plt.subplot(122)
plt.imshow(dream_img)
plt.axis('off')
plt.title('Dream Image')
plt.show()
I am trying to build a Deep Dream model with ResNet to generate a dream image, but all I get is errors.
The first error:
TypeError Traceback (most recent call last)
<ipython-input-95-b14eebcfe038> in <module>
1 # Run the deep dream algorithm
----> 2 dream_img = deep_dream(input_image, model)
<ipython-input-92-27c78a6e0618> in deep_dream(input_image, model, steps, step_size)
12 tape.watch(input_image)
13 loss = calc_loss(input_image, model)[0]
---> 14 grads, = tape.gradient(loss, input_image)
15 grads = tape.gradient(loss, input_image)
16 if grads is None:
TypeError: cannot unpack non-iterable NoneType object
And the second error:
ValueError Traceback (most recent call last)
<ipython-input-48-09c4ef33f56c> in <module>
72
73 # Run the deep dream algorithm
---> 74 dream_img = deep_dream(input_image, model)
75
76 # Deprocess the image
16 frames
/usr/local/lib/python3.8/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
ValueError: Attempt to convert a value (None) with an unsupported type (<class 'NoneType'>) to a Tensor.
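Both tracebacks above point to the same root cause: tape.gradient returns None for input_image. In the first deep_dream the tape does watch the input, but calc_loss calls input_image_batch.numpy() before the forward pass, which takes the computation out of the tape; in the second deep_dream the input is a tf.constant that is never watched at all. A hedged sketch of a loop that keeps the whole forward pass differentiable, using a tf.Variable and gradient ascent (the layer name is taken from the code above, everything else is illustrative rather than a drop-in fix):

import tensorflow as tf
from tensorflow.keras.applications.resnet50 import preprocess_input

base = tf.keras.applications.ResNet50(weights='imagenet', include_top=False)
feature_model = tf.keras.Model(base.input, base.get_layer('conv5_block2_1_bn').output)

def calc_loss(img, feature_model):
    # Keep everything in TF ops: calling .numpy() inside the forward pass
    # (as the posted calc_loss does) cuts the graph and makes the gradient None.
    batch = tf.expand_dims(img, axis=0)
    activations = feature_model(preprocess_input(batch))
    return tf.reduce_mean(activations)

def deep_dream(img, feature_model, steps=100, step_size=0.01):
    img = tf.Variable(img)                    # Variables are watched automatically
    for _ in range(steps):
        with tf.GradientTape() as tape:
            loss = calc_loss(img, feature_model)
        grads = tape.gradient(loss, img)      # called once, after the with-block
        grads /= tf.math.reduce_std(grads) + 1e-8
        # Gradient ascent on the pixels, then keep them in a valid range
        img.assign(tf.clip_by_value(img + grads * step_size, 0.0, 255.0))
    return img.numpy()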
I'm trying to compile and fit a model using the EpochModelCheckpoint class from this thread - I want the model to save regularly after each epoch.
But I get the following error which I absolutely don't understand:
Epoch 1/1000
Epoch 1: val_loss improved from inf to -0.86435, saving model to Models/HM0001/01
WARNING:absl:Found untraced functions such as _update_step_xla while saving (showing 1 of 1). These functions will not be directly callable after loading.
Traceback (most recent call last):
File "/home/au/find/Ex.py", line 92, in <module>
model = CompileFitModel (xTrain, yTrain, epochs, batchSize, optimizer, loss, activation1, activation2, verbose)
File "/home/au/find/Ex.py", line 71, in CompileFitModel
model.fit(xTrain, yTrain, epochs=epochs, verbose=verbose, batch_size=batchSize,validation_data=(xTrain, yTrain),
File "/home/au/.local/lib/python3.10/site-packages/keras/utils/traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/home/au/find/Ex.py", line 30, in on_epoch_end
self._save_model(epoch=epoch, batch=None, logs=logs)
File "/home/au/.local/lib/python3.10/site-packages/tensorflow/dtensor/python/d_variable.py", line 60, in __init__
original_layout = api.fetch_layout(dvariable)
File "/home/au/.local/lib/python3.10/site-packages/tensorflow/dtensor/python/api.py", line 353, in fetch_layout
return _dtensor_device().fetch_layout(tensor)
File "/home/au/.local/lib/python3.10/site-packages/tensorflow/dtensor/python/dtensor_device.py", line 312, in fetch_layout
raise core._status_to_exception(e) from None # pylint: disable=protected-access
tensorflow.python.framework.errors_impl.InvalidArgumentError: FetchLayout expects a tensor placed on the layout device.
Any idea?
Full code follows. Source data - Meh100.npy (936K)
https://github.com/velkyvont/velkyvont/blob/main/Meh100.npy
import numpy as np
import tensorflow as tf
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import Sequential
from keras.layers import AlphaDropout, Dense
from keras import backend as K
from tensorflow.keras.utils import to_categorical
class EpochModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    def __init__(self,
                 filepath,
                 frequency=1,
                 monitor='val_loss',
                 verbose=2,
                 save_best_only=True,
                 save_weights_only=False,
                 mode='auto',
                 loss='MyLoss.hdf5',
                 options=None,
                 **kwargs):
        super(EpochModelCheckpoint, self).__init__(filepath, monitor, verbose, save_best_only, save_weights_only, mode, "epoch", options)
        self.epochs_since_last_save = 0
        self.frequency = frequency

    def on_epoch_end(self, epoch, logs=None):
        self.epochs_since_last_save += 1
        # pylint: disable=protected-access
        if self.epochs_since_last_save % self.frequency == 0:
            self._save_model(epoch=epoch, batch=None, logs=logs)

    def on_train_batch_end(self, batch, logs=None):
        pass
def GetXYFromDataIncludingOdds(data):
    favOdds = data.T[6]
    dogOdds = data.T[7]
    drawOdds = data.T[8]
    odds = np.array(list(zip(favOdds, dogOdds, drawOdds)))
    y = data.T[1]
    y = np.asarray(y).astype('float32')
    y = to_categorical(y, 3)
    y = np.hstack([y, odds])
    x = data.T[2:].T
    x = tf.keras.utils.normalize(x)
    return x, y
def MyLoss(yTrue, yPred):
    favWin = yTrue[:, 1:2]
    dogWin = yTrue[:, 2:3]
    draw = yTrue[:, 0:1]
    favOdds = yTrue[:, 3:4]
    dogOdds = yTrue[:, 4:5]
    drawOdds = yTrue[:, 5:6]
    gainLossVector = K.concatenate([
        draw * drawOdds,
        favWin * favOdds,
        dogWin * dogOdds,
    ], axis=1)
    return -1 * K.mean(K.sum(gainLossVector * yPred, axis=1))
def CompileFitModel(xTrain, yTrain, epochs, batchSize, optimizer, loss, activation1, activation2, verbose):
    model = Sequential()
    model.add(AlphaDropout(2000, input_dim=1191))
    model.add(Dense(1000, activation=activation1))
    model.add(AlphaDropout(500))
    model.add(Dense(3, activation=activation2))
    model.compile(optimizer=optimizer, loss=loss)
    model.fit(xTrain, yTrain, epochs=epochs, verbose=verbose, batch_size=batchSize, validation_data=(xTrain, yTrain),
              callbacks=[EarlyStopping(patience=100), EpochModelCheckpoint("Models/HM0001/{epoch:02d}", frequency=1)])
    return model
learningRate = 0.00001
batchSize = 128
loss = MyLoss
activation1 = 'elu'
activation2 = 'softmax'
verbose = 2
epochs = 1000
batchSize = 128
optimizer = tf.keras.dtensor.experimental.optimizers.RMSprop(learning_rate=learningRate,rho=0.9,momentum=0.0, epsilon=1e-07, centered=False, gradients_clip_option=None, ema_option=None, jit_compile=True, name='RMSprop', mesh=None)
numpyFilename = "Meh100.npy"
data = np.load(numpyFilename, allow_pickle=True)
xTrain, yTrain = GetXYFromDataIncludingOdds (data)
model = CompileFitModel (xTrain, yTrain, epochs, batchSize, optimizer, loss, activation1, activation2, verbose)
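One thing stands out in the traceback (an observation, not a verified fix): the failure happens inside TensorFlow's DTensor code (d_variable.py / fetch_layout) while the checkpoint is being saved, and the model is compiled with the experimental tf.keras.dtensor.experimental.optimizers.RMSprop even though no DTensor mesh is set up (mesh=None). A hedged sketch of the same run with the standard Keras RMSprop, which avoids the DTensor code path entirely:

# Standard Keras optimizer with the same hyperparameters as the DTensor variant above
optimizer = tf.keras.optimizers.RMSprop(
    learning_rate=learningRate,
    rho=0.9,
    momentum=0.0,
    epsilon=1e-07,
    centered=False,
)
model = CompileFitModel(xTrain, yTrain, epochs, batchSize, optimizer, loss,
                        activation1, activation2, verbose)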
I was following the TensorFlow tutorial on text classification (link), and the example runs fine, but when I try to apply the same steps to a different dataset I keep getting an error that I'm unable to debug.
In the tutorial they downloaded the data and used a different data loader, so that might be one of the issues; the other thing I suspect is the vectorize_text function, where dimensions get expanded. I've tried almost everything I can imagine, with no success.
The CSV file that I'm using contains 2 columns: one with the text data and the other with a multi-class label.
From the error below, it seems that TextVectorization outputs a tensor of shape (batch_size, 250), while the model needs something like (batch_size, 250, 1), I guess?
Below is the code that I used
from sklearn.model_selection import train_test_split
import tensorflow as tf
import re
import numpy as np
import pandas as pd
import string
# load and split data
df = pd.read_csv('train.csv', index_col=[0])
X_train, X_test, y_train, y_test = train_test_split(df[['text']], pd.get_dummies(df['target']).values, test_size=0.2, random_state=1)
X_val, X_test, y_val, y_test = train_test_split(X_test, y_test, test_size=0.5, random_state=1)
# convert to tf dataset
raw_train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))
raw_val_ds = tf.data.Dataset.from_tensor_slices((X_val, y_val))
raw_test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test))
# text cleanup
def custom_standardization(input_data):
    new_line_replace = tf.strings.regex_replace(input_data, '\n', ' ')
    non_alphanum_replace = tf.strings.regex_replace(new_line_replace, '[^a-zA-Z0-9_ ]', '')
    stripped = tf.strings.strip(non_alphanum_replace)
    lowercase = tf.strings.lower(stripped)
    return tf.strings.regex_replace(lowercase,
                                    '[%s]' % re.escape(string.punctuation),
                                    '')
# creating layer for text vectoriazation
max_features = 10000
sequence_length = 250
vectorize_layer = tf.keras.layers.TextVectorization(
standardize=custom_standardization,
max_tokens=max_features,
output_mode='int',
output_sequence_length=sequence_length)
train_text = raw_train_ds.map(lambda x, y: x)
vectorize_layer.adapt(train_text)
def vectorize_text(text, label):
    text = tf.expand_dims(text, -1)
    return vectorize_layer(text), label
train_ds = raw_train_ds.map(vectorize_text)
val_ds = raw_val_ds.map(vectorize_text)
test_ds = raw_test_ds.map(vectorize_text)
train_ds = train_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
model = tf.keras.Sequential([
vectorize_layer,
tf.keras.layers.Embedding(max_features + 1, 16),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(4)
])
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
epochs = 10
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs)
And this is the error I'm getting:
Epoch 1/100
WARNING:tensorflow:Model was constructed with shape (None,) for input
KerasTensor(type_spec=TensorSpec(shape=(None,), dtype=tf.string, name='text_vectorization_2_input'),
name='text_vectorization_2_input', description="created by layer 'text_vectorization_2_input'"),
but it was called on an input with incompatible shape (None, 250).

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
      1 epochs = 100
----> 2 history = model.fit(
      3     train_ds,
      4     validation_data=val_ds,
      5     epochs=epochs)

c:\Users\panto\anaconda3\lib\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
     65     except Exception as e:  # pylint: disable=broad-except
     66       filtered_tb = _process_traceback_frames(e.traceback)
---> 67       raise e.with_traceback(filtered_tb) from None
     68     finally:
     69       del filtered_tb

c:\Users\panto\anaconda3\lib\site-packages\tensorflow\python\framework\func_graph.py in autograph_handler(*args, **kwargs)
   1145     except Exception as e:  # pylint:disable=broad-except
   1146       if hasattr(e, "ag_error_metadata"):
-> 1147         raise e.ag_error_metadata.to_exception(e)
   1148       else:
   1149         raise

ValueError: in user code: ...

    When using TextVectorization to tokenize strings, the input rank must be 1 or the last shape dimension must be 1. Received:
    inputs.shape=(None, 250) with rank=2

    Call arguments received:
      • inputs=tf.Tensor(shape=(None, 250), dtype=string)
Update:
Data sample looks like this:

text                   target
'such a lovely day'    'a'
'not so great'         'b'
'hello world'          'c'

... etc. - in total 4 classes
Instead of using the vectorize_text function, I've moved the expand_dims call into custom_standardization, and that part works now:
def custom_standardization(input_data):
    new_line_replace = tf.strings.regex_replace(input_data, '\n', ' ')
    non_alphanum_replace = tf.strings.regex_replace(new_line_replace, '[^a-zA-Z0-9_ ]', '')
    stripped = tf.strings.strip(non_alphanum_replace)
    lowercase = tf.strings.lower(stripped)
    return tf.expand_dims(tf.strings.regex_replace(lowercase,
                                                   '[%s]' % re.escape(string.punctuation),
                                                   ''), -1)
The new issue is that the targets don't match: I get the following error even though I one-hot encoded the labels:
ValueError: Shapes (4, 1) and (None, 4) are incompatible
Try changing "categorical_crossentropy" to "sparse_categortical_crossentropy" as described here
I am trying to convert a workbook I did some time ago on Colab (using ImageDataGenerator) to one that uses tf.data.Dataset, as I now have a multi-GPU setup and am trying to learn how to do faster training. The model trains on the age/gender/race dataset from Kaggle, but in this instance we're only interested in the sex and age predictions. Sex is either 0 or 1 and its loss function is binary cross-entropy, while age is an integer between 0 and 120 and its loss function is MSE, as it is a regression.
import tensorflow as tf
import os
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 64
#Load datasets from directories
train_gen = tf.data.Dataset.list_files(os.listdir(training_dir), shuffle = False)
valid_gen = tf.data.Dataset.list_files(os.listdir(validation_dir), shuffle = False)
def decode_img(img):
    # Convert the compressed string into a 3D tensor
    img = tf.io.decode_jpeg(img, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)
    # Resize the image to the desired size
    return tf.image.resize(img, [128, 128])

def get_label(file):
    gender = get_sex(file)  # returns either 0 or 1
    age = get_age(file)     # returns an integer between 0 and about 120
    return gender, age

def process_path(file):
    file = file.numpy()
    file_path = str(bytes.decode(file))
    file = file_path.split(' ')[-1].split("\\")[-1]
    labels = get_label(file)
    # Load data from the file as a string
    img = tf.io.read_file(file_path)
    img = decode_img(img)
    img = img / 255.0
    return img, labels

def _set_shapes(t1, t2):
    t1.set_shape((128, 128, 3))
    t2.set_shape((2,))
    return (t1, t2)
train_gen = train_gen.map(lambda x: tf.py_function(process_path, [x], [tf.float32, tf.int32]), num_parallel_calls=AUTOTUNE)
valid_gen = valid_gen.map(lambda x: tf.py_function(process_path, [x], [tf.float32, tf.int32]), num_parallel_calls=AUTOTUNE)
train_gen = train_gen.map(_set_shapes,num_parallel_calls=AUTOTUNE)
valid_gen = valid_gen.map(_set_shapes, num_parallel_calls=AUTOTUNE)
train_gen = train_gen.batch(batch_size)
valid_gen = valid_gen.batch(batch_size)
train_gen
Output: <BatchDataset shapes: ((None, 128, 128, 3), (None, 2)), types: (tf.float32, tf.int32)>
#configure for performance
def config_for_performance(ds):
    ds = ds.cache()
    ds = ds.prefetch(buffer_size=AUTOTUNE)
    return ds
train_gen = config_for_performance(train_gen)
valid_gen = config_for_performance(valid_gen)
The model itself:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Dropout, Input, Activation, Flatten, BatchNormalization, PReLU
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import BinaryCrossentropy
from keras.callbacks import EarlyStopping, ReduceLROnPlateau
from tensorflow.keras import mixed_precision
mixed_precision.set_global_policy('mixed_float16')
gpus = tf.config.list_logical_devices('GPU')
#print(gpus)
strategy = tf.distribute.MirroredStrategy(gpus,cross_device_ops=tf.distribute.ReductionToOneDevice())
with strategy.scope():
    # Define the convolution layers
    inp = Input(shape=(128,128,3))
    cl1 = Conv2D(32,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(inp)
    bn1 = BatchNormalization()(cl1)
    pr1 = PReLU(alpha_initializer='he_uniform')(bn1)
    cl2 = Conv2D(32,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(pr1)
    bn2 = BatchNormalization()(cl2)
    pr2 = PReLU(alpha_initializer='he_uniform')(bn2)
    mp1 = MaxPool2D((2,2))(pr2)
    cl3 = Conv2D(64,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(mp1)
    bn3 = BatchNormalization()(cl3)
    pr3 = PReLU(alpha_initializer='he_uniform')(bn3)
    cl4 = Conv2D(64,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(pr3)
    bn4 = BatchNormalization()(cl4)
    pr4 = PReLU(alpha_initializer='he_uniform')(bn4)
    mp2 = MaxPool2D((2,2))(pr4)
    cl5 = Conv2D(128,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(mp2)
    bn5 = BatchNormalization()(cl5)
    pr5 = PReLU(alpha_initializer='he_uniform')(bn5)
    mp3 = MaxPool2D((2,2))(pr5)
    cl6 = Conv2D(256,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(mp3)
    bn6 = BatchNormalization()(cl6)
    pr6 = PReLU(alpha_initializer='he_uniform')(bn6)
    mp4 = MaxPool2D((2,2))(pr6)
    cl7 = Conv2D(512,(3,3), padding='same', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(mp4)
    bn7 = BatchNormalization()(cl7)
    pr7 = PReLU(alpha_initializer='he_uniform')(bn7)
    mp5 = MaxPool2D((2,2))(pr7)
    flt = Flatten()(mp5)

    # This layer predicts age
    agelayer = Dense(128, activation='relu', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(flt)
    agelayer = BatchNormalization()(agelayer)
    agelayer = Dropout(0.6)(agelayer)
    agelayer = Dense(1, activation='relu', name='age_output', kernel_initializer='he_uniform', dtype='float32')(agelayer)

    # This layer predicts gender
    glayer = Dense(128, activation='relu', kernel_regularizer=l2(0.001), kernel_initializer='he_uniform')(flt)
    glayer = BatchNormalization()(glayer)
    glayer = Dropout(0.5)(glayer)
    glayer = Dense(1, activation='sigmoid', name='gender_output', kernel_initializer='he_uniform', dtype='float32')(glayer)

    modelA = Model(inputs=inp, outputs=[glayer, agelayer])
model_folder = 'C:/Users/mm/OneDrive/Documents/Age estimation & gender classification/models'
if not os.path.exists(model_folder):
    os.mkdir(model_folder)
#Callback to control learning rate during training. Reduces learning rate by 5% after 3 epochs of no improvement on validation loss
lr_callback = ReduceLROnPlateau(monitor='val_loss', factor=0.95, patience=3,min_lr=0.000005)
#Callback to stop training if after 100 epochs of no improvement it stops and restores the best weights
es_callback = EarlyStopping(monitor='val_loss', patience=100, restore_best_weights=True, min_delta=0.001)
#Compile Model A
modelA.compile(optimizer='Adam', loss={'gender_output': BinaryCrossentropy(), 'age_output': 'mse'}, metrics={'gender_output': 'accuracy', 'age_output':'mae'})
#Training Model A
history = modelA.fit(train_gen, epochs=100, validation_data=valid_gen, callbacks=[es_callback,lr_callback])
The error message:
INFO:tensorflow:Using MirroredStrategy with devices ('/job:localhost/replica:0/task:0/device:GPU:0', '/job:localhost/replica:0/task:0/device:GPU:1')
Epoch 1/100
INFO:tensorflow:Error reported to Coordinator: logits and labels must have the same shape ((None, 1) vs (None, 2))
Traceback (most recent call last):
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\ops\nn_impl.py", line 130, in sigmoid_cross_entropy_with_logits
labels.get_shape().assert_is_compatible_with(logits.get_shape())
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\framework\tensor_shape.py", line 1161, in assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 2) and (None, 1) are incompatible
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\training\coordinator.py", line 297, in stop_on_exception
yield
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\distribute\mirrored_run.py", line 346, in run
self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\autograph\impl\api.py", line 692, in wrapper
return converted_call(f, args, kwargs, options=options)
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\autograph\impl\api.py", line 382, in converted_call
return _call_unconverted(f, args, kwargs, options)
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\autograph\impl\api.py", line 463, in _call_unconverted
return f(*args, **kwargs)
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\keras\engine\training.py", line 835, in run_step
outputs = model.train_step(data)
...
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\util\dispatch.py", line 206, in wrapper
return target(*args, **kwargs)
File "C:\Users\mm\AppData\Roaming\Python\Python39\site-packages\tensorflow\python\ops\nn_impl.py", line 132, in sigmoid_cross_entropy_with_logits
raise ValueError("logits and labels must have the same shape (%s vs %s)" %
ValueError: logits and labels must have the same shape ((None, 1) vs (None, 2))
Managed to resolve this with a bit of research and trial and error. The main issues are:
1. The labels were being fed to the model as a single tuple instead of being separated. With multiple output heads, separating them is necessary:
def process_path(file):
    file = file.numpy()
    file_path = str(bytes.decode(file))
    file = file_path.split("\\")[-1]
    gender, age = get_label(file)
    # Load data from the file as a string
    img = tf.io.read_file(file_path)
    img = decode_img(img)
    img = img / 255.0
    return img, gender, age
NB: I modified the extraction of the labels from the filename, as it wasn't getting them right all the time:
file = file_path.split("\\")[-1]
2. Due to the change in 1, the map functions need an additional dtype for the extra label, so they become:
train_gen = train_gen.map(lambda x: tf.py_function(process_path, [x], [tf.float32, tf.int32, tf.int32]), num_parallel_calls=AUTOTUNE)
valid_gen = valid_gen.map(lambda x: tf.py_function(process_path, [x], [tf.float32, tf.int32, tf.int32]), num_parallel_calls=AUTOTUNE)
3. Each label needs to be reshaped:
def _set_shapes(t1, t2, t3):
    t1.set_shape((128, 128, 3))
    t2.set_shape((1,))
    t3.set_shape((1,))
    t2 = tf.reshape(t2, [-1, 1])
    t3 = tf.reshape(t3, [-1, 1])
    return (t1, t2, t3)
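One detail the answer leaves implicit: with two output heads, Keras expects each dataset element as (image, targets) with the targets grouped, for example a dict keyed by the output layer names, rather than a flat (image, gender, age) triple. A hedged sketch of an extra map step that regroups the elements produced by the pipeline above (the layer names come from the question's model; this is an assumption about the rest of the pipeline, not part of the original answer):

def _regroup(img, gender, age):
    # Pair the image with a dict of per-head targets so Keras can match them
    # to the 'gender_output' and 'age_output' layers by name.
    return img, {'gender_output': gender, 'age_output': age}

train_gen = train_gen.map(_regroup, num_parallel_calls=AUTOTUNE)
valid_gen = valid_gen.map(_regroup, num_parallel_calls=AUTOTUNE)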
I am trying to train a model for real disaster tweet prediction (a Kaggle competition) using a Hugging Face BERT model for classifying the tweets.
I have followed many tutorials and used many BERT variants, but none of them runs in Colab; they all throw the error below.
My code is:
!pip install transformers
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.callbacks import ModelCheckpoint
from transformers import DistilBertTokenizer, RobertaTokenizer
train = pd.read_csv("/content/drive/My Drive/Kaggle_disaster/train.csv")
test = pd.read_csv("/content/drive/My Drive/Kaggle_disaster/test.csv")
roberta = 'distilbert-base-uncased'
tokenizer = DistilBertTokenizer.from_pretrained(roberta, do_lower_case = True, add_special_tokens = True, max_length = 128, pad_to_max_length = True)
def tokenize(sentences, tokenizer):
    input_ids, input_masks, input_segments = [], [], []
    for sentence in sentences:
        inputs = tokenizer.encode_plus(sentence, add_special_tokens = True, max_length = 128, pad_to_max_length = True, return_attention_mask = True, return_token_type_ids = True)
        input_ids.append(inputs['input_ids'])
        input_masks.append(inputs['attention_mask'])
        input_segments.append(inputs['token_type_ids'])
    return np.asarray(input_ids, dtype = "int32"), np.asarray(input_masks, dtype = "int32"), np.asarray(input_segments, dtype = "int32")
input_ids, input_masks, input_segments = tokenize(train.text.values, tokenizer)
from transformers import TFDistilBertForSequenceClassification, DistilBertConfig, TFDistilBertModel
distil_bert = 'distilbert-base-uncased'
config = DistilBertConfig(dropout=0.2, attention_dropout=0.2)
config.output_hidden_states = False
transformer_model = TFDistilBertModel.from_pretrained(distil_bert, config = config)
input_ids_in = tf.keras.layers.Input(shape=(128,), name='input_token', dtype=tf.int32)
input_masks_in = tf.keras.layers.Input(shape=(128,), name='masked_token', dtype=tf.int32)
embedding_layer = transformer_model(input_ids_in, attention_mask=input_masks_in)[0]
X = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(50, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(embedding_layer)
X = tf.keras.layers.GlobalMaxPool1D()(X)
X = tf.keras.layers.Dense(50, activation='relu')(X)
X = tf.keras.layers.Dropout(0.2)(X)
X = tf.keras.layers.Dense(1, activation='sigmoid')(X)
model = tf.keras.Model(inputs=[input_ids_in, input_masks_in], outputs = X)
model.compile(Adam(lr = 1e-5), loss = 'binary_crossentropy', metrics = ['accuracy'])
for layer in model.layers[:3]:
    layer.trainable = False
bert_input = [
input_ids,
input_masks
]
checkpoint = ModelCheckpoint('/content/drive/My Drive/disaster_model/model_hugging_face.h5', monitor = 'val_loss', save_best_only= True)
train_history = model.fit(
bert_input,
validation_split = 0.2,
batch_size = 16,
epochs = 10,
callbacks = [checkpoint]
)
On running the above code in colab I get the following error:
Epoch 1/10
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-91-9df711c91040> in <module>()
9 batch_size = 16,
10 epochs = 10,
---> 11 callbacks = [checkpoint]
12 )
10 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
966 except Exception as e: # pylint:disable=broad-except
967 if hasattr(e, "ag_error_metadata"):
--> 968 raise e.ag_error_metadata.to_exception(e)
969 else:
970 raise
ValueError: in user code:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:571 train_function *
outputs = self.distribute_strategy.run(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:951 run **
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2290 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/distribute/distribute_lib.py:2649 _call_for_each_replica
return fn(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:541 train_step **
self.trainable_variables)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:1804 _minimize
trainable_variables))
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:521 _aggregate_gradients
filtered_grads_and_vars = _filter_grads(grads_and_vars)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:1219 _filter_grads
([v.name for _, v in grads_and_vars],))
ValueError: No gradients provided for any variable: ['tf_distil_bert_model_23/distilbert/embeddings/word_embeddings/weight:0', 'tf_distil_bert_model_23/distilbert/embeddings/position_embeddings/embeddings:0', 'tf_distil_bert_model_23/distilbert/embeddings/LayerNorm/gamma:0', 'tf_distil_bert_model_23/distilbert/embeddings/LayerNorm/beta:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/q_lin/kernel:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/q_lin/bias:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/k_lin/kernel:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/k_lin/bias:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/v_lin/kernel:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/v_lin/bias:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/out_lin/kernel:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/attention/out_lin/bias:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/sa_layer_norm/gamma:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/sa_layer_norm/beta:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/ffn/lin1/kernel:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/ffn/lin1/bias:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/ffn/lin2/kernel:0', 'tf_distil_bert_model_23/distilbert/transformer/layer_._0/ffn/lin2/bias:0', 'tf_...
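A hedged observation on the posted code (an assumption, not a verified diagnosis): model.fit is called with bert_input only and no labels, so the compiled binary_crossentropy loss has no targets to compare against, which is one common way to end up with "No gradients provided for any variable". A minimal sketch that passes the competition's target column (assuming train.target holds the 0/1 labels):

labels = train.target.values.astype("float32")   # 0/1 disaster labels from train.csv

train_history = model.fit(
    bert_input,            # [input_ids, input_masks]
    labels,                # targets were missing in the original call
    validation_split = 0.2,
    batch_size = 16,
    epochs = 10,
    callbacks = [checkpoint]
)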
Follow this tutorial on Text classification using BERT: https://pysnacks.com/machine-learning/bert-text-classification-with-fine-tuning/
It has working code on Google Colab (using GPU) and Kaggle for binary, multi-class and multi-label text classification using BERT.
Hope that helps.
You need to use the GPU.
Try this:
with torch.no_grad():