TensorFlow 2 / Google Colab / EfficientNet Training - AttributeError: 'Node' object has no attribute 'output_masks'

I am trying to train EfficientNetB1 on Google Colab and constantly running into different issues with correct import statements from Keras or tensorflow.keras; currently this is how my imports look:
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pickle
import cv2
import os
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import efficientnet.keras as enet
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization, Flatten, Input
and this is how my model looks:
# load the EfficientNetB1 network, ensuring the head FC layer sets are left
# off
baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
# Adding 2 fully-connected layers to B1.
x = baseModel.output
x = BatchNormalization()(x)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Output layer
predictions = Dense(len(lb.classes_), activation="softmax")(x)
model = Model(inputs = baseModel.input, outputs = predictions)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the training process
for layer in baseModel.layers:
    layer.trainable = False
But for the life of me I can't figure out why I am getting the error below:
AttributeError Traceback (most recent call last)
<ipython-input-19-269fe6fc6f99> in <module>()
----> 1 baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
2
3 # Adding 2 fully-connected layers to B0.
4 x = baseModel.output
5 x = BatchNormalization()(x)
5 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in _collect_previous_mask(input_tensors)
1439 inbound_layer, node_index, tensor_index = x._keras_history
1440 node = inbound_layer._inbound_nodes[node_index]
-> 1441 mask = node.output_masks[tensor_index]
1442 masks.append(mask)
1443 else:
AttributeError: 'Node' object has no attribute 'output_masks'

The problem is the way you import efficientnet.
You import it from the Keras package and not from the tensorflow.keras package.
Change your efficientnet import to
import efficientnet.tfkeras as enet
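With that change, the model definition from the question should build. A minimal sketch (same shapes as above; everything now comes from tensorflow.keras or efficientnet.tfkeras, so no standalone-Keras code path runs):
import efficientnet.tfkeras as enet
from tensorflow.keras.layers import Input

# both the base model and the Input tensor now come from the same Keras
# implementation, so the Node/output_masks mismatch cannot occur
baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False,
                                input_tensor=Input(shape=(224, 224, 3)),
                                pooling='avg')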

Not sure, but this error may be caused by the wrong TF version. Google Colab currently comes with TF 1.x by default. Try this to change the TF version and see if it resolves the issue.
try:
    %tensorflow_version 2.x
except:
    print("Failed to load")

Related

Has the "ConvNeXt" family of models been removed from Keras?

When trying to use the ConvNeXtTiny model from Keras, I get the following error: AttributeError: module 'keras.applications' has no attribute 'ConvNeXtTiny'
filename = "ConvNextTiny_firstpass_model"
# layer construction
base_model = applications.ConvNeXtTiny(  # preprocessing included
    input_shape=(targetWidth, targetHeight, 3),
    include_top=False,
)
base_model.trainable = False
flatten_layer = layers.Flatten()
fc_layer = layers.Dense(1024, activation='relu')
dropout_layer = layers.Dropout(0.3)
# layer connecting
x = flip_layer(input_layer)
x = base_model(x, training=False)
x = flatten_layer(x)
x = fc_layer(x)
x = dropout_layer(x)
predictions = output_layer(x)
model = keras.Model(input_layer, predictions)
Here are my imports:
import tensorflow as tf
import keras
from keras import layers
from keras import optimizers
from keras import applications
from keras import losses
from keras import callbacks
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import cv2 as cv
import csv
from sklearn.utils import shuffle
Possibly relevant versioning:
ipython==8.5.0
tensorflow==2.10.0
keras==2.10.0
Keras-Preprocessing==1.1.2
pandas==1.4.4
numpy==1.23.3
matplotlib==3.6.0
opencv-python==4.6.0.66
sklearn==0.0
The imports placed above the convnext import were causing issues. Moving
from tensorflow.keras.applications import convnext
to the top of all the imports allowed it to import properly.
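For illustration, a sketch of the reordered import block (only the ordering differs from the question's imports):
# importing convnext before the other keras modules, as described above
from tensorflow.keras.applications import convnext
import tensorflow as tf
import keras
from keras import layers
from keras import optimizers
from keras import applications
from keras import losses
from keras import callbacks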

Replacing weights with set_weights or any other method

I am using TensorFlow Federated with the following imports.
import tensorflow as tf
import tensorflow_federated as tff
import collections
import os
import random
import math
import time
import numpy as np
from numpy import sqrt
from numpy.fft import fft, ifft
from numpy.random import rand
import inspect
import tensorflow_probability as tfp
from matplotlib import pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization, AveragePooling2D, MaxPooling2D, Conv2D, Activation, Dropout,Flatten,Input,Dense,concatenate
from tensorflow.keras import layers, initializers
from tensorflow.python.eager import backprop, context, function
from tensorflow.python.framework import constant_op, dtypes, indexed_slices, ops
from tensorflow.python.ops import embedding_ops, math_ops, resource_variable_ops, resources, variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
Consider the following Keras model:
def create_keras_model():
    return tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(filters=64, kernel_size=[5, 5], name='conv2d_1', activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2),
        tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5], name='conv2d_2', activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137)),
        tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2),
        tf.keras.layers.Reshape(target_shape=(4 * 4 * 32,)),
        tf.keras.layers.Dense(units=150, activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), name='dense_1'),
        tf.keras.layers.Dense(units=10, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), activation=tf.nn.softmax, name='dense_2'),
    ])
I made an instance of create_keras_model, i.e.,
net_1 = create_keras_model()
I then call the following function
def model_fn():
    # We _must_ create a new model here, and _not_ capture it from an external
    # scope. TFF will call this within different graph contexts.
    global_model = create_keras_model()
    global_model.set_weights(net_1.get_weights())
    return tff.learning.from_keras_model(
        global_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
Following that, I call upon the iterative process
iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
Which gives the following error
AttributeError Traceback (most recent call last)
<ipython-input-31-777247538e22> in <module>
2 model_fn,
3 client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
----> 4 server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
5 frames
/usr/local/lib/python3.7/dist-packages/keras/engine/training_v1.py in get_weights(self)
155 """
156 strategy = (self._distribution_strategy or
--> 157 self._compile_time_distribution_strategy)
158 if strategy:
159 with strategy.scope():
AttributeError: 'Sequential' object has no attribute '_compile_time_distribution_strategy'
Any suggestions for removing the error?
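One workaround to try (an assumption on my part, not verified in this thread): read the weights into plain NumPy arrays outside model_fn, so that get_weights() never runs inside the graph context TFF builds:
# hypothetical workaround: capture plain NumPy arrays, not the live Keras model
initial_weights = net_1.get_weights()  # called once, outside any TFF context

def model_fn():
    # still create a fresh model here, as TFF requires
    global_model = create_keras_model()
    global_model.set_weights(initial_weights)  # assigning plain arrays
    return tff.learning.from_keras_model(
        global_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])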

Keras and tensorflow conflict when transfer learning on MobileNetV3

I'm trying to do transfer learning with MobileNetV3 in Keras but I'm having some issues.
from keras.models import Model
from keras.layers import GlobalMaxPooling2D, Dense, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications import MobileNetV3Small
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
pretrained_model = MobileNetV3Small(input_shape=(224, 224, 3),
                                    weights="imagenet",
                                    include_top=False)
# freeze all layers except the last one
for layer in pretrained_model.layers:
    layer.trainable = False
pretrained_model.layers[-1].trainable = True
# combine the model with some extra layers for classification
last_output = pretrained_model.layers[-1].output
x = GlobalMaxPooling2D()(last_output)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(pretrained_model.input, x)
I get this error when I try to make the Dense layer:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
but it's fixed by adding the following code snippet:
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
When I include the code fix above, I get this error when I call model.fit():
FailedPreconditionError: 2 root error(s) found.
(0) Failed precondition: Could not find variable Conv_1_2/kernel. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/Conv_1_2/kernel/N10tensorflow3VarE does not exist.
[[{{node Conv_1_2/Conv2D/ReadVariableOp}}]]
[[_arg_dense_12_target_0_1/_100]]
(1) Failed precondition: Could not find variable Conv_1_2/kernel. This could mean that the variable has been deleted. In TF1, it can also mean the variable is uninitialized. Debug info: container=localhost, status=Not found: Resource localhost/Conv_1_2/kernel/N10tensorflow3VarE does not exist.
[[{{node Conv_1_2/Conv2D/ReadVariableOp}}]]
0 successful operations.
0 derived errors ignored.
How can I fix these issues and train the model?
From comments
Don't mix tf.keras and standalone keras. They are not compatible. Only use one of them (paraphrased from Frightera)
Working code is shown below:
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalMaxPooling2D, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications import MobileNetV3Small
import numpy as np
from tqdm import tqdm
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
pretrained_model = MobileNetV3Small(input_shape=(224, 224, 3),
                                    weights="imagenet",
                                    include_top=False)
# freeze all layers except the last one
for layer in pretrained_model.layers:
    layer.trainable = False
pretrained_model.layers[-1].trainable = True
# combine the model with some extra layers for classification
last_output = pretrained_model.layers[-1].output
x = GlobalMaxPooling2D()(last_output)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(1, activation='sigmoid')(x)
model = Model(pretrained_model.input, x)
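With consistent imports, compiling and training proceeds as usual. A minimal, hypothetical training step for the binary head above (train_images and train_labels are placeholder names, not from the question):
# compile for the sigmoid output defined above and train briefly
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5, batch_size=32)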

Stacking Classifier doesn't recognize Keras

I'm using a StackingClassifier on 5 scikit-learn classifiers and a Keras one. It doesn't seem to recognize the Keras one as a classifier, however.
Relevant code:
from tensorflow.keras import layers
from tensorflow import keras
from keras.constraints import maxnorm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Input
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras import metrics
import joblib
from joblib import parallel_backend
np.random.seed(42)
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import sklearn
from sklearn.ensemble import StackingClassifier
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(best_neurons, input_shape=(X_train.shape[1],), kernel_initializer=best_init_mode, activation='relu',
                    kernel_constraint=maxnorm(best_weight_constraint)))
    model.add(Dropout(best_dropout_rate))
    model.add(Flatten())
    optimizer = tf.keras.optimizers.RMSprop(lr=best_learn_rate)
    model.add(Dense(units=1, kernel_initializer=best_init_mode, activation='sigmoid'))
    # compile model
    model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
    return model
NN_clf = KerasClassifier(build_fn=create_model, epochs=best_epochs, batch_size=best_batch_size)
RF_clf = RandomForestClassifier(max_depth=best_max_depth_rf, n_estimators=best_n_estimators_rf,
                                min_samples_leaf=best_min_samples_leaf_rf, max_features=best_max_features_rf,
                                class_weight=best_class_weight_rf, max_samples=best_max_samples_rf,
                                random_state=42, oob_score=True)
KN_clf = KNeighborsClassifier(n_neighbors=best_n_neighbors, p=best_p, leaf_size=best_leaf_size)
#DT_clf = DecisionTreeClassifier(max_depth=best_max_depth_dt, min_samples_leaf=best_min_samples_leaf_dt)
SV_clf = SVC(gamma=best_gamma_sv, C=best_c_sv, kernel=best_kernel_sv, random_state=42, probability=True)
GBC_clf = xgb.XGBClassifier(learning_rate=best_learning_rate_gbc, random_state=42, colsample_bytree=best_colsample_bytree_gbc,
                            max_depth=best_max_depth_gbc, n_estimators=best_n_estimators_gbc,
                            gamma=best_gamma_gbc, subsample=best_subsample_gbc)
EX_clf = ExtraTreesClassifier(max_depth=best_max_depth_ex, n_estimators=best_n_estimators_ex,
                              min_samples_leaf=best_min_samples_leaf_ex, max_features=best_max_features_ex,
                              warm_start=False, oob_score=True, bootstrap=True, random_state=42)
LR_clf = LogisticRegression(random_state=42, solver=best_solver, penalty=best_penalty, class_weight=best_class_weight, C=best_log_C)
estimators = [('RF', RF_clf), ('GBC', GBC_clf), ('EX', EX_clf), ('LR', LR_clf), ('KN', KN_clf),
              ('SV', SV_clf), ('NN', NN_clf)]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), n_jobs=-1)
clf.fit(X_train, y_train.values.ravel())
print("Stacking model score: %.3f" % clf.score(X_test, y_test.values.ravel()))
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-41-272df6aa838e> in <module>
2 ('SV', SV_clf), ('NN', NN_clf) ]
3 clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression(), n_jobs=-1)
----> 4 clf.fit(X_train, y_train.values.ravel())
5 print("Stacking model score: %.3f" % clf.score(X_test, y_test.values.ravel()))
~\Anaconda3\lib\site-packages\sklearn\ensemble\_stacking.py in fit(self, X, y, sample_weight)
411 self._le = LabelEncoder().fit(y)
412 self.classes_ = self._le.classes_
--> 413 return super().fit(X, self._le.transform(y), sample_weight)
414
415 #if_delegate_has_method(delegate='final_estimator_')
~\Anaconda3\lib\site-packages\sklearn\ensemble\_stacking.py in fit(self, X, y, sample_weight)
129 # all_estimators contains all estimators, the one to be fitted and the
130 # 'drop' string.
--> 131 names, all_estimators = self._validate_estimators()
132 self._validate_final_estimator()
133
~\Anaconda3\lib\site-packages\sklearn\ensemble\_base.py in _validate_estimators(self)
247 raise ValueError(
248 "The estimator {} should be a {}.".format(
--> 249 est.__class__.__name__, is_estimator_type.__name__[3:]
250 )
251 )
ValueError: The estimator KerasClassifier should be a classifier.
I am using scikit-learn version 2.2 and TF 2.x. I've seen a similar error here but didn't want to rewrite my code and use the MLxtend library.
This problem arises from a similar issue reported here for VotingClassifier.
The solution is simply to set _estimator_type = 'classifier' on the KerasClassifier instance.
Note: please provide just the minimum code to reproduce your issue.
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.linear_model import LogisticRegression
import numpy as np
from tensorflow.keras import layers
from tensorflow import keras
from keras.constraints import maxnorm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Input
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras import metrics
import joblib
from joblib import parallel_backend
np.random.seed(42)
from sklearn.model_selection import GridSearchCV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import sklearn
from sklearn.ensemble import StackingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC  # needed for SV_clf below
def create_model():
    # create model
    model = Sequential()
    model.add(Dense(20, input_dim=20, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Flatten())
    optimizer = keras.optimizers.RMSprop(lr=0.001)
    model.add(Dense(units=1, activation='sigmoid'))
    # compile model
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer, metrics=[keras.metrics.AUC(), 'accuracy'])
    return model
NN_clf=KerasClassifier(build_fn=create_model, epochs=15, batch_size= 32)
NN_clf._estimator_type = "classifier"
RF_clf =RandomForestClassifier(random_state=42, oob_score=True)
KN_clf =KNeighborsClassifier()
SV_clf = SVC(random_state=42, probability=True)
EX_clf= ExtraTreesClassifier(random_state=42)
LR_clf=LogisticRegression(random_state=42,)
estimators = [('RF', RF_clf), ('EX', EX_clf), ('LR', LR_clf), ('KN', KN_clf),
              ('SV', SV_clf), ('NN', NN_clf)]
clf = StackingClassifier(estimators=estimators, final_estimator=LogisticRegression())
X, y = make_classification()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train , y_test = train_test_split(X, y, test_size=0.3)
clf.fit(X_train, y_train)
print("Stacking model score: %.3f" % clf.score(X_test, y_test))
# Stacking model score: 0.967

What is the corresponding function of K.gradients for TensorFlow 2.0?

I want to visualize the classification result with TensorFlow 2.0. For Keras, the following code is needed for CAM:
import tensorflow as tf
import keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
import numpy as np
import cv2
img_path = 'image/test.jpg'
model = VGG16(weights='imagenet')
img = image.load_img('image/test.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
print('Predicted:', decode_predictions(preds, top=3)[0])
print(np.argmax(preds[0]))
african_elephant_output = model.output[:, 386]
last_conv_layer = model.get_layer('block5_conv3')
grads = K.gradients(african_elephant_output, last_conv_layer.output)[0]
But when I use TensorFlow 2.0, there seems to be no such gradients function. So what is the corresponding function for K.gradients in TensorFlow 2.0?
Here:
import keras.backend as K
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input, decode_predictions
You are mixing the keras and tf.keras packages, which are NOT compatible with each other. You should import backend from tf.keras:
import tensorflow.keras.backend as K
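Beyond the import fix, the TF2-native replacement for K.gradients is tf.GradientTape. A minimal sketch of the Grad-CAM gradient step under that API (the random input is a stand-in for a preprocessed image; the class index 386 follows the question's example):
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16

model = VGG16(weights='imagenet')
last_conv_layer = model.get_layer('block5_conv3')
# a model that exposes both the conv feature map and the predictions
grad_model = tf.keras.Model(inputs=model.input,
                            outputs=[last_conv_layer.output, model.output])

x = np.random.rand(1, 224, 224, 3).astype('float32')  # stand-in input
with tf.GradientTape() as tape:
    conv_output, preds = grad_model(x)
    class_channel = preds[:, 386]  # 'African elephant', as in the question

# gradient of the class score w.r.t. the last conv feature map,
# the TF2 equivalent of K.gradients(african_elephant_output, ...)[0]
grads = tape.gradient(class_channel, conv_output)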