multi_gpu_model : object of type 'NoneType' has no len() - tensorflow

I am getting this error while using Keras's multi_gpu_model. The code runs fine if I remove that line. It also works fine with a CNN model; it only fails with this dense network. Could you please help me solve this issue? Thanks.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM, BatchNormalization,Flatten
from keras.utils.vis_utils import model_to_dot
from keras.optimizers import adam
from keras.models import load_model
import pylab
from sklearn.model_selection import train_test_split
from keras.utils import multi_gpu_model
from scipy.io import wavfile
X=np.ones(10000)
y=np.zeros(100)
x_train = X.reshape(1, 10000)
y_train = y.reshape(1, 100)
model = Sequential()
model.add(Dense(500,activation = 'tanh'))
model.add(Dense(450, activation = 'tanh'))
model.add(Dense(412, activation = 'tanh'))
model.add(Dense(100, activation = 'tanh'))
opt = adam(lr=0.002, decay=1e-6)
model = multi_gpu_model(model, gpus=4)
model.compile(loss='mae', optimizer=opt, metrics=['accuracy'])
model.fit(x_train,y_train,epochs=50, batch_size = 40000)
Error: Traceback (most recent call last):
File "p.py", line 37, in <module>
model = multi_gpu_model(model, gpus=4)
File "/home/ENG/benipas1/anaconda3/envs/new/lib/python3.7/site-packages/keras/utils/multi_gpu_utils.py", line 203, in multi_gpu_model
for i in range(len(model.outputs)):
TypeError: object of type 'NoneType' has no len()

The problem is here:
model = Sequential()
model.add(Dense(500,activation = 'tanh'))
You are not giving an input shape to the first layer, so the model's outputs are completely undefined and model.outputs is None. If you provide an input shape to the first layer, the outputs are defined and multi_gpu_model should work. You are probably providing the input shape in your CNN models, which is why those work:
model.add(Dense(500,activation = 'tanh', input_shape=(something,)))
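For completeness, here is a minimal sketch of the fixed model. It keeps the question's versions and imports (standalone Keras with multi_gpu_model); input_shape=(10000,) matches x_train above, and gpus=4 should match the GPUs actually available:
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import adam
from keras.utils import multi_gpu_model

x_train = np.ones((1, 10000))
y_train = np.zeros((1, 100))

model = Sequential()
# Giving the first layer an input_shape defines model.outputs,
# which is exactly what multi_gpu_model needs to inspect.
model.add(Dense(500, activation='tanh', input_shape=(10000,)))
model.add(Dense(450, activation='tanh'))
model.add(Dense(412, activation='tanh'))
model.add(Dense(100, activation='tanh'))

model = multi_gpu_model(model, gpus=4)  # gpus must match your hardware
model.compile(loss='mae', optimizer=adam(lr=0.002, decay=1e-6), metrics=['accuracy'])
model.fit(x_train, y_train, epochs=50, batch_size=1)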

Related

Getting error even after enabling eager execution

I am working on tensorflow federated.
I have the following imports
!pip install --quiet tensorflow-federated==0.20.0 # The latest version of tensorflow-federated is not working with the colab python version
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import tensorflow_federated as tff
import collections
import os
import random
import math
import time
import numpy as np
from numpy import sqrt
from numpy.fft import fft, ifft
from numpy.random import rand
import inspect
import tensorflow_probability as tfp
from matplotlib import pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import concatenate
from tensorflow.keras import initializers
from keras import layers, initializers
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
Consider the following model
def create_model():
    x_1 = tf.Variable(24)
    bias_initializer = tf.keras.initializers.HeNormal()
    model = Sequential()
    model.add(Conv2D(2, (5, 5), input_shape=(28, 28, 1), activation="relu", name='conv2d_1', use_bias=True, bias_initializer=bias_initializer))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(1, (5, 5), activation="relu", name='conv2d_2', use_bias=True, bias_initializer=bias_initializer))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(2, name='dense_1', activation="relu", use_bias=True, bias_initializer=bias_initializer))
    model.add(Dense(10, name='dense_2', activation="softmax", use_bias=True, bias_initializer=bias_initializer))
    a = model.weights[0]
    b = model.weights[1]
    c = model.weights[2]
    d = model.weights[3]
    e = model.weights[4]
    f = model.weights[5]
    g = model.weights[6]
    h = model.weights[7]
    print(h)
    print(type(a))
    L1, B1, L2, B2, L3, B3, L4, B4 = processing_work(a, b, c, d, e, f, g, h, x_1)
    print('L1 is', L1)
    print(type(L1))
    print(type(h))
    kk = resource_variable_ops.ResourceVariable(L1)
    print(type(kk))
    KB = resource_variable_ops.ResourceVariable(B1)
    print(type(KB))
    L1 = tf.Variable(L1, dtype='float32')  # , name='conv2d_1/kernel:0')
    B1 = tf.Variable(B1, dtype='float32')  # , name='conv2d_1/bias:0')
    L2 = tf.Variable(L2, dtype='float32')  # , name='conv2d_2/kernel:0')
    B2 = tf.Variable(B2, dtype='float32')  # , name='conv2d_2/bias:0')
    L3 = tf.Variable(L3, dtype='float32')  # , name='dense_1/kernel:0')
    B3 = tf.Variable(B3, dtype='float32')  # , name='dense_1/bias:0')
    L4 = tf.Variable(L4, dtype='float32')  # , name='dense_2/kernel:0')
    B4 = tf.Variable(B4, dtype='float32')  # , name='dense_2/bias:0')
    model.get_layer('conv2d_1').set_weights([L1, B1])
    model.get_layer('conv2d_2').set_weights([L2, B2])
    model.get_layer('dense_1').set_weights([L3, B3])
    model.get_layer('dense_2').set_weights([L4, B4])
    return model
What I am doing in this model is extracting the weights and biases of all the layers, performing various operations on them, and re-assigning the processed/modified weights and biases to their respective layers.
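Outside of TFF, that extract-modify-reassign round trip looks roughly like the sketch below. It is a standalone toy (a tiny Dense model), and process is a hypothetical stand-in for processing_work, whose body is not shown here:
import tensorflow as tf

# Hypothetical stand-in for processing_work: scale every weight array by 2.
def process(arrays):
    return [2.0 * a for a in arrays]

model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,), name='dense_1'),
    tf.keras.layers.Dense(2, activation='softmax', name='dense_2'),
])

for layer in model.layers:
    if layer.get_weights():  # layers like Flatten/MaxPooling2D have no weights
        # get_weights/set_weights speak plain numpy arrays, which is eager-safe
        layer.set_weights(process(layer.get_weights()))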
I make an instance of the model here:
def model_fn():
    # We _must_ create a new model here, and _not_ capture it from an external
    # scope. TFF will call this within different graph contexts.
    local_model = create_model()
    return tff.learning.from_keras_model(
        local_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
I then call the following algorithm:
iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
However, I get the following error.
<tf.Variable 'dense_2/bias:0' shape=(10,) dtype=float32>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<function reshape at 0x7efde367c830>
L1 is Tensor("Reshape_41:0", shape=(5, 5, 1, 2), dtype=float32)
<class 'tensorflow.python.framework.ops.Tensor'>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
<class 'tensorflow.python.ops.resource_variable_ops.ResourceVariable'>
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-33-e5ea47468ee2> in <module>
1 iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
2 model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
----> 3 server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
8 frames
/usr/local/lib/python3.7/dist-packages/keras/backend.py in batch_set_value(tuples)
4024 feed_dict = {}
4025 for x, value in tuples:
-> 4026 value = np.asarray(value, dtype=dtype_numpy(x))
4027 tf_dtype = tf.as_dtype(x.dtype.name.split('_')[0])
4028 if hasattr(x, '_assign_placeholder'):
NotImplementedError: numpy() is only available when eager execution is enabled.
I have tried both variants, i.e., L1/B1 as well as kk/KB, in
model.get_layer('conv2d_1').set_weights([L1, B1])
but I get the same error either way. I also added the following at the start of the notebook:
tf.compat.v1.enable_eager_execution()
What might be causing this error?

Replacing of weights with set_weights or any other method

I am using TensorFlow Federated with the following imports.
import tensorflow as tf
import tensorflow_federated as tff
import collections
import os
import random
import math
import time
import numpy as np
from numpy import sqrt
from numpy.fft import fft, ifft
from numpy.random import rand
import inspect
import tensorflow_probability as tfp
from matplotlib import pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization, AveragePooling2D, MaxPooling2D, Conv2D, Activation, Dropout,Flatten,Input,Dense,concatenate
from tensorflow.keras import layers, initializers
from tensorflow.python.eager import backprop, context, function
from tensorflow.python.framework import constant_op, dtypes, indexed_slices, ops
from tensorflow.python.ops import embedding_ops, math_ops, resource_variable_ops, resources, variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
Consider the following Keras model:
def create_keras_model():
    return tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(filters=64, kernel_size=[5, 5], name='conv2d_1', activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2),
        tf.keras.layers.Conv2D(filters=32, kernel_size=[5, 5], name='conv2d_2', activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137)),
        tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2),
        tf.keras.layers.Reshape(target_shape=(4 * 4 * 32,)),
        tf.keras.layers.Dense(units=150, activation=tf.nn.relu, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), name='dense_1'),
        tf.keras.layers.Dense(units=10, use_bias=True, bias_initializer=tf.initializers.lecun_normal(seed=137), activation=tf.nn.softmax, name='dense_2'),
    ])
I make an instance of create_keras_model:
net_1 = create_keras_model()
I then call the following function
def model_fn():
    # We _must_ create a new model here, and _not_ capture it from an external
    # scope. TFF will call this within different graph contexts.
    global_model = create_keras_model()
    global_model.set_weights(net_1.get_weights())
    return tff.learning.from_keras_model(
        global_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
Following that, I invoke the iterative process:
iterative_process = tff.learning.algorithms.build_weighted_fed_avg(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
This gives the following error:
AttributeError Traceback (most recent call last)
<ipython-input-31-777247538e22> in <module>
2 model_fn,
3 client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
----> 4 server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.00))
5 frames
/usr/local/lib/python3.7/dist-packages/keras/engine/training_v1.py in get_weights(self)
155 """
156 strategy = (self._distribution_strategy or
--> 157 self._compile_time_distribution_strategy)
158 if strategy:
159 with strategy.scope():
AttributeError: 'Sequential' object has no attribute '_compile_time_distribution_strategy'
Any suggestions for removing this error?
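For what it's worth, the same get_weights/set_weights copy runs without error in plain eager Keras outside TFF, which suggests the failure is specific to the graph contexts TFF builds. A minimal sketch (tiny stand-in model, arbitrary shapes):
import numpy as np
import tensorflow as tf

def make_model():
    return tf.keras.Sequential([
        tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
        tf.keras.layers.Dense(2),
    ])

source = make_model()
target = make_model()
target.set_weights(source.get_weights())  # copies all arrays, layer by layer

# Sanity check: every weight array now matches.
assert all(np.array_equal(a, b)
           for a, b in zip(source.get_weights(), target.get_weights()))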

Input 0 of layer fc1 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 32768)

I'm implementing SRGAN (and am not very experienced in this field), which uses a pre-trained VGG19 model to extract features. The following code was working fine on Keras 2.1.2 and TF 1.15.0 until yesterday, when it started throwing "AttributeError: module 'keras.utils.generic_utils' has no attribute 'populate_dict_with_module_objects'". So I updated Keras to 2.4.3 and TF to 2.5.0, but now it throws "Input 0 of layer fc1 is incompatible with the layer: expected axis -1 of input shape to have value 25088 but received input with shape (None, 32768)" on the following line:
features = vgg(input_layer)
But here the input has to be (256, 256, 3). I had originally downgraded the Keras and TF versions to the ones mentioned above to get rid of this very error, and it was working well until yesterday. Changing the input shape to (224, 224, 3) does not help either. Any help in solving this error would be much appreciated.
import glob
import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input
from keras.applications.vgg19 import VGG19
from keras.callbacks import TensorBoard
from keras.layers import BatchNormalization, Activation, LeakyReLU, Add, Dense,Flatten
from keras.layers.convolutional import Conv2D, UpSampling2D
from keras.models import Model
from keras.optimizers import Adam
from scipy.misc import imread, imresize
from PIL import Image
def build_vgg():
    input_shape = (256, 256, 3)
    vgg = VGG19(weights="imagenet")
    vgg.outputs = [vgg.layers[9].output]
    input_layer = Input(shape=input_shape)
    features = vgg(input_layer)
    model = Model(inputs=[input_layer], outputs=[features])
    return model
vgg = build_vgg()
vgg.trainable = False
vgg.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build and compile the discriminator
discriminator = build_discriminator()
discriminator.compile(loss='mse', optimizer=common_optimizer, metrics=['accuracy'])
# Build the generator network
generator = build_generator()
I'm using Google Colab.
Importing Keras from TensorFlow and setting include_top=False in
vgg = VGG19(weights="imagenet", include_top=False)
seems to work.
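A minimal sketch of build_vgg along those lines (tf.keras imports throughout; with include_top=False the flatten/fc1/fc2 head is dropped, so a 256x256 input no longer collides with fc1's expected 25088 features; the layer index 9 simply follows the original code):
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.models import Model

def build_vgg():
    # No classifier head, so any spatial input size is accepted.
    vgg = VGG19(weights="imagenet", include_top=False, input_shape=(256, 256, 3))
    features = vgg.layers[9].output  # same intermediate layer as before
    return Model(inputs=vgg.input, outputs=features)

vgg = build_vgg()
vgg.trainable = False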

Tensorflow 2 /Google Colab / EfficientNet Training - AttributeError: 'Node' object has no attribute 'output_masks'

I am trying to train EfficientNetB1 on Google Colab and constantly running into different issues with the correct import statements from Keras or tensorflow.keras. Currently, this is what my imports look like:
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import pickle
import cv2
import os
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import efficientnet.keras as enet
from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization, Flatten, Input
and this is what my model looks like:
# load the EfficientNetB1 network, ensuring the head FC layer sets are left off
baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
# Adding 2 fully-connected layers to B0.
x = baseModel.output
x = BatchNormalization()(x)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
# Output layer
predictions = Dense(len(lb.classes_), activation="softmax")(x)
model = Model(inputs = baseModel.input, outputs = predictions)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the training process
for layer in baseModel.layers:
layer.trainable = False
But for the life of me, I can't figure out why I am getting the error below:
AttributeError Traceback (most recent call last)
<ipython-input-19-269fe6fc6f99> in <module>()
----> 1 baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
2
3 # Adding 2 fully-connected layers to B0.
4 x = baseModel.output
5 x = BatchNormalization()(x)
5 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in _collect_previous_mask(input_tensors)
1439 inbound_layer, node_index, tensor_index = x._keras_history
1440 node = inbound_layer._inbound_nodes[node_index]
-> 1441 mask = node.output_masks[tensor_index]
1442 masks.append(mask)
1443 else:
AttributeError: 'Node' object has no attribute 'output_masks'
The problem is the way you import efficientnet: you import it from the Keras package rather than from the tensorflow.keras package. Change your efficientnet import to
import efficientnet.tfkeras as enet
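With that change, every import comes from the tf.keras side. A minimal sketch of the model build (the 10-class head is a hypothetical stand-in for len(lb.classes_), which comes from the label binarizer in the question):
import efficientnet.tfkeras as enet  # tf.keras build of the package
from tensorflow.keras.layers import BatchNormalization, Dense, Dropout, Activation, Input
from tensorflow.keras.models import Model

baseModel = enet.EfficientNetB1(weights="imagenet", include_top=False,
                                input_tensor=Input(shape=(224, 224, 3)), pooling='avg')
x = BatchNormalization()(baseModel.output)
x = Dropout(0.7)(x)
x = Dense(512)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
predictions = Dense(10, activation="softmax")(x)  # stand-in for len(lb.classes_)
model = Model(inputs=baseModel.input, outputs=predictions)

# Freeze the base so only the new head trains.
for layer in baseModel.layers:
    layer.trainable = False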
Not sure, but this error may be caused by the wrong TF version. Google Colab currently comes with TF 1.x by default. Try this to change the TF version and see if it resolves the issue:
try:
    %tensorflow_version 2.x
except:
    print("Failed to load")

TypeError: object of type 'Bidirectional' has no len()

This is the code I'm running:
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
import unicodedata
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
# Defining Constants
# Maximum length of text sentences
MAXLEN = 180
# Number of LSTM units
LSTM_N = 150
# batch size
BS=48
input = Input(shape=(MAXLEN,))
model = Embedding(input_dim=n_words, output_dim=MAXLEN, input_length=MAXLEN)(input)
model = Dropout(0.2)(model)
model = Bidirectional(LSTM(units=LSTM_N, return_sequences=True, recurrent_dropout=0.1))(model)
out = TimeDistributed(Dense(n_tags, activation="softmax"))(model) # softmax output layer
model = Model(input, out)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(X, np.array(y), batch_size=BS, epochs=2, validation_split=0.05, verbose=1)
I'm running a bidirectional LSTM, but this error appears:
TypeError: object of type 'Bidirectional' has no len()
What's wrong with this code? Please help.
You are not using consistent imports. Simply change
from keras.models import Model
to
from tensorflow.keras.models import Model
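Either package works as long as everything comes from the same one. For example, a consistent all-tensorflow.keras set of imports for this script (a sketch; the rest of the code stays unchanged):
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (Input, LSTM, Embedding, Dense,
                                     TimeDistributed, Dropout, Bidirectional)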
Changing from
from tensorflow.keras.layers import Input
to
from keras.layers import Input
solved my issue.