Graph execution error when concatenating two inputs - tensorflow

from tensorflow import keras
from tensorflow.keras import layers

inputs1 = keras.Input(shape=(8, 1))
inputs2 = keras.Input(shape=(500, 1))
# axis=1 joins along the step axis: (None, 8, 1) + (None, 500, 1) -> (None, 508, 1)
combined = layers.Concatenate(axis=1)([inputs1, inputs2])
z = layers.Dense(2, activation="relu")(combined)
z = layers.Dense(1, activation="linear")(z)
model = keras.Model(inputs=[inputs1, inputs2], outputs=z)
I always get a graph execution error in Keras when I concatenate two inputs. What is the problem and what is the solution?
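For what it's worth, the concatenation itself is valid: joining (None, 8, 1) and (None, 500, 1) along axis=1 gives (None, 508, 1). A common trigger for graph execution errors is feeding arrays at fit/predict time whose shapes don't match the declared inputs. A minimal sketch (my addition, with made-up dummy data shaped to match the two Input specs) that runs end to end:

import numpy as np

x1 = np.random.rand(4, 8, 1).astype('float32')    # must match Input(shape=(8, 1))
x2 = np.random.rand(4, 500, 1).astype('float32')  # must match Input(shape=(500, 1))
y = np.random.rand(4, 508, 1).astype('float32')   # Dense(1) keeps the 508-step axis

model.compile(optimizer='adam', loss='mse')
model.fit([x1, x2], y, epochs=1)  # runs without a graph execution error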

Related

Model fails to predict using float16

I'm trying to build a model that uses float16 in the inputs and outputs. However, I've noticed that none of the toy examples I've tried seem to work. For instance, the model below gives NaN:
import numpy as np
import tensorflow as tf

def total_variation(im1):
    score = tf.image.total_variation(im1)
    return tf.cast(score, tf.float16)

input1 = tf.keras.Input(shape=(512, 512, 3), batch_size=1, dtype=tf.float16)
output = total_variation(input1)
matrix1 = np.arange(512*512*3, dtype='float16').reshape(1, 512, 512, 3)
model = tf.keras.Model(inputs=[input1], outputs=output, name='test')
x = model.predict([matrix1])
x  # NaN
However, the following gives the correct result:
def total_variation(im1):
    score = tf.image.total_variation(im1)
    return tf.cast(score, tf.float32)

input1 = tf.keras.Input(shape=(512, 512, 3), batch_size=1, dtype=tf.float32)
output = total_variation(input1)
matrix1 = np.arange(512*512*3, dtype='float32').reshape(1, 512, 512, 3)
model = tf.keras.Model(inputs=[input1], outputs=output, name='test')
x = model.predict([matrix1])
x  # array([1.207955e+09], dtype=float32)
The result is the same if tf.keras.backend.set_floatx('float16') is called instead. Does anyone know why this might be the case?
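One thing worth checking (my note, not from the original thread): float16 tops out around 65504, while np.arange(512*512*3) runs up to 786,431, so the float16 test input already contains inf entries, and the differences inside total_variation can then produce inf - inf = nan. A quick standalone check:

import numpy as np

print(np.finfo(np.float16).max)  # 65504.0

matrix1 = np.arange(512*512*3, dtype='float16')
print(np.isinf(matrix1).sum())   # large count: everything above 65504 overflowed to inf
print(np.float16(np.inf) - np.float16(np.inf))  # nan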

evaluating a model with two inputs and one output - tensorflow

I am trying to evaluate a model with 2 inputs and 1 output: each input goes to a separate pretrained model, and the outputs of the two models are averaged. I am using the same data for both inputs.
import tensorflow as tf
import tensorflow_addons as tfa

test_dir = r'D:\Graduation_project\Damage type not collected'

test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1./255)
test_set = test_datagen.flow_from_directory(test_dir,
                                            class_mode='categorical',
                                            batch_size=16,
                                            target_size=(150, 150))
test_set1 = test_datagen.flow_from_directory(test_dir,
                                             class_mode='categorical',
                                             batch_size=16,
                                             target_size=(150, 150))
Loading the first model and renaming its layers:
def load_dense_model():
    densenet = tf.keras.models.load_model(r'D:\Graduation_project\saved models\damage_type_model.h5', compile=False)
    for i, layer in enumerate(densenet.layers):
        layer._name = 'Densenet_layer' + str(i)
    return densenet
Loading the second model:
def load_vgg19_model():
    vgg19 = tf.keras.models.load_model(r'D:\Graduation_project\saved models\damage_type_VGG19.h5', compile=False)
    return vgg19
Creating the ensemble model:
def ensemble_model(first_model, second_model):
    densenet = first_model()
    vgg19 = second_model()
    output_1 = densenet.get_layer('Densenet_layer613')
    output_2 = vgg19.get_layer('dense_4')
    avg = tf.keras.layers.Average()([output_1.output, output_2.output])
    model = tf.keras.Model(inputs=[densenet.input, vgg19.input], outputs=avg)
    return model
METRICS = [
    'accuracy',
    tf.metrics.TruePositives(name='tp'),
    tf.metrics.FalsePositives(name='fp'),
    tf.metrics.TrueNegatives(name='tn'),
    tf.metrics.FalseNegatives(name='fn'),
    tf.metrics.Precision(name='precision'),
    tf.metrics.Recall(name='recall'),
    tfa.metrics.F1Score(name='F1_Score', num_classes=5),
    tfa.metrics.MultiLabelConfusionMatrix(num_classes=5)
]
model = ensemble_model(load_dense_model, load_vgg19_model)
Compiling and evaluating the model:
model.compile(optimizer='adam', loss='binary_crossentropy',
              metrics=['accuracy'])
model.evaluate({'Densenet_layer0': test_set1, 'input_2': test_set})
evaluate() fails to run
ValueError: Failed to find data adapter that can handle input: (<class 'dict'> containing {"<class 'str'>"} keys and {"<class 'tensorflow.python.keras.preprocessing.image.DirectoryIterator'>"} values), <class 'NoneType'>
My guess is that your model is complaining because you are feeding it a dict/list of iterators that each yield a single image, instead of feeding an iterator that yields the image twice (once for each model).
What would happen if you wrapped your DirectoryIterator in a generator that can feed the data correctly?
def gen_itertest(test_dir):
    test_set = test_datagen.flow_from_directory(test_dir,
                                                class_mode='categorical',
                                                batch_size=16,
                                                target_size=(150, 150))
    for i in range(len(test_set)):
        x = test_set.next()
        yield [x[0], x[0]], x[1]  # the input twice, the label only once
and then you can feed this to evaluate():
testset = gen_itertest(r'D:\Graduation_project\Damage type not collected')
result = model.evaluate(testset)
I am not sure this will work, but since you haven't provided us with a minimal, reproducible example, I am not going to build one to test it.
Try calling evaluate() like this:
result = model.evaluate(x=[test_set1, test_set])
Then you can get the metric names paired with their values by doing something like this:
dict(zip(model.metrics_names, result))
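For example (the numbers below are made up, just to show the shape of the result):

result = model.evaluate(x=[test_set1, test_set])
print(dict(zip(model.metrics_names, result)))
# e.g. {'loss': 0.31, 'accuracy': 0.88}  (hypothetical values)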

Is there a way to activate only one particular neuron out of several neurons in tensorflow?

I am trying to create a design that would look something like this: [diagram from the original post not shown]

Right now I am using the following code, but what I need is for the output of the second hidden layer to be inserted at the 15th position of the next layer's input. With this code it ends up appended at the 31st position instead.
from tensorflow.keras.layers import Input, Dense, concatenate
from tensorflow.keras.models import Model

inputs = Input(shape=(30,), name='first_input')
hn = Dense(4, activation='relu')(inputs)
output = Dense(1, activation='linear')(hn)
first_model = Model(inputs=inputs, outputs=output)

second_input = Input(shape=(30,), name='second_input')
from_first_model = first_model.output
merge_layer = concatenate([second_input, from_first_model])
hn = Dense(4, activation="relu")(merge_layer)
dnn_op_layer = Dense(1, activation='linear')(hn)
model_b = Model(inputs=[second_input, first_model.input], outputs=dnn_op_layer)
This should work: slice the tensor into two parts, then concatenate the two parts with the output from the second hidden layer in between.
import numpy as np
import tensorflow as tf

test = tf.convert_to_tensor(np.random.rand(30), dtype=tf.float32)
test2 = tf.convert_to_tensor([100], dtype=tf.float32)
# create layer 3 as desired: first 15 values, the inserted output, then the rest
temp, temp2 = test[:15], test[15:]
layer3 = tf.concat([temp, test2, temp2], axis=0)
Edit: the error you were getting might have come from using the functional API. Additionally, the previous example assumed the input has shape (30,), when it should really have a shape like (1, 30), with a leading batch dimension, to be consistent with TensorFlow. A subclassed model handles this:
class model(tf.keras.Model):
    def __init__(self):
        super().__init__()
        self.hidden1 = tf.keras.layers.Dense(4, activation='relu')
        self.hidden2 = tf.keras.layers.Dense(1)
        self.hidden4 = tf.keras.layers.Dense(4, activation='relu')
        self.hidden5 = tf.keras.layers.Dense(1)

    def call(self, inputs):
        x = self.hidden1(inputs)
        x = self.hidden2(x)
        # insert the second hidden layer's output at position 15 of the input
        temp, temp2 = inputs[:, :15], inputs[:, 15:]
        layer3 = tf.concat([temp, x, temp2], axis=1)
        x = self.hidden4(layer3)
        x = self.hidden5(x)
        return x
# test
mymodel = model()
inputs = tf.convert_to_tensor(np.random.rand(1,30), dtype=tf.float32)
mymodel(inputs)

How to use a TimeDistributed layer for predicting sequences of dynamic length?

So I am trying to build an LSTM-based autoencoder, which I want to use for time series data. These are split up into sequences of different lengths. The input to the model thus has shape [None, None, n_features], where the first None stands for the number of samples and the second for the time_steps of the sequence. The sequences are processed by an LSTM with the argument return_sequences=False; the coded dimension is then recreated by the RepeatVector function and run through an LSTM again. In the end I would like to use the TimeDistributed layer, but how do I tell Python that the time_steps dimension is dynamic? See my code:
from keras import backend as K
# .... other dependencies .....

input_ae = Input(shape=(None, 2))  # shape: time_steps, n_features
LSTM1 = LSTM(units=128, return_sequences=False)(input_ae)
code = RepeatVector(n=K.shape(input_ae)[1])(LSTM1)  # bottleneck layer
LSTM2 = LSTM(units=128, return_sequences=True)(code)
output = TimeDistributed(Dense(units=2))(LSTM2)  # ??????? HOW TO ????

# no problem here so far:
model = Model(input_ae, outputs=output)
model.compile(optimizer='adam', loss='mse')
This function seems to do the trick:
import tensorflow as tf

def repeat(x_inp):
    x, inp = x_inp
    x = tf.expand_dims(x, 1)
    # repeat the encoded vector once per time step of the original input
    x = tf.repeat(x, [tf.shape(inp)[1]], axis=1)
    return x
Example:
import numpy as np
from tensorflow.keras.layers import Input, LSTM, Lambda, TimeDistributed, Dense
from tensorflow.keras.models import Model

input_ae = Input(shape=(None, 2))
LSTM1 = LSTM(units=128, return_sequences=False)(input_ae)
code = Lambda(repeat)([LSTM1, input_ae])
LSTM2 = LSTM(units=128, return_sequences=True)(code)
output = TimeDistributed(Dense(units=2))(LSTM2)
model = Model(input_ae, output)
model.compile(optimizer='adam', loss='mse')

X = np.random.uniform(0, 1, (100, 30, 2))
model.fit(X, X, epochs=5)
I'm using tf.keras with TF 2.2
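As a quick sanity check of the dynamic length (my addition, assuming the fitted model above): the same model should accept sequences of a different length at inference time, since both the Input spec and the repeat are shape-agnostic:

X_long = np.random.uniform(0, 1, (5, 77, 2))  # 77 time steps instead of 30
out = model.predict(X_long)
print(out.shape)  # (5, 77, 2): the output length follows the input length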

Scikit-Learn cross validation with two inputs, one output

I'm building a CNN model to categorise data and am looking at cross-validation with Scikit-Learn. I'm using two inputs, and would like to know how to cross-validate with two inputs.
From what I can see, kfold.split() only accepts an X input and a y output; I have tried passing a list of inputs, but it gets read as a single list.
I've used the following code to iterate through one model and cross-validate it, but I don't see any way of cross-validating with two inputs.
import numpy as np
from sklearn.model_selection import StratifiedKFold
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

np.random.seed(seed)
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
cvscores = []
for train, test in kfold.split(X, Y):
    input = Input(shape=(100, 150, 1))
    dense = Dense(2, activation='relu')(input)
    output = Dense(1, activation='relu')(dense)
    model = Model(input, output)
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(X[train], Y[train], epochs=150, batch_size=10, verbose=0)
    scores = model.evaluate(X[test], Y[test], verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
    cvscores.append(scores[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
I know it's kind of late, but hopefully this will be useful for someone having the same problem.
One possible solution is to concatenate both inputs and then split them back apart once inside the loop.
As an example:
I already concatenated both inputs into x_agg; the first input has 2001 columns.
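(A quick sketch of that concatenation step, with hypothetical array names x_global and x_local standing in for the two inputs:)

import numpy as np

# x_global: first input with 2001 columns; x_local: second input
x_agg = np.concatenate([x_global, x_local], axis=1)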
for train_index, valid_index in kfold.split(x_agg, y):
    X_train_fold = x_agg[train_index]
    X_valid_fold = x_agg[valid_index]
    y_train_fold = y[train_index]
    y_valid_fold = y[valid_index]

    x_train_global = X_train_fold[:, :2001]  # split first input (training)
    x_train_local = X_train_fold[:, 2001:]
    x_valid_global = X_valid_fold[:, :2001]  # split first input (validation)
    x_valid_local = X_valid_fold[:, 2001:]

    model.fit([x_train_global, x_train_local], y_train_fold, ...)
    score = model.evaluate([x_valid_global, x_valid_local], y_valid_fold, verbose=0)[1]