How to fix 'tensorflow.keras.layers' has no attribute 'input' - tensorflow

The following is the code I am trying to run, but I keep getting "'tensorflow.keras.layers' has no attribute 'input'". Can anyone give advice? :)
I don't know why, even though I have loaded the data, it keeps saying there is no input for keras.
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import datasets
mnist = datasets.mnist
(train_x, train_y), (test_x, test_y) = mnist.load_data()
inputs = layers.input((28, 28, 1))
net = layers.Conv2D(32, (3, 3), padding ='SAME')(inputs)
net = layers.Activation('relu')(net)
net = layers.Conv2D(32, (3, 3), padding ='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(0, 25)(net)
net = layers.Conv2D(64, (3, 3), padding ='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.Conv2D(64, (3, 3), padding ='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(0, 25)(net)
net = layers.Flatten()(net)
net = layers.Dense(512)(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0, 5)(net)
net = layers.Dense(10)(net)
net = layers.Activation('softmax')(net)
model = tf.keras.Model(inputs=inputs, outputs=net, name='Basic_CNN')
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-23-5b0ca3669e40> in <module>
----> 1 inputs = layers.input((28, 28, 1))
2 net = layers.Conv2D(32, (3, 3), padding ='SAME')(inputs)
3 net = layers.Activation('relu')(net)
4 net = layers.Conv2D(32, (3, 3), padding ='SAME')(net)
5 net = layers.Activation('relu')(net)
AttributeError: module 'tensorflow.keras.layers' has no attribute 'input'

You must use Input instead of input.
Also, what do you mean by net = layers.Dropout(0, 25)(net)?
I think you meant net = layers.Dropout(0.25)(net).
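For reference, a minimal corrected sketch of the offending lines (only the fixes, not the full network):

import tensorflow as tf
from tensorflow.keras import layers

inputs = layers.Input((28, 28, 1))  # capital I: the factory function is layers.Input
net = layers.Conv2D(32, (3, 3), padding='SAME')(inputs)
net = layers.Dropout(0.25)(net)  # rate is a single float; Dropout(0, 25) passes 25 as noise_shape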

Related

Tensorflow image processing: how to predict mask

I am very much a beginner and currently trying to get started with CNNs; I wanted to try out lane detection.
Using the tusimple dataset, I have images of roads and create masks for the lanes.
I was following this blog where something similar is done. However, my results look nothing like in the blog. For simplicity, I only use one image as the dataset; this way, the network should very easily be able to detect the lane in this one image. However, this CNN basically just adds a red filter to the input image.
Maybe you can point me in the right direction / tell me what I am doing wrong.
I posted the whole notebook here: https://colab.research.google.com/drive/1igOulIU-1HA-Ecf4diQTLXM-mrnAeFXz?usp=sharing
Here is the most relevant code:
def convolutional_block(inputs=None, n_filters=32, dropout_prob=0, max_pooling=True):
    conv = Conv2D(n_filters,
                  kernel_size=3,
                  activation='relu',
                  padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(inputs)
    conv = Conv2D(n_filters,
                  kernel_size=3,
                  activation='relu',
                  padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(conv)
    if dropout_prob > 0:
        conv = Dropout(dropout_prob)(conv)
    if max_pooling:
        next_layer = MaxPooling2D(pool_size=(2, 2))(conv)
    else:
        next_layer = conv
    #conv = BatchNormalization()(conv)
    skip_connection = conv
    return next_layer, skip_connection
def upsampling_block(expansive_input, contractive_input, n_filters=32):
    up = Conv2DTranspose(
        n_filters,
        kernel_size=3,
        strides=(2, 2),
        padding='same')(expansive_input)
    merge = concatenate([up, contractive_input], axis=3)
    conv = Conv2D(n_filters,
                  kernel_size=3,
                  activation='relu',
                  padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(merge)
    conv = Conv2D(n_filters,
                  kernel_size=3,
                  activation='relu',
                  padding='same',
                  kernel_initializer=tf.keras.initializers.HeNormal())(conv)
    return conv
def unet_model(input_size=(720, 1280, 3), n_filters=32, n_classes=3):
    inputs = Input(input_size)
    # contracting path
    cblock1 = convolutional_block(inputs, n_filters)
    cblock2 = convolutional_block(cblock1[0], 2 * n_filters)
    cblock3 = convolutional_block(cblock2[0], 4 * n_filters)
    cblock4 = convolutional_block(cblock3[0], 8 * n_filters, dropout_prob=0.2)
    cblock5 = convolutional_block(cblock4[0], 16 * n_filters, dropout_prob=0.2, max_pooling=None)
    # expanding path
    ublock6 = upsampling_block(cblock5[0], cblock4[1], 8 * n_filters)
    ublock7 = upsampling_block(ublock6, cblock3[1], n_filters * 4)
    ublock8 = upsampling_block(ublock7, cblock2[1], n_filters * 2)
    ublock9 = upsampling_block(ublock8, cblock1[1], n_filters)
    conv9 = Conv2D(n_classes,
                   1,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(ublock9)
    conv10 = Activation('softmax')(conv9)
    model = tf.keras.Model(inputs=inputs, outputs=conv10)
    return model
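For completeness, a minimal sketch of how this model might then be compiled and overfit on the single image (the loss and the shapes of x and y are assumptions, not taken from the notebook):

model = unet_model(input_size=(720, 1280, 3), n_filters=32, n_classes=3)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')  # assumes an integer-coded mask
# x: (1, 720, 1280, 3) image batch, y: (1, 720, 1280) integer mask -- assumed shapes
model.fit(x, y, epochs=100)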

TypeError: 'NoneType' object is not callable, when I try to fit my model

I have a model:
import numpy as np
from keras import layers
import keras
from tensorflow.keras.models import Sequential
#some variables
act_function = 'tanh'
input_x = np.ones((1000, 2, 4, 5, 1))#arbitrary array as an example
target = np.ones((1000, 2, 64, 100, 1))
decoder = Sequential()
decoder.add(keras.layers.InputLayer(input_shape=(2, 4, 5, 1)))
decoder.add(layers.Conv3D(8, (1, 3, 3), activation=act_function, padding='same', name = 'h8'))
decoder.add(layers.UpSampling3D((1, 2, 1), name = 'h9'))#8 5
decoder.add(layers.Conv3D(16, (1, 3, 3), activation=act_function, padding='same', name = 'h10'))
decoder.add(layers.UpSampling3D((1, 2, 5), name = 'h11'))#16, 25
decoder.add(layers.Conv3D(32, (1, 3, 3), activation=act_function, padding='same', name = 'h12'))
decoder.add(layers.UpSampling3D((1, 2, 2), name = 'h13'))#32, 50
decoder.add(layers.Conv3D(64, (1, 3, 3), activation=act_function, padding = 'same',name = 'h14'))
decoder.add(layers.UpSampling3D((1, 2, 2), name = 'h15'))#64 100
decoder.add(layers.Conv3D(1, (1, 3, 3), activation=act_function, padding='same', name = 'out'))
decoder.compile(optimizer='adam', loss = 'mse')
decoder.fit(input_x, target, epochs=10)
Error:
In [163]: decoder.fit(input_x, target)
Epoch 1/10
Traceback (most recent call last):
Input In [163] in <cell line: 1>
model.fit(input_x, target)
File ~/anaconda3/envs/keras_environment/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:1193 in fit
tmp_logs = self.train_function(iterator)
File ~/anaconda3/envs/keras_environment/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:885 in __call__
result = self._call(*args, **kwds)
File ~/anaconda3/envs/keras_environment/lib/python3.8/site-packages/tensorflow/python/eager/def_function.py:917 in _call
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
TypeError: 'NoneType' object is not callable
Edit 1:
In comments I was asked to explain how I define my dataset. The input dataset is a numpy.ndarray whose elements are all floats (positive or negative). The target dataset is also a numpy.ndarray of floats (positive or negative).
input_x.shape
>>(1000, 2, 4, 5, 1)
target.shape
>>(1000, 2, 64, 100, 1)
Here, 1000 is the number of samples, each sample is a complex signal, 2 is for the real and imaginary parts of each sample, 64 and 100 are the matrix dimensions (64x100), and 1 is the value (a matrix entry). Dimension 2 might be confusing, so here is an example:
target[:, 0, :, :, :]# contains real part of complex signal
target[:, 1, :, :, :]# contains imaginary part of complex signal
Edit 2:
As mentioned in comments, I tried to minimize my question and make it reproducible.
The above code does not show any error when I tried replicating it in Google Colab using TF 2.9 and TF 2.8, and in a Jupyter notebook using TF 2.9.
Please check this gist as a reference. Let us know the TensorFlow version and Python version installed on your system for further investigation if the issue still persists.
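For reference, the installed versions can be printed like this:

import sys
import tensorflow as tf
print(sys.version)      # Python version
print(tf.__version__)   # TensorFlow version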
Note: it is always good practice to import keras and its other libraries from tensorflow, as below:
from tensorflow import keras
from tensorflow.keras import layers

Create timeseries dataset for TensorFlow v2

I'm trying to feed a CNN+LSTM model with data from a csv. What I'm missing is how to set up a correct dataset to start training my model.
This is my test model:
def test_model():
    model = models.Sequential()
    model.add(TimeDistributed(Conv1D(32, 4, strides=1, activation='relu', padding="valid"), input_shape=[None, 6, 20]))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2), input_shape=[None, 6, 20]))
    model.add(TimeDistributed(Conv1D(64, 4, strides=1, activation='relu', padding="valid"), input_shape=[None, 6, 20]))
    model.add(TimeDistributed(MaxPooling1D(pool_size=2), input_shape=[None, 6, 20]))
    model.add(TimeDistributed(Flatten(), input_shape=[None, 6, 20]))
    model.add(LSTM(100, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(100, activation="relu"))
    model.add(Dropout(0.4))
    model.add(Dense(5, activation="softmax"))
    model.compile(optimizer=keras.optimizers.Adam(1e-3), loss="binary_crossentropy", metrics=["accuracy"])
    return model
Here you can download test.csv
The CSV dataset is made of 6 features + 1 multilabel column. I need to create a time series out of every 20 rows, so I think my shape should be [None, 6, 20], ordered by ascending timestamp value.
I'm new to TensorFlow and I don't know how to create an appropriate dataset from scratch. I was able to load a dataset of images from a directory (for a CNN), but in this scenario I really don't know how to do it.
This is what I tried to generate my dataset:
with open('test.csv') as csvfile:
    dataset = list()
    reader = csv.reader(csvfile, delimiter=',')
    next(reader)
    count = 0
    timeseries = list()
    labels = list()
    for row in reader:
        count = count + 1
        if count <= 20:
            timeseries.append(
                [float(row[0]), float(row[1]), float(row[2]), float(row[3]), float(row[4]), float(row[5])])
        else:
            dataset.append(timeseries)
            labels.append(int(row[6].split("L")[-1]))
            timeseries = list()
            count = 0
After that I transformed it into a tf.data.Dataset like this:
dataset = tf.data.Dataset.from_tensor_slices(dataset)
labels = tf.data.Dataset.from_tensor_slices(labels)
Here I got a <TensorSliceDataset shapes: (20, 6), types: tf.float32> like I wanted. Then I fed a K-Fold with it:
estimator = KerasClassifier(build_fn=test_model, epochs=60, batch_size=5, verbose=0)
kfold = KFold(n_splits=10, shuffle=True)
results = cross_val_score(estimator, dataset, labels, cv=kfold)
When running cross_val_score I got this error:
TypeError: Singleton array array(<TensorSliceDataset shapes: (20, 6), types: tf.float32>, dtype=object) cannot be considered a valid collection.
What am I missing?
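A note on the error (a sketch of one workaround, not a verified answer from this thread): scikit-learn's cross_val_score expects array-like X and y, not tf.data.Dataset objects, so keeping the windows as plain NumPy arrays avoids this particular TypeError. Assuming dataset and labels are still the Python lists built from the CSV above:

import numpy as np
X = np.asarray(dataset, dtype=np.float32)  # shape (num_windows, 20, 6)
y = np.asarray(labels)
results = cross_val_score(estimator, X, y, cv=kfold)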

Am I sharing Layers ? - keras network modeling

I have shared layers to build a model that has multiple inputs. My code is:
f1 = 128
f2 = 256
f3 = 1
shared_conv1 = L.Conv2D(filters=f1, kernel_size=(5, 5), strides=1, padding='same',name='shared_conv1')
shared_conv2 = L.Conv2D(filters=f1, kernel_size=(3, 3), strides=1, padding='same',name='shared_conv2')
shared_conv3 = L.Conv2D(filters=f1, kernel_size=(3, 3), strides=1, padding='same',name='shared_conv3')
shared_batch1 = L.BatchNormalization(name='shared_batch1')
shared_batch2 = L.BatchNormalization(name='shared_batch2')
shared_batch3 = L.BatchNormalization(name='shared_batch3')
shared_relu1 = L.ReLU(name='shared_relu1')
shared_relu2 = L.ReLU(name='shared_relu2')
shared_relu3 = L.ReLU(name='shared_relu3')
for i in range(length):
    x_64 = shared_conv1(input[i])
    x_64 = shared_batch1(x_64)
    x_64 = shared_relu1(x_64)
    x_64 = shared_conv2(x_64)
    x_64 = shared_batch2(x_64)
    x_64 = shared_relu2(x_64)
    x_64 = shared_conv3(x_64)
    x_64 = shared_batch3(x_64)
    x_64 = shared_relu3(x_64)
    print(x_64)
I want to use 8 inputs and want x_64 to be output 8 times, and I saw print results like this:
Tensor("shared_relu1_3/Relu:0", shape=(None, 64, 64, 128), dtype=float32)
Tensor("shared_relu1_3_1/Relu:0", shape=(None, 64, 64, 128), dtype=float32)
Tensor("shared_relu1_3_2/Relu:0", shape=(None, 64, 64, 128), dtype=float32)
Since I expected "shared_relu1_3/Relu:0" to be printed 3 times, I could not tell whether my network works correctly as a shared network.
Am I doing this right?
Yes, it is working correctly. What might confuse you is that the output tensors actually represent computations, so each of them is different; but that does not mean the weights are different, they are being shared.
Sharing works by passing several tensors through the same layer instance, since layers are what hold the weights. So it should be working as expected.
Also, there is no need to share ReLUs, since they have no weights. Only layers that have weights need to be shared.
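To see the sharing concretely, here is a tiny standalone sketch (not from the original post): one layer instance called on two inputs yields two distinct output tensors but a single set of weights:

import tensorflow as tf
from tensorflow.keras import layers

shared = layers.Dense(4, name='shared_dense')
out_a = shared(tf.keras.Input(shape=(8,)))
out_b = shared(tf.keras.Input(shape=(8,)))
# Two different output tensors, but only one kernel and one bias:
print(len(shared.weights))  # 2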

ValueError: Cannot feed value of shape (64, 200, 75) for Tensor 'TargetsData/Y:0', which has shape '(200, 75)'

I know this is a dumb question, but I can't seem to figure it out. I feed in a numpy array of shape (?, 200, 75) and get this error:
ValueError: Cannot feed value of shape (64, 200, 75) for Tensor 'TargetsData/Y:0', which has shape '(200, 75)'
Here is my code:
import numpy as np
import tflearn
print("loading features....")
features = np.load("features_xs.npy")
print("loading classes....")
classes = np.load("classes_xs.npy")
symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
           'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
           'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
           'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
           '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '.', ',',
           '!', '?', ':', ';', '\'', '(', ')', '-', '_', ' ', '"']
num_symbols = len(symbols)
input_layer = tflearn.input_data(shape=[None, 200, num_symbols])
input_layer = tflearn.flatten(input_layer)
dense1 = tflearn.fully_connected(input_layer, 1000, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dense2 = tflearn.fully_connected(dense1, 2000, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dense2 = tflearn.fully_connected(dense2, 1000, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout2 = tflearn.dropout(dense2, 0.8)
final = tflearn.fully_connected(dropout2, (200 * num_symbols), activation='tanh')
reshape = tflearn.reshape(final, [200, num_symbols], name="Reshape")
Adam = tflearn.Adam(learning_rate=0.01)
net = tflearn.regression(reshape, optimizer=Adam,
                         loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(features, classes, n_epoch=1, show_metric=True, run_id="dense_model")
model.save("model")
num_symbols is equal to 75, in case you're wondering.
I can't find the solution; please help. Thanks.
Run the following code:
print(classes.shape)
You will get an output of (64, 200, 75), but your final reshape layer expects a shape of (200, 75). You will have to supply values with shape (200, 75) from your classes variable to resolve the error.
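One possible fix (an assumption on my part, not stated in the answer above) is to keep the batch dimension in the reshape so the network output lines up with targets of shape (batch, 200, 75):

# hypothetical fix: -1 preserves the batch dimension
reshape = tflearn.reshape(final, [-1, 200, num_symbols], name="Reshape")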