super(type, obj): obj must be an instance or subtype of type in Keras - tensorflow

I implemented the following code to build Tiny YOLO v2 from scratch using Keras with the TensorFlow backend.
My code was working fine in Keras 2.1.5, but when I updated to Keras 2.1.6 I ran into this error:
kernel_constraint=None,
TypeError: super(type, obj): obj must be an instance or subtype of type
Please help me out. Thank you so much.
import tensorflow as tf
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import (Dense, Conv2D, MaxPooling2D, Dropout, Flatten,
                          Reshape, LeakyReLU, BatchNormalization)
def yolo():
    model = Sequential()
    model.add(Conv2D(16, (3, 3), padding='same', input_shape=(416, 416, 3), data_format='channels_last'))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(32, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(12, (1, 1), padding='same'))
    model.add(BatchNormalization(axis=-1))
    model.add(LeakyReLU(alpha=0.1))
    model.add(Reshape((13, 13, 2, 6)))
    return model
model = yolo()
model.summary()

This error can be caused by continuing to work in the same Python kernel after the update: the running interpreter still holds the old Keras classes in memory, so freshly imported modules and previously loaded objects no longer match. Restart the Python kernel after updating and the error should go away.
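A quick sanity check (not from the original answer) is to confirm, after the restart, that the interpreter actually loaded the new version:

import keras
print(keras.__version__)  # should print 2.1.6 after a clean restart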

Related

What is the classification algorithm used by Keras?

I've created a sound classifier built with Keras, following some tutorials from the internet. Here is my model code:
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, InputLayer, Dropout, Conv1D, Conv2D, Flatten, Reshape, MaxPooling1D, MaxPooling2D, BatchNormalization, TimeDistributed
from tensorflow.keras.optimizers import Adam
model = Sequential()
model.add(Reshape((int(input_length / 40), 40), input_shape=(input_length, )))
model.add(Conv1D(8, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling1D(pool_size=2, strides=2, padding='same'))
model.add(Dropout(0.25))
model.add(Conv1D(16, kernel_size=3, activation='relu', padding='same'))
model.add(MaxPooling1D(pool_size=2, strides=2, padding='same'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(classes, activation='softmax', name='y_pred'))
opt = Adam(lr=0.005, beta_1=0.9, beta_2=0.999)
# this controls the batch size, or you can manipulate the tf.data.Dataset objects yourself
BATCH_SIZE = 32
train_dataset = train_dataset.batch(BATCH_SIZE, drop_remainder=False)
validation_dataset = validation_dataset.batch(BATCH_SIZE, drop_remainder=False)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
model.fit(train_dataset, epochs=1000, validation_data=validation_dataset, verbose=2, callbacks=callbacks)
My teacher asked me what algorithm I use for classification (he mentioned things like k-NN, Naive Bayes, or SVM), and I don't know what I'm using.
You're using a Convolutional Neural Network (CNN). It is none of k-NN, Naive Bayes, or SVM: the Conv1D/MaxPooling1D layers learn local feature detectors from the audio, and the final Dense layer with a softmax activation performs the classification.
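To make that concrete, here is a small hedged illustration (x_batch is a placeholder for a batch of preprocessed samples): the trained network itself is the classifier, and the predicted class is simply the argmax over its softmax outputs.

import numpy as np

probs = model.predict(x_batch)        # shape (batch, classes); each row sums to 1
predicted = np.argmax(probs, axis=1)  # index of the winning class per sample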

Tensorflow: ValueError: Data cardinality is ambiguous:

I recently started learning Tensorflow and am following this guide.
https://pythonprogramming.net/convolutional-neural-network-deep-learning-python-tensorflow-keras/
I am attempting to use my own dataset with two labels (car and not car).
This is my code:
import tensorflow as tf
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
import pickle
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y = pickle.load(pickle_in)
X = X/255.0
model = Sequential()
model.add(Conv2D(256, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)
model.save('car.model')
However, I'm getting an error that I do not understand how to fix.
raise ValueError(msg)
ValueError: Data cardinality is ambiguous:
x sizes: 8406
y sizes: 0
Please provide data which shares the same first dimension.
Appreciate the help!
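One likely cause, assuming y.pickle was saved from a plain Python list (as in that tutorial): Keras cannot infer the sample count from the list, which is why it reports "y sizes: 0". A minimal sketch of the fix is to convert y to a NumPy array and verify that the first dimensions of X and y match before calling fit:

import numpy as np

y = np.array(y)          # convert the pickled list into an array
print(X.shape, y.shape)  # both first dimensions should be 8406

model.fit(X, y, batch_size=32, epochs=3, validation_split=0.3)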

AttributeError: 'Sequential' object has no attribute 'run_eagerly'

I'm trying to use this model to train on rock, paper, scissors pictures. It was trained on 1800 pictures and only reaches an accuracy of 30-40%. I then tried to use TensorBoard to see what's going on, but the error in the title appears.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.callbacks import TensorBoard
model = Sequential()
model.add(Conv2D(256, kernel_size=(4, 4),
                 activation='relu',
                 input_shape=(64, 64, 3)))
model.add(Conv2D(196, (4, 4), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(196, (4, 4), activation='relu'))
model.add(Conv2D(196, (4, 4), activation='relu'))
model.add(Dropout(0.25))
model.add(Conv2D(128, (4, 4), activation='relu'))
model.add(Conv2D(128, (4, 4), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(96, (4, 4), activation='relu'))
model.add(Conv2D(96, (4, 4), activation='relu'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
# here it instantiates the tensorboard
tensorboard = TensorBoard(log_dir="C:/Users/bamla/Desktop/RPS project/Logs")
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="SGD",
              metrics=['accuracy'])
model.summary()
# here it fits the model
model.fit(x_train, y_train, batch_size=50, epochs=3,
          callbacks=[tensorboard])
This outputs:
Traceback (most recent call last):
  File "c:/Users/bamla/Desktop/RPS project/Testing.py", line 82, in <module>
    model.fit(x_train, y_train, batch_size=50, epochs=3, callbacks=[tensorboard])
  File "C:\Users\bamla\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training.py", line 1178, in fit
    validation_freq=validation_freq)
  File "C:\Users\bamla\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\engine\training_arrays.py", line 125, in fit_loop
    callbacks.set_model(callback_model)
  File "C:\Users\bamla\AppData\Local\Programs\Python\Python37\lib\site-packages\keras\callbacks.py", line 68, in set_model
    callback.set_model(model)
  File "C:\Users\bamla\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\callbacks.py", line 1509, in set_model
    if not model.run_eagerly:
AttributeError: 'Sequential' object has no attribute 'run_eagerly'
Also, if you have any tips on how to improve the accuracy it would be appreciated!
The problem is here:
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.callbacks import TensorBoard
Do not mix keras and tf.keras imports; they are not compatible with each other and produce weird errors like the one you are seeing.
I changed from tensorflow.python.keras.callbacks import TensorBoard
to from keras.callbacks import TensorBoard and it worked for me.
For me, this did the job:
from tensorflow.keras import datasets, layers, models
from tensorflow import keras
It seems that you are mixing imports from keras and tensorflow.keras (the latter is preferred).
https://www.pyimagesearch.com/2019/10/21/keras-vs-tf-keras-whats-the-difference-in-tensorflow-2-0/
And most importantly, going forward all deep learning practitioners
should switch their code to TensorFlow 2.0 and the tf.keras package.
The original keras package will still receive bug fixes, but moving
forward, you should be using tf.keras.
Try with:
import tensorflow
Conv2D = tensorflow.keras.layers.Conv2D
MaxPooling2D = tensorflow.keras.layers.MaxPooling2D
Dense = tensorflow.keras.layers.Dense
Flatten = tensorflow.keras.layers.Flatten
Dropout = tensorflow.keras.layers.Dropout
TensorBoard = tensorflow.keras.callbacks.TensorBoard
model = tensorflow.keras.Sequential()

TF / Keras error: InputLayer not a Checkpointable

I am trying out the newly added TPU support on Google Colab with the simple cats vs dogs dataset.
After creating a simple CNN, I tried to convert the model to run on the TPU, but it failed with this error:
TypeError: Checkpointable._track_checkpointable() passed type <class 'keras.engine.topology.InputLayer'>, not a Checkpointable.
Here's the code that I wrote on Colab.
model = models.Sequential()
model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape=(150, 150, 3)))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Conv2D(64, (3,3), activation='relu'))
model.add(layers.MaxPooling2D((2,2)))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
train_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150,150), batch_size=20, class_mode='binary')
tpu_model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=tf.contrib.tpu.TPUDistributionStrategy(tf.contrib.cluster_resolver.TPUClusterResolver(tpu="grpc://" + os.environ['COLAB_TPU_ADDR'])))
My guess is I am doing something wrong in train_generator. But I am not sure what it is. Any help would be highly appreciated.
If you import layers from Keras instead of TensorFlow, like this:
from keras import layers,models
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
you will get the error you mentioned above:
TypeError: Checkpointable._track_checkpointable() passed type <class 'keras.engine.topology.InputLayer'>, not a Checkpointable.
So import the layers directly from TensorFlow instead, as in the code below:
from tensorflow.keras import layers,models
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
or you can see my full code here:
https://gist.github.com/ilmimris/8218e397dd35ab693404e95db32dc574

Sampled_Softmax input parameters

I am working on a speaker recognition problem. I have a very large number of classes, so I need to use tf.nn.sampled_softmax_loss to speed up training. The problem is that I am using Keras with TensorFlow as the backend, and Keras doesn't implement sampled softmax, so I need to use the TensorFlow function, but it's unclear what the inputs of tf.nn.sampled_softmax_loss() should be.
My model and input are as below:
from preprocess import *
import keras
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D,AveragePooling2D
from keras.utils import to_categorical
from keras.utils import plot_model
from keras.models import load_model
from keras import regularizers
import numpy as np
from keras.callbacks import EarlyStopping
from keras import metrics
import os
import tensorflow as tf
from tensorflow.python.framework import dtypes
from importance_sampling.training import ImportanceTraining
epochs = 50
batch_size = 100
verbose = 1
labels= get_labels(big_numpy_files_path)
num_classes = len(labels)
#save_data_as_numpy_array(max_len = feature_dim_1, max_len2 = feature_dim_2,origin_path=data_set_path,destination_path=numpy_files_path)
#Get X & Y
X_train, X_valid, X_test ,y_train,y_valid,y_test = get_x_y_data(split_ratio=0.8, random_state=42,maxsamples=20,path=big_numpy_files_path)
#reshape X for input into CNN
X_train, X_valid, X_test = reshape_prepare_for_input(X1=X_train, X2=X_valid, X3=X_test,channel=1)
#Dimensions
dim_1 = X_train.shape[1]
dim_2 = X_train.shape[2]
dim_3 = X_train.shape[3]
#one hot encoding of Y
y_train_hot = to_categorical(y_train)
y_valid_hot = to_categorical(y_valid)
y_test_hot = to_categorical(y_test)
#Model
model = Sequential()
model.add(Conv2D(128, kernel_size=(6, 6),strides=2, activation='relu', input_shape=(X_train.shape[1], X_train.shape[2], X_train.shape[3])))
model.add(Conv2D(64, kernel_size=(2, 2),strides=1, activation='relu'))
model.add(Conv2D(32, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(256, activation='relu', use_bias=True,kernel_regularizer=regularizers.l2(0.01)))
model.add(Dropout(0.2))
model.add(Dense(256, activation='relu',use_bias=True,kernel_regularizer=regularizers.l2(0.02)))
model.add(Dropout(0.3))
model.add(Dense(256, activation='relu',use_bias=True,kernel_regularizer=regularizers.l2(0.02)))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation='softmax'))
loss=keras.losses.categorical_crossentropy
model.compile(loss=loss,optimizer='adamax',metrics=['accuracy'])
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=15, verbose=verbose, mode='auto')
model.fit(X_train, y_train_hot,batch_size=batch_size,callbacks=[earlystopping],epochs=epochs, verbose=verbose, validation_data=(X_valid, y_valid_hot))
If I want to replace the above loss function with sampled softmax, what should the inputs be, given my architecture, and should I add tf.reduce_mean?
For more details on the code, see https://github.com/selimelawwa/Speaker_Verification
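For reference, a minimal sketch of how tf.nn.sampled_softmax_loss is typically wired up for an architecture like this one, assuming the activations of the last 256-unit Dense layer serve as inputs and the labels are integer class ids (num_sampled here is a hypothetical value, not from the question):

import tensorflow as tf

hidden_dim = 256   # width of the layer feeding the softmax
num_sampled = 100  # hypothetical: candidate classes sampled per batch

# output-layer parameters: [num_classes, hidden_dim] weights and [num_classes] biases
softmax_w = tf.Variable(tf.truncated_normal([num_classes, hidden_dim], stddev=0.05))
softmax_b = tf.Variable(tf.zeros([num_classes]))

def sampled_loss(hidden, labels):
    # labels must be integer class ids shaped [batch_size, 1], not one-hot vectors
    labels = tf.reshape(tf.cast(labels, tf.int64), [-1, 1])
    losses = tf.nn.sampled_softmax_loss(
        weights=softmax_w,   # [num_classes, hidden_dim]
        biases=softmax_b,    # [num_classes]
        labels=labels,       # [batch_size, 1]
        inputs=hidden,       # [batch_size, hidden_dim] pre-softmax activations
        num_sampled=num_sampled,
        num_classes=num_classes)
    # yes: wrap with tf.reduce_mean to average the per-example losses into a scalar
    return tf.reduce_mean(losses)

At evaluation time you still compute the full softmax (tf.matmul(hidden, tf.transpose(softmax_w)) + softmax_b), since sampled softmax is only a training-time approximation.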