Can I get all the output Keras layers - TensorFlow

I just started with deep learning and I want to get the input/output of each layer in real time. I am using Google Colab with TensorFlow 2 and Python 3. I tried to get the layers like this, but for some reason I don't understand, it is not working. Any help will be appreciated.
# Here are imports
from __future__ import absolute_import, division, print_function, unicode_literals
try:
    # %tensorflow_version only exists in Colab.
    %tensorflow_version 2.x
except Exception:
    pass
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
import numpy as np  # needed for the random test input below
from tensorflow.keras import backend as K
# I am using CIFAR10 dataset
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# Here is the model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
# Compilation of the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))
# Based on https://stackoverflow.com/questions/41711190/keras-how-to-get-the-output-of-each-layer
# I tried this
tf.compat.v1.disable_eager_execution()
inp = model.input # input placeholder
outputs = [layer.output for layer in model.layers] # all layer outputs
functors = [K.function([inp, K.learning_phase()], [out]) for out in outputs] # evaluation functions
# Testing
test = np.random.random(input_shape)[np.newaxis, ...]  # note: input_shape is never defined here
layer_outs = [func([test, 1.]) for func in functors]
print(layer_outs)
# The error appears at this line:
functors = [K.function([inp, K.learning_phase()], [out]) for out in outputs]
# and I got this error message:
Tensor Tensor("conv2d/Identity:0", shape=(None, 30, 30, 32), dtype=float32) is not an element of this graph.

This error basically tells you that you are trying to modify the graph after it has been built. Once you call compile (with eager execution disabled), TF statically defines all operations, so you have to move the code snippet where you define the functors above the compile call. Just swap the last lines with these ones:
tf.compat.v1.disable_eager_execution()
inp = model.input                                    # input placeholder
outputs = [layer.output for layer in model.layers]   # all layer outputs
functors = [K.function([inp, K.learning_phase()], [out]) for out in outputs]  # evaluation functions
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=1,
                    validation_data=(test_images, test_labels))
# Testing
input_shape = [1] + list(model.input_shape[1:])
test = np.random.random(input_shape)
layer_outs = [func([test, 1.]) for func in functors]
print(layer_outs)
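Alternatively, in TF 2 you can keep eager execution enabled and wrap the layer outputs in a new functional Model instead; a minimal sketch, assuming the model compiled and trained as in the question (skip the tf.compat.v1.disable_eager_execution() call in that case):
import numpy as np
import tensorflow as tf

# Wrap the trained model so one forward pass returns every layer's output.
extractor = tf.keras.Model(inputs=model.input,
                           outputs=[layer.output for layer in model.layers])

test = np.random.random((1, 32, 32, 3)).astype("float32")  # dummy CIFAR10-shaped input
layer_outs = extractor(test, training=False)
for layer, out in zip(model.layers, layer_outs):
    print(layer.name, out.shape)
This sidesteps the legacy K.function / K.learning_phase() machinery entirely.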

Related

Why are the weights of my QAT tf_model floats and not 8-bit integers?

I performed a simple Quantization Aware Training with Tensorflow on MNIST as follows:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import mnist
# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 to 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
# Define the model architecture.
model = keras.Sequential([
    keras.layers.InputLayer(input_shape=(28, 28)),
    keras.layers.Reshape(target_shape=(28, 28, 1)),
    keras.layers.Conv2D(filters=12, kernel_size=(3, 3)),
    keras.layers.Activation('relu'),
    keras.layers.MaxPooling2D(pool_size=(2, 2)),
    keras.layers.Flatten(),
    keras.layers.Dense(10)
])
# Train the digit classification model
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.fit(
    train_images,
    train_labels,
    epochs=5,
    validation_split=0.1,
)
import tensorflow_model_optimization as tfmot
quantize_model = tfmot.quantization.keras.quantize_model
# q_aware stands for quantization aware.
q_aware_model = quantize_model(model)
# `quantize_model` requires a recompile.
q_aware_model.compile(optimizer='adam',
                      loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                      metrics=['accuracy'])
train_images_subset = train_images[0:1000] # out of 60000
train_labels_subset = train_labels[0:1000]
q_aware_model.fit(train_images_subset, train_labels_subset,
                  batch_size=500, epochs=5, validation_split=0.1)
However, when I try to investigate the weights of the quantized model using, for instance, q_aware_model.get_weights()[5], I get an array of type float32. I am supposed to get 8-bit integers; what am I doing wrong?
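Worth noting: tfmot's quantization-aware training only simulates quantization during training, so the trained Keras model's weights stay float32; actual int8 weights only materialize when the model is converted, e.g. with the TFLite converter. A minimal sketch:
import tensorflow as tf

# Quantization is applied when the QAT model is converted, not during training.
converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
quantized_tflite_model = converter.convert()

# The resulting flatbuffer stores the weights as int8.
with open('quantized_model.tflite', 'wb') as f:
    f.write(quantized_tflite_model)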

InvalidArgumentError: Incompatible shapes: [29] vs. [29,7,7,2]

So I'm new here, and new to Python too. I'm trying to build my own network. I found some 15x15 pictures of dogs and cats, and unfortunately I couldn't make this basic network work...
So, these are libraries which I'm using
from tensorflow.keras.models import Sequential
from tensorflow.keras import utils
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Dense
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import GlobalMaxPooling2D
Body
train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    'drive/MyDrive/cats vs dogs/cats vs dogs/training',
    color_mode="rgb",
    batch_size=32,
    image_size=(150, 150),
    shuffle=True,
    seed=42,
    validation_split=0.1,
    subset='training',
    interpolation="bilinear",
    follow_links=False,
)
validation_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    'drive/MyDrive/cats vs dogs/cats vs dogs/training',
    color_mode="rgb",
    batch_size=32,
    image_size=(150, 150),
    shuffle=True,
    seed=42,
    validation_split=0.1,
    subset='validation',
    interpolation="bilinear",
    follow_links=False,
)
test_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    'drive/MyDrive/cats vs dogs/cats vs dogs/test',
    batch_size=32,
    image_size=(150, 150),
    interpolation="bilinear"
)
model = Sequential()
model.add(keras.Input(shape=(150, 150, 3)))
model.add(Conv2D(32, 5, strides=2, activation="relu"))
model.add(Conv2D(32, 3, activation="relu"))
model.add(MaxPooling2D(3))
model.add(Dense(250, activation='sigmoid'))
model.add(Dense(100))
model.add(MaxPooling2D(3))
model.add(Dense(2))
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit(train_dataset, validation_data=validation_dataset, epochs=5, verbose=2)
And I get this error
Incompatible shapes: [29] vs. [29,7,7,2]
[[node gradient_tape/binary_crossentropy/mul_1/BroadcastGradientArgs
(defined at /usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py:464)
]] [Op:__inference_train_function_4364]
Errors may have originated from an input operation.
Input Source operations connected to node
gradient_tape/binary_crossentropy/mul_1/BroadcastGradientArgs:
In[0] gradient_tape/binary_crossentropy/mul_1/Shape:
In[1] gradient_tape/binary_crossentropy/mul_1/Shape_1
I tried changing from binary_crossentropy to categorical_crossentropy but it didn't help. I suppose my mistake is in the datasets or inputs, but I don't know how to solve it :(
Really hope to find help here!
[my architecture][1]
[1]: https://i.stack.imgur.com/w4Y9N.png
You need to flatten your prediction somewhere; otherwise you are outputting an image (29 samples of size 7x7 with 2 channels), while you simply want flat 2-dimensional logits (so shape 29x2). The architecture you are using is somewhat odd: did you mean to have a flattening operation before the first Dense layer, and then no MaxPooling2D (as it makes no sense for a flattened signal)? Mixing relu and sigmoid activations is also quite non-standard; I would encourage you to start with established architectures rather than composing your own, to build some intuition first.
from tensorflow.keras.layers import Flatten  # not among your imports above

model = Sequential()
model.add(keras.Input(shape=(150, 150, 3)))
model.add(Conv2D(32, 5, strides=2, activation="relu"))
model.add(Conv2D(32, 3, activation="relu"))
model.add(MaxPooling2D(3))
model.add(Flatten())
model.add(Dense(250, activation="relu"))
model.add(Dense(100, activation="relu"))
model.add(Dense(2))
model.summary()
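One side note, based on the image_dataset_from_directory calls in the question (which yield integer labels by default, label_mode='int'): with a 2-unit logits output, a sparse categorical loss is the matching choice, e.g.:
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              optimizer='adam',
              metrics=['accuracy'])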

Tensorboard graph messy (readvariableop_resource nodes) using tf.summary.trace_export

The code below builds two TensorBoard graphs for the same model. While the Keras API builds a nice, simple graph, using tf.summary.trace_export() adds, for each variable defined in the graph, a node in the external scope with the suffix "readvariableop_resource", which makes the graph really messy as the number of parameters increases.
(In the example below we have 2 dense layers, each with 2 variables (kernel and bias), for a total of 4 variables (4 nodes).)
from datetime import datetime
import tensorflow as tf
from tensorflow import keras
# Define the model.
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10, activation='softmax')
])
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])
(train_images, train_labels), _ = keras.datasets.mnist.load_data()
train_images = train_images / 255.0
# Define the Keras TensorBoard callback.
logdir="logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# Train the model.
model.fit(
    train_images,
    train_labels,
    batch_size=64,
    epochs=1,
    callbacks=[tensorboard_callback])
@tf.function
def traceme(x):
    return model(x)
logdir="logs/fit1/" + datetime.now().strftime("%Y%m%d-%H%M%S")
writer = tf.summary.create_file_writer(logdir)
tf.summary.trace_on(graph=True)
# Forward pass
traceme(tf.zeros((1, 28, 28, 1)))
with writer.as_default():
    tf.summary.trace_export(name="model_trace", step=0)

How to increase the accuracy of an image classifier?

I made an image classifier using TensorFlow and Keras with a CNN architecture. The model works pretty well (at least on the images I have tested) and has reached an accuracy of 78.87%; now I want to push the accuracy to at least 85%.
Please note:
Dataset: 2 folders. The train folder contains 80 subfolders with 110 images each; the validation folder contains 80 subfolders with 22 images each. Image sizes range from 240-260 pixels wide by 40-60 pixels high.
Below is the code I used to create, save and test my model:
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
# dimensions of our images.
img_width, img_height = 251, 54
#img_width, img_height = 150, 33
train_data_dir = 'C:/Users/ADEM/Desktop/msi_youssef/PFE/test/numbers/data/train'
validation_data_dir = 'C:/Users/ADEM/Desktop/msi_youssef/PFE/test/numbers/data/valid'
nb_train_samples = 8800 #10435
nb_validation_samples = 1763 #2051
epochs = 30 #20 # how much time you want to train your model on the data
batch_size = 32 #16
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(80)) #1
model.add(Activation('softmax')) #sigmoid
model.compile(loss='sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])  # categorical_crossentropy / binary_crossentropy
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.1,
    zoom_range=0.05,
    horizontal_flip=False)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='binary')
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)
model.save('testX_2.h5') #first_try
Last epoch result:
Epoch 30/30
275/275 [==============================] - 38s 137ms/step - loss: 0.9406 - acc: 0.7562 - val_loss: 0.1268 - val_acc: 0.9688
How I tested my model:
from keras.models import load_model
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
import os
result = {"0":"0", "1":"0.25", "2":"0.5", "3":"0.75", "4":"1", "5":"1.25", "6":"1.5", "7":"1.75",
"47":"2", "48":"2.25", "49":"2.5", "50":"2.75", "52":"3","53":"3.25", "54":"3.5", "55":"3.75", "56":"4", "57":"4.25", "58":"4.5",
"59":"4.75","60":"5", "61":"5.25", "62":"5.5", "63":"5.75", "64":"6", "65":"6.25","66":"6.5", "67":"6.75", "68":"7", "69":"7.25",
"70":"7.5", "71":"7.75", "72":"8", "73":"8.25", "74":"8.5", "75":"8.75", "76":"9", "77":"9.25", "78":"9.5", "79":"9.75", "8":"10",
"9":"10.25", "10":"10.5", "11":"10.75", "12":"11", "13":"11.25", "14":"11.5", "15":"11.75", "16":"12","17":"12.25", "18":"12.5",
"19":"12.75", "20":"13", "21":"13.25", "22":"13.5", "23":"13.75","24":"14", "25":"14.25", "26":"14.5", "27":"14.75", "28":"15",
"29":"15.25", "30":"15.5", "31":"15.75", "32":"16", "33":"16.25", "34":"16.5", "35":"16.75", "36":"17", "37":"17.25", "38":"17.5",
"39":"17.75", "40":"18", "41":"18.25", "42":"18.5", "43":"18.75", "44":"19", "45":"19.25", "46":"19.5", "51":"20"}
def load_image(img_path, show=False):
    img = image.load_img(img_path, target_size=(251, 54))
    img_tensor = image.img_to_array(img)             # (height, width, channels)
    img_tensor = np.expand_dims(img_tensor, axis=0)  # (1, height, width, channels): the model expects a batch dimension
    img_tensor /= 255.                               # imshow expects values in the range [0, 1]
    if show:
        plt.imshow(img_tensor[0])
        plt.axis('off')
        plt.show()
    return img_tensor
if __name__ == "__main__":
    # load model
    model = load_model('C:/Users/ADEM/Desktop/msi_youssef/PFE/other_shit/testX_2.h5')
    # image path
    img_path = 'C:/Users/ADEM/Desktop/msi_youssef/PFE/dataset/5.75/a.png'
    # load a single image
    new_image = load_image(img_path)
    # check prediction
    #pred = model.predict(new_image)
    pred = model.predict_classes(new_image)
    #print(pred[0])
    print(result[str(pred[0])])
Taking into account all the information about the dataset, and considering that your CNN model already reaches around 80% accuracy, you can start by training the model for a higher number of epochs (typically > 100). That should give your model the required boost.
If that alone does not work, you can implement:
Transformation/augmentation: perform transformations/augmentation on the images before feeding them into the model.
Tweak the model: make changes to the model layers and do hyperparameter tuning.
You can follow this article to learn more.
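For the augmentation route, here is a sketch of a slightly richer ImageDataGenerator configuration (the ranges are illustrative assumptions worth tuning for your data; horizontal_flip stays off because mirrored digit images would be wrong):
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=5,         # small random rotations
    width_shift_range=0.05,   # slight horizontal jitter
    height_shift_range=0.05,  # slight vertical jitter
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=False)    # digits must not be mirrored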

ValueError: Error when checking target: expected dense_2 to have shape (None, 2) but got array with shape (321, 3)

I want to create an image classifier using Keras and train it with a few example images. Then I will be using pre-trained models and adding a few layers at the end, but first I want to understand Keras and CNNs.
My console prints the following error:
ValueError: Error when checking target: expected dense_2 to have shape (None, 2) but got array with shape (321, 3)
Here is my code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import time
import numpy as np
import cv2
from PIL import Image
import keras
import glob
from keras.models import Sequential
from keras.models import load_model
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
from sklearn.preprocessing import LabelBinarizer
labels = ['buena', 'mala', 'otro']
def to_one_hot(labels, ys):
    result = np.zeros((len(ys), len(labels)))
    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            result[i, j] = int(ys[i] == labels[j])
    return result
def build_dataset(labels):
    num_classes = len(labels)
    x = []
    y = []
    for label in labels:
        for filename in glob.glob('./tf_files/papas_fotos/' + label + '/*.jpg'):
            img = cv2.imread(filename)
            img = np.resize(img, (100, 100, 3))
            x.append(img)
            y.append(label)
    y = to_one_hot(labels, y)
    # y = keras.utils.to_categorical(y, num_classes=3)
    x = np.array(x)
    x_train = x[20:]
    y_train = y[20:]
    x_test = x[:19]
    y_test = y[:19]
    print(x.shape, y.shape)
    return x_train, y_train, x_test, y_test
model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
x_train, y_train, x_test, y_test = build_dataset(labels)
model = load_model('thebestmodel.h5')
print (model)
model.fit(x_train, y_train, batch_size=32, epochs=20)
score = model.evaluate(x_test, y_test, batch_size=32)
model.save('thebestmodel.h5')
print (score)
What mistake am I making? I think it may be the size of my one-hot encoded labels, but I can't make it work.
Thanks!
Although your code was fixed for this specific error, note that you're loading a saved model: model = load_model('thebestmodel.h5'). This undoes everything before that line, replacing the freshly built and compiled model with the saved one.
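A minimal way to keep the saved model without clobbering a freshly built one (a sketch; it assumes thebestmodel.h5 exists only after a first successful run):
import os
from keras.models import load_model

if os.path.exists('thebestmodel.h5'):
    # Resume from the previously saved model instead of the freshly built one.
    model = load_model('thebestmodel.h5')
# Otherwise, keep the model defined and compiled above.
model.fit(x_train, y_train, batch_size=32, epochs=20)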