PyCharm: Changes made have no effect, how to explain this behaviour? - tensorflow

This problem only happens in PyCharm:
I made a very simple NN based on the TF 2.0 website tutorial. The weird thing about it is that when I change batch_size, training keeps going with the old one, as if I had done nothing. In fact, nothing I change seems to have any effect.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255

class Prototype(tf.keras.models.Model):
    def __init__(self, **kwargs):
        super(Prototype, self).__init__(**kwargs)
        self.l1 = layers.Dense(64, activation='relu', name='dense_1')
        self.l2 = layers.Dense(64, activation='relu', name='dense_2')
        self.l3 = layers.Dense(10, activation='softmax', name='predictions')

    def call(self, ip):
        x = self.l1(ip)
        x = self.l2(x)
        return self.l3(x)

model = Prototype()
model.build(input_shape=(None, 784,))

optimizer = keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy()

batch_size = 250
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)

def train_one_epoch():
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        print(x_batch_train.shape)
        with tf.GradientTape() as tape:
            logits = model(x_batch_train)  # Logits for this minibatch
            loss_value = loss_fn(y_batch_train, logits)
        grads = tape.gradient(loss_value, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
I run train_one_epoch() and it trains for one epoch. Then I change the batch size and rebuild the dataset object so it yields chunks of the new size, BUT when I run train_one_epoch() again, it keeps going with the old batch_size.
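Roughly, the change between runs looks like this (the new batch size is just for illustration):

batch_size = 500  # new value, for illustration
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
train_one_epoch()  # expected to print (500, 784) batches, but the console still shows the old shape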
Proof: (screenshot of the console output, still showing the old batch shape, omitted)
Related

sklearn classification_report ValueError: Found input variables with inconsistent numbers of samples: [18, 576]

I'm working on a CNN classification problem. I used Keras and a pre-trained model. Now I want to evaluate my model and need the precision, recall, and F1 score. When I use sklearn.metrics classification_report I get the above error. I know where the numbers come from: the first is the length of my test dataset in batches and the second is the number of actual samples (predictions) in there. However, I don't know how to "convert" them.
See my code below:
# load train_ds
train_ds = tf.keras.utils.image_dataset_from_directory(
    directory='/gdrive/My Drive/Flies_dt/224x224',
    image_size=(224, 224),
    validation_split=0.40,
    subset="training",
    seed=123,
    shuffle=True)

# load val_ds
val_ds = tf.keras.utils.image_dataset_from_directory(
    directory='/gdrive/My Drive/Flies_dt/224x224',
    image_size=(224, 224),
    validation_split=0.40,
    subset="validation",
    seed=123,
    shuffle=True)

# move some batches of val_ds to test_ds
test_ds = val_ds.take((1 * len(val_ds)) // 2)
print('test_ds =', len(test_ds))
val_ds = val_ds.skip((1 * len(val_ds)) // 2)
print('val_ds =', len(val_ds))  # test_ds = 18, val_ds = 18

# Load Model
base_model = keras.applications.vgg19.VGG19(
    include_top=False,
    weights='imagenet',
    input_shape=(224, 224, 3)
)

# Freeze base_model
base_model.trainable = False

inputs = keras.Input(shape=(224, 224, 3))
x = data_augmentation(inputs)  # apply data augmentation

# Preprocessing
x = tf.keras.applications.vgg19.preprocess_input(x)

# The base model contains batchnorm layers. We want to keep them in inference mode
# when we unfreeze the base model for fine-tuning, so we make sure that the
# base_model is running in inference mode here.
x = base_model(x, training=False)
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.2)(x)  # Regularize with dropout
outputs = keras.layers.Dense(5, activation="softmax")(x)
model = keras.Model(inputs, outputs)

model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="Adam",
    metrics=['acc']
)
model.fit(train_ds, epochs=8, validation_data=val_ds, callbacks=[tensorboard_callback])

# Unfreeze the base_model. Note that it keeps running in inference mode
# since we passed `training=False` when calling it. This means that
# the batchnorm layers will not update their batch statistics.
# This prevents the batchnorm layers from undoing all the training
# we've done so far.
base_model.trainable = True
model.summary()

model.compile(
    optimizer=keras.optimizers.Adam(learning_rate=0.000001),  # Low learning rate
    loss="sparse_categorical_crossentropy",
    metrics=['acc']
)
model.fit(train_ds, epochs=5, validation_data=val_ds)

# Evaluate
from sklearn.metrics import classification_report
y_pred = model.predict(test_ds, batch_size=64, verbose=1)
y_pred_bool = np.argmax(y_pred, axis=1)
print(classification_report(test_ds, y_pred_bool))
I also tried something like this, but I'm not sure if this gives me the correct values for multiclass classification.
from keras import backend as K

def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))

# compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc', f1_m, precision_m, recall_m])
# fit the model
history = model.fit(Xtrain, ytrain, validation_split=0.3, epochs=10, verbose=0)
# evaluate the model
loss, accuracy, f1_score, precision, recall = model.evaluate(Xtest, ytest, verbose=0)
This is a lot, sorry. I hope somebody can help.
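For reference, classification_report expects per-sample ground-truth labels rather than the dataset object itself; a minimal sketch of collecting them from the batched test_ds (assuming it yields (image, label) batches and iterates in a deterministic order, i.e. without reshuffling):

import numpy as np
from sklearn.metrics import classification_report

# Gather the integer labels batch by batch so they line up with model.predict(test_ds).
y_true = np.concatenate([labels.numpy() for _, labels in test_ds], axis=0)

y_pred = model.predict(test_ds, verbose=1)
y_pred_labels = np.argmax(y_pred, axis=1)

print(classification_report(y_true, y_pred_labels))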

Optimizers with momentum in a custom training loop

If I am writing a custom training loop from scratch, will the Adam optimizer still be applied correctly?
I doubt this because the optimizer is applied manually to each batch, and no line tells the program to remember the previous gradients. I wonder whether, in this case, Adam degenerates into a simple one-step gradient descent method.
Here is the example code adapted from https://www.tensorflow.org/guide/keras/writing_a_training_loop_from_scratch. I simply changed their SGD optimizer to Adam:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np

inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)

# Instantiate an optimizer.
optimizer = keras.optimizers.Adam(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))

# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]

# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)

# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)

epochs = 2
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))

    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):

        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        with tf.GradientTape() as tape:

            # Run the forward pass of the layer.
            # The operations that the layer applies
            # to its inputs are going to be recorded
            # on the GradientTape.
            logits = model(x_batch_train, training=True)  # Logits for this minibatch

            # Compute the loss value for this minibatch.
            loss_value = loss_fn(y_batch_train, logits)

        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
        grads = tape.gradient(loss_value, model.trainable_weights)

        # Run one step of gradient descent by updating
        # the value of the variables to minimize the loss.
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
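One rough way to check that Adam really keeps state between batches is to inspect the optimizer object itself; a minimal sketch (the exact API surface varies slightly across TF/Keras versions):

# After the first apply_gradients call, the Adam instance owns its own state:
# per-weight first and second moment estimates plus an iteration counter.
# Because the same `optimizer` object is reused on every batch, this state
# persists across steps, so Adam does not reduce to one-step gradient descent.
print("steps taken:", int(optimizer.iterations))
print("state variables:", len(optimizer.variables()))  # a property rather than a method in newer Keras versions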

Why do trainable_variables not change after training?

I went over a basic example of TF 2.0 containing very simple code:
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
import cProfile

# Fetch and format the mnist data
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
    (tf.cast(mnist_images[..., tf.newaxis] / 255, tf.float32),
     tf.cast(mnist_labels, tf.int64)))
dataset = dataset.shuffle(1000).batch(32)

# Build the model
mnist_model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu',
                           input_shape=(None, None, 1)),
    tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10)
])

for images, labels in dataset.take(1):
    print("Logits: ", mnist_model(images[0:1]).numpy())

optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss_history = []

def train_step(model, images, labels):
    with tf.GradientTape() as tape:
        logits = model(images, training=True)
        # Add asserts to check the shape of the output.
        tf.debugging.assert_equal(logits.shape, (32, 10))
        loss_value = loss_object(labels, logits)
    loss_history.append(loss_value.numpy().mean())
    grads = tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

def train(epochs):
    for epoch in range(epochs):
        for (batch, (images, labels)) in enumerate(dataset):
            train_step(mnist_model, images, labels)
        print('Epoch {} finished'.format(epoch))
I trained it and saved trainable_variables before and after training as follows:
t0 = mnist_model.trainable_variables
train(epochs=3)
t1 = mnist_model.trainable_variables
diff = tf.reduce_mean(tf.abs(t0[0] - t1[0]))
# whether indexing [0] or [1] etc., diff comes out the same
print(diff.numpy())
They are the same!
So am I checking something incorrectly? If that is the case, how can I observe the updated variables correctly?
You aren't creating new arrays of variables, just two references to the same objects.
Try this instead:
t0 = np.array(mnist_model.trainable_variables)
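A more explicit way to compare before and after is to snapshot the values themselves rather than references to the live variables; a minimal sketch using .numpy() copies:

import numpy as np

# Copy the current values out of the variables so the snapshot cannot change
# when training updates the variables in place.
t0 = [v.numpy() for v in mnist_model.trainable_variables]
train(epochs=3)
t1 = [v.numpy() for v in mnist_model.trainable_variables]

# Now the difference reflects the actual parameter updates.
print(np.mean(np.abs(t0[0] - t1[0])))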

Why does TensorFlow have worse accuracy than Keras in a direct comparison?

I made a direct comparison between TensorFlow and Keras with the same parameters and the same dataset (MNIST).
The strange thing is that Keras reaches about 96% accuracy in 10 epochs, while TensorFlow only reaches about 70% in 10 epochs. I have run this code many times on the same instance and this inconsistency always occurs.
Even when I set 50 epochs for TensorFlow, the final accuracy only reaches about 90%.
Keras code:
import keras
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# One hot encoding
from keras.utils import np_utils
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# Changing the shape of input images and normalizing
x_train = x_train.reshape((60000, 784))
x_test = x_test.reshape((10000, 784))
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
# Creating the neural network
model = Sequential()
model.add(Dense(30, input_dim=784, kernel_initializer='normal', activation='relu'))
model.add(Dense(30, kernel_initializer='normal', activation='relu'))
model.add(Dense(10, kernel_initializer='normal', activation='softmax'))
# Optimizer
optimizer = keras.optimizers.Adam()
# Loss function
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
# Training
model.fit(x_train, y_train, epochs=10, batch_size=200, validation_data=(x_test, y_test), verbose=1)
# Checking the final accuracy
accuracy_final = model.evaluate(x_test, y_test, verbose=0)
print('Model Accuracy: ', accuracy_final)
TensorFlow code: (x_train, x_test, y_train, y_test are the same as the input for the Keras code above)
import tensorflow as tf

# Epochs parameters
epochs = 10
batch_size = 200

# Neural network parameters
n_input = 784
n_hidden_1 = 30
n_hidden_2 = 30
n_classes = 10

# Placeholders x, y
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

# Creating the first layer
w1 = tf.Variable(tf.random_normal([n_input, n_hidden_1]))
b1 = tf.Variable(tf.random_normal([n_hidden_1]))
layer_1 = tf.nn.relu(tf.add(tf.matmul(x, w1), b1))

# Creating the second layer
w2 = tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]))
b2 = tf.Variable(tf.random_normal([n_hidden_2]))
layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, w2), b2))

# Creating the output layer
w_out = tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
bias_out = tf.Variable(tf.random_normal([n_classes]))
output = tf.matmul(layer_2, w_out) + bias_out

# Loss function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))

# Optimizer
optimizer = tf.train.AdamOptimizer().minimize(cost)

# Making predictions
predictions = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))

# Accuracy
accuracy = tf.reduce_mean(tf.cast(predictions, tf.float32))

# Variables that will be used in the training cycle
train_size = x_train.shape[0]
total_batches = train_size / batch_size

# Initializing the variables
init = tf.global_variables_initializer()

# Opening the session
with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(epochs):

        # Loop through all batch iterations
        for i in range(0, train_size, batch_size):
            batch_x = x_train[i:i + batch_size]
            batch_y = y_train[i:i + batch_size]

            # Fit training
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})

        # Running accuracy (with test data) on each epoch
        acc_val = sess.run(accuracy, feed_dict={x: x_test, y: y_test})

        # Showing results after each epoch
        print("Epoch: ", "{}".format((epoch + 1)))
        print("Accuracy_val = ", "{:.3f}".format(acc_val))

    print("Training Completed!")

    # Checking the final accuracy
    checking = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
    accuracy_final = tf.reduce_mean(tf.cast(checking, tf.float32))
    print("Model Accuracy:", accuracy_final.eval({x: x_test, y: y_test}))
I'm running everything in the same instance. Can anyone explain this inconsistency?
I think it's the initialization that's the culprit. For example, one real difference is that you initialize the biases in TF with random_normal, which isn't best practice; in fact, Keras defaults to initializing the biases to zero, which is best practice. You don't override this, since you only set kernel_initializer, but not bias_initializer, in your Keras code.
Furthermore, things are worse for the weight initializers. You are using RandomNormal for Keras, defined like so:
keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=None)
But in TF you use tf.random.normal:
tf.random.normal(shape, mean=0.0, stddev=1.0, dtype=tf.dtypes.float32, seed=None, name=None)
I can tell you that using a standard deviation of 0.05 is reasonable for initialization, but using 1.0 is not.
I suspect that if you changed these parameters, things would look better. But if they don't, I'd suggest dumping the TensorFlow graph for both models and just checking by hand to see the differences. The graphs are small enough in this case to double-check.
To some extent this highlights the difference in philosophy between Keras and TF. Keras tries hard to set good defaults for NN training that correspond to what is known to work. But TensorFlow is completely agnostic - you have to know those practices and explicitly code them in. The standard deviation thing is a stellar example: of course it should be 1 by default in a mathematical function, but 0.05 is a good value if you know it will be used to initialize an NN layer.
Answer originally provided by Dmitriy Genzel on Quora.
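For illustration, a rough sketch of those suggested changes in the TF code (weights drawn with a small standard deviation to match Keras's RandomNormal(stddev=0.05), biases initialized to zero to match Keras's default):

# Weights: normal initializer with a small standard deviation
w1 = tf.Variable(tf.random_normal([n_input, n_hidden_1], stddev=0.05))
w2 = tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], stddev=0.05))
w_out = tf.Variable(tf.random_normal([n_hidden_2, n_classes], stddev=0.05))

# Biases: zeros, matching the Keras default bias_initializer
b1 = tf.Variable(tf.zeros([n_hidden_1]))
b2 = tf.Variable(tf.zeros([n_hidden_2]))
bias_out = tf.Variable(tf.zeros([n_classes]))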

keras tensorboard: plot train and validation scalars in the same figure

So I am using TensorBoard within Keras. In TensorFlow one could use two different summary writers for the train and validation scalars, so that TensorBoard could plot them in the same figure. Something like the figure in
TensorBoard - Plot training and validation losses on the same graph?
Is there a way to do this in Keras?
Thanks.
To handle the validation logs with a separate writer, you can write a custom callback that wraps around the original TensorBoard methods.
import os
import tensorflow as tf
from keras.callbacks import TensorBoard

class TrainValTensorBoard(TensorBoard):
    def __init__(self, log_dir='./logs', **kwargs):
        # Make the original `TensorBoard` log to a subdirectory 'training'
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)

        # Log the validation metrics to a separate subdirectory
        self.val_log_dir = os.path.join(log_dir, 'validation')

    def set_model(self, model):
        # Setup writer for validation metrics
        self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)

    def on_epoch_end(self, epoch, logs=None):
        # Pop the validation logs and handle them separately with
        # `self.val_writer`. Also rename the keys so that they can
        # be plotted on the same figure with the training metrics
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if k.startswith('val_')}
        for name, value in val_logs.items():
            summary = tf.Summary()
            summary_value = summary.value.add()
            summary_value.simple_value = value.item()
            summary_value.tag = name
            self.val_writer.add_summary(summary, epoch)
        self.val_writer.flush()

        # Pass the remaining logs to `TensorBoard.on_epoch_end`
        logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
        super(TrainValTensorBoard, self).on_epoch_end(epoch, logs)

    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()
In __init__, two subdirectories are set up for training and validation logs
In set_model, a writer self.val_writer is created for the validation logs
In on_epoch_end, the validation logs are separated from the training logs and written to file with self.val_writer
Using the MNIST dataset as an example:
from keras.models import Sequential
from keras.layers import Dense
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10,
          validation_data=(x_test, y_test),
          callbacks=[TrainValTensorBoard(write_graph=False)])
You can then visualize the two curves in the same figure in TensorBoard.
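To view them, launch TensorBoard on the parent log directory (the default './logs' used above), for example:
tensorboard --logdir=./logs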
EDIT: I've modified the class a bit so that it can be used with eager execution.
The biggest change is that I use tf.keras in the following code. It seems that the TensorBoard callback in standalone Keras does not support eager mode yet.
import os
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.python.eager import context

class TrainValTensorBoard(TensorBoard):
    def __init__(self, log_dir='./logs', **kwargs):
        self.val_log_dir = os.path.join(log_dir, 'validation')
        training_log_dir = os.path.join(log_dir, 'training')
        super(TrainValTensorBoard, self).__init__(training_log_dir, **kwargs)

    def set_model(self, model):
        if context.executing_eagerly():
            self.val_writer = tf.contrib.summary.create_file_writer(self.val_log_dir)
        else:
            self.val_writer = tf.summary.FileWriter(self.val_log_dir)
        super(TrainValTensorBoard, self).set_model(model)

    def _write_custom_summaries(self, step, logs=None):
        logs = logs or {}
        val_logs = {k.replace('val_', ''): v for k, v in logs.items() if 'val_' in k}
        if context.executing_eagerly():
            with self.val_writer.as_default(), tf.contrib.summary.always_record_summaries():
                for name, value in val_logs.items():
                    tf.contrib.summary.scalar(name, value.item(), step=step)
        else:
            for name, value in val_logs.items():
                summary = tf.Summary()
                summary_value = summary.value.add()
                summary_value.simple_value = value.item()
                summary_value.tag = name
                self.val_writer.add_summary(summary, step)
        self.val_writer.flush()

        logs = {k: v for k, v in logs.items() if 'val_' not in k}
        super(TrainValTensorBoard, self)._write_custom_summaries(step, logs)

    def on_train_end(self, logs=None):
        super(TrainValTensorBoard, self).on_train_end(logs)
        self.val_writer.close()
The idea is the same: check the source code of the TensorBoard callback, see what it does to set up the writer, and do the same thing in this custom callback.
Again, you can use the MNIST data to test it,
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.train import AdamOptimizer
tf.enable_eager_execution()
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
y_train = y_train.astype(int)
y_test = y_test.astype(int)
model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(784,)))
model.add(Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer=AdamOptimizer(), metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10,
          validation_data=(x_test, y_test),
          callbacks=[TrainValTensorBoard(write_graph=False)])
If you are using TensorFlow 2.0, you now get this by default using the Keras TensorBoard callback. (When using TensorFlow with Keras, make sure you're using tensorflow.keras.)
See this tutorial: https://www.tensorflow.org/tensorboard/scalars_and_keras
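For example, here is a minimal TF2 sketch (reusing the MNIST setup from above) where the stock callback already writes separate 'train' and 'validation' runs under the chosen log directory, so both curves appear on the same scalar chart:

import tensorflow as tf
from tensorflow import keras

(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = x_train.reshape(60000, 784).astype('float32') / 255
x_test = x_test.reshape(10000, 784).astype('float32') / 255

model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(784,)),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

# The built-in callback writes training and validation summaries to
# subdirectories of log_dir, so TensorBoard overlays the two curves.
tensorboard_cb = keras.callbacks.TensorBoard(log_dir='./logs')
model.fit(x_train, y_train, epochs=5,
          validation_data=(x_test, y_test),
          callbacks=[tensorboard_cb])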