How can you save a keras model in 64bit format? - tensorflow

Calling tf.keras.backend.set_floatx('float64') puts TensorFlow in 64-bit mode for the current runtime, but I've found that even just saving the model and reloading it is enough to truncate the 64-bit precision and change the model's outputs.
In particular, I would like to save the model in such a way that it loads automatically in 64-bit mode and never loses its precision.
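A minimal sketch of the approach that seems to work, assuming the float64 policy is set before the model is built and again before it is reloaded (the path and layer sizes here are illustrative):

import tensorflow as tf
# Set the global Keras float type *before* any layer is created, so every
# weight is created as tf.float64.
tf.keras.backend.set_floatx('float64')
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=(4,)),
    tf.keras.layers.Dense(2),
])
model.save('model_f64.h5')  # illustrative path
# In a fresh runtime: set float64 again before loading, then verify that the
# restored weights kept their dtype.
tf.keras.backend.set_floatx('float64')
restored = tf.keras.models.load_model('model_f64.h5')
print([w.dtype for w in restored.weights])  # expect [tf.float64, tf.float64]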

Also note the optimizer's precision requirements: its default epsilon of 1e-07 (used by the Nadam optimizer in the sample below) bounds the smallest differences it can resolve, which is worth keeping in mind when you care about 64-bit precision.
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import h5py
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)  # create parent directories as needed
    print("Create directory: " + checkpoint_dir)
if not exists(database_buffer_dir):
    os.makedirs(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.cifar10.load_data()
# Create hdf5 file
hdf5_file = h5py.File(database_buffer, mode='w')
# Train images
hdf5_file['x_train'] = train_images
hdf5_file['y_train'] = train_labels
# Test images
hdf5_file['x_test'] = test_images
hdf5_file['y_test'] = test_labels
hdf5_file.close()
# Visualize dataset train sample
hdf5_file = h5py.File(database_buffer, mode='r')
x_train = hdf5_file['x_train'][0: 10000]
x_test = hdf5_file['x_test'][0: 100]
y_train = hdf5_file['y_train'][0: 10000]
y_test = hdf5_file['y_test'][0: 100]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
    tf.keras.layers.Normalization(mean=3., variance=2.),
    tf.keras.layers.Normalization(mean=4., variance=6.),
    tf.keras.layers.Conv2DTranspose(2, 3, activation='relu', padding="same", name="Conv2DTranspose_01"),
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid'),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(4 * 256),
    tf.keras.layers.Reshape((4 * 256, 1)),
    tf.keras.layers.LSTM(128, return_sequences=True, return_state=False),
    tf.keras.layers.LSTM(128, name='LSTM256'),
    tf.keras.layers.Dropout(0.2),
])
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation='relu', name='dense64'))
model.add(tf.keras.layers.Dense(7))
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path):
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=1, validation_data=(x_train, y_train))
model.save_weights(checkpoint_path)
[ Output ]:

Related

TypeError: Singleton array cannot be considered a valid collection

Tried using k-cross validation from this link but with my own dataset and I got this error:
TypeError: Singleton array array(<BatchDataset element_spec=(TensorSpec(shape=(None, 180, 180, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int32, name=None))>,
dtype=object) cannot be considered a valid collection.
Here is my code:
import numpy as np
import PIL
import tensorflow as tf
import os
from sklearn.model_selection import KFold
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.models import Sequential
import pathlib
num_folds = 10
acc_per_fold = []
loss_per_fold = []
tf.get_logger().setLevel('ERROR')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
dataset_path = "data"
fullPath = os.path.abspath("./" + dataset_path)
#data_dir = tf.keras.utils.get_file('photos', origin='file://'+dataset_path, extract=True)
data_dir = pathlib.Path(fullPath)
image_count = len(list(data_dir.glob('*/*.jpg')))+len(list(data_dir.glob('*/*.png')))
print(image_count)
#man = list(data_dir.glob('man/*'))
#im = PIL.Image.open(str(man[28]))
#im.show()
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    labels='inferred',
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    labels='inferred',
    validation_split=0.2,
    subset="validation",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
normalization_layer = layers.Rescaling(1./255)
# Define the K-fold Cross Validator
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=42)
fold_no = 1
for train, test in kfold.split(train_ds, val_ds):
    # Define the model architecture
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(no_classes, activation='softmax'))
    # Compile the model
    model.compile(loss=loss_function,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # Generate a print
    print('------------------------------------------------------------------------')
    print(f'Training for fold {fold_no} ...')
    # Fit data to model
    history = model.fit(inputs[train], targets[train],
                        batch_size=batch_size,
                        epochs=no_epochs,
                        verbose=verbosity)
    # Generate generalization metrics
    scores = model.evaluate(inputs[test], targets[test], verbose=0)
    print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
    acc_per_fold.append(scores[1] * 100)
    loss_per_fold.append(scores[0])
    # Increase fold number
    fold_no = fold_no + 1
# == Provide average scores ==
print('------------------------------------------------------------------------')
print('Score per fold')
for i in range(0, len(acc_per_fold)):
    print('------------------------------------------------------------------------')
    print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Accuracy: {acc_per_fold[i]}%')
print('------------------------------------------------------------------------')
print('Average scores for all folds:')
print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
print(f'> Loss: {np.mean(loss_per_fold)}')
print('------------------------------------------------------------------------')
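No answer was recorded here, but the error itself follows from sklearn's API: KFold.split expects an indexable collection such as a NumPy array, and a BatchDataset is one opaque object, hence the "Singleton array" message. A minimal sketch of one workaround, assuming the images fit in memory (the variable names below are illustrative):

import numpy as np
# Materialize the batched dataset into aligned arrays in a single pass,
# so images and labels stay in the same order.
xs, ys = [], []
for batch_images, batch_labels in train_ds:
    xs.append(batch_images.numpy())
    ys.append(batch_labels.numpy())
images = np.concatenate(xs, axis=0)
labels = np.concatenate(ys, axis=0)
kfold = KFold(n_splits=num_folds, shuffle=True, random_state=42)
for train_idx, test_idx in kfold.split(images):
    x_tr, y_tr = images[train_idx], labels[train_idx]
    x_te, y_te = images[test_idx], labels[test_idx]
    # build, compile, fit on (x_tr, y_tr) and evaluate on (x_te, y_te) as before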

Model.predict tensorflow shape is read incorrectly

I started making a sequential network using tensorflow for food classification.
When I created the simplest model, model.predict(images[99]) gave me the following error:
Input 0 of layer "dense_2" is incompatible with the layer: expected axis -1 of input shape to have value 4096, but received input with shape (32, 64).
It happened even though images[99].shape is (64, 64).
images is an array where every element is a single-channel image: images.shape is (10099, 64, 64).
Model:
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(64, 64)),
    keras.layers.Dense(4096, activation=tf.nn.relu),
    keras.layers.Dense(101, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss=tf.keras.losses.MeanSquaredError(),
              metrics=['accuracy'])
model.fit(images_tr, categories_tr, epochs=2)
It also looks absurd to me because when I try:
model.predict(np.zeros((64, 64)))
I get the same error.
Also, when I evaluate with model.evaluate(images) it works perfectly fine.
I tried changing the TensorFlow version from 2.9.0 to 2.2.2, but that didn't help.
That is because predict treats the first axis of its input as the batch axis: a (64, 64) array is read as 64 separate vectors of length 64, which the model then groups into batches of 32, hence the (32, 64) in the error. A single image needs a leading batch dimension, giving it shape (1, 64, 64).
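A minimal illustration of that fix, using the images array and model from the question:

import numpy as np
single = np.expand_dims(images[99], axis=0)  # shape (1, 64, 64): a batch of one
prediction = model.predict(single)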
Sample: you can also calculate the input shape for the target layer explicitly, as in the sample below.
import tensorflow as tf
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
start = 3
limit = 12291
delta = 3
# Create DATA
sample = tf.range( start, limit, delta )
sample = tf.cast( sample, dtype=tf.int64 ).numpy()
sample = tf.constant( [sample, sample], shape=( 2, 4096, 1 ) )
label = tf.constant([[0.2, 0.8, 0.8], [0.0, 0.0, 0.8]], dtype=tf.float32)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Class / Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class MyDenseLayer(tf.keras.layers.Layer):
    def __init__(self, num_outputs):
        super(MyDenseLayer, self).__init__()
        self.num_outputs = num_outputs

    def build(self, input_shape):
        # kernel shape: (input_dim, num_outputs)
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]), self.num_outputs])

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)
input_layer = tf.keras.layers.InputLayer(input_shape=( int(sample.shape[-2] / 64), 64, 1 ))
layer_01 = MyDenseLayer(3)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
    input_layer,
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(36, activation='relu'),
    layer_01,
])
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((
    tf.constant(tf.cast(sample, dtype=tf.int64), shape=(2, 1, 64, 64), dtype=tf.int64),
    tf.constant(label, shape=(2, 3, 1), dtype=tf.float32)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
    learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
    name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.BinaryCrossentropy(
    from_logits=False,
    label_smoothing=0.0,
    axis=-1,
    reduction=tf.keras.losses.Reduction.AUTO,
    name='binary_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(dataset, epochs=5)  # batch_size must not be passed when fitting on a tf.data.Dataset
predictions = model.predict(tf.constant(sample[1,:,:], shape=(1, int(sample.shape[-2] / 64), 64, 1)))
print( predictions )
[ Output ]:
Epoch 1/10000
2/2 [==============================] - 1s 4ms/step - loss: 10.8326 - accuracy: 0.0000e+00
Epoch 2/10000
2/2 [==============================] - 0s 5ms/step - loss: 10.8326 - accuracy: 0.0000e+00
[[ 0.0, 1.0, 0.8 ]]

Reshape the input for BatchDataset trained model

I trained my TensorFlow model on images after converting them to a BatchDataset:
IMG_size = 224
INPUT_SHAPE = [None, IMG_size, IMG_size, 3] # 4D input
model.fit(x=train_data,
          epochs=EPOCHES,
          validation_data=test_data,
          validation_freq=1,  # check validation metrics every epoch
          callbacks=[tensorboard, early_stopping])
model.compile(
    loss=tf.keras.losses.CategoricalCrossentropy(),
    optimizer=tf.keras.optimizers.Adam(),
    metrics=["accuracy"]
)
model.build(INPUT_SHAPE)
the train_data type is:
tensorflow.python.data.ops.dataset_ops.BatchDataset.
I want to run my model on a single NumPy array or tensor constant, but that is a 3D input of shape TensorShape([224, 224, 3]), not the 4D input the model expects; how can I reshape it?
You can expand the dimensions of your image matrix by using this code:
newImage = tf.expand_dims(Original_Image, axis = 0)
then pass it to the predict function, it will work fine.
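For instance, a small end-to-end check with a dummy image standing in for a real one (the model is the one from the question):

import numpy as np
import tensorflow as tf
image = np.zeros((224, 224, 3), dtype=np.float32)  # stand-in for a real image
batched = tf.expand_dims(image, axis=0)            # shape becomes (1, 224, 224, 3)
prediction = model.predict(batched)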
A fixed target size makes all inputs the same shape, which helps them match the model's input layer; alternatively, you can expand the dimensions of a single image the same way:
img_array = tf.expand_dims(image, 0)  # create a batch of one
As for your INPUT_SHAPE = [None, IMG_size, IMG_size, 3], the leading None is the batch axis: a training dataset built from an image directory already feeds batched 4D tensors into the model, as in the sample below.
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import os
import tensorflow as tf

BATCH_SIZE = 16
IMG_SIZE = (160, 160)
PATH = 'F:\\datasets\\downloads\\sample\\cats_dogs\\training'
training_directory = os.path.join(PATH, 'train')
validation_directory = os.path.join(PATH, 'validation')
train_dataset = tf.keras.utils.image_dataset_from_directory(
    training_directory,
    shuffle=True,
    batch_size=BATCH_SIZE,
    image_size=IMG_SIZE,
    seed=42)
validation_dataset = tf.keras.utils.image_dataset_from_directory(
    validation_directory,
    shuffle=True,
    batch_size=BATCH_SIZE,
    image_size=IMG_SIZE,
    seed=42)
class_names = train_dataset.class_names
print( "class_names: " + str( class_names ) )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Model ( examine input layer )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
inputs = tf.keras.Input(shape=(160, 160, 3))
# ... the layers that produce `outputs` from `inputs` are elided here ...
model = tf.keras.Model(inputs, outputs)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(train_dataset, epochs=initial_epochs, validation_data=validation_dataset)
...

HIGH Resolution Image and Colab PRO Crash

I have 1700 images, each 1000x1000 in height and width. They contain fine details, so I would prefer to keep this size. Now my Google Colab Pro session crashes. Please help.
##title IMAGE TO DATA, NORMALIZATION AND AUGMENTATION
#Directories with Subdirectories as Classes for training and validation datasets
%%capture
# Imports needed by this cell (they were missing from the snippet as posted)
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Flatten, Dense
train_dir = '/content/Dataset/Training'
validation_dir = '/content/Dataset/Validation'
# Set batch size and Image Height and Width
batch_size = 32
IMG_HEIGHT, IMG_WIDTH = (1000,1000)
#Image to Data Transform using ImageDataGenerator of Keras
#Image to Data for Training Data
Dataset_Image_Training = ImageDataGenerator(rescale = 1./255, zoom_range=[0.8, 1.5], brightness_range= [0.8, 2.0])
train_data_gen = Dataset_Image_Training.flow_from_directory(
    batch_size=batch_size,
    directory=train_dir,
    shuffle=True,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='binary')
#Image to Data for Validation Data
validation_image_generator = ImageDataGenerator(rescale=1./255, zoom_range=[0.8, 1.5], brightness_range= [0.8, 2.0])
val_data_gen = validation_image_generator.flow_from_directory(
    batch_size=batch_size,
    directory=validation_dir,
    shuffle=True,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='binary')
#Check Classes in Dataset
train_data_gen.class_indices
##title Deep Learning CNN Model with Keras Seqential with **Dropout**
#%%capture
model = Sequential([
    Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPool2D(2, 2),
    Dropout(0.5),
    Conv2D(64, (3, 3), padding='same', activation='relu'),
    MaxPool2D(2, 2),
    Dropout(0.5),
    Conv2D(128, (3, 3), padding='same', activation='relu'),
    MaxPool2D(2, 2),
    Dropout(0.5),
    Conv2D(256, (3, 3), padding='same', activation='relu'),
    MaxPool2D(2, 2),
    Dropout(0.5),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(1, activation='sigmoid')])
# Model Compilation
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
#Tensorboard Set up
import tensorflow as tf
import datetime
log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
#Checkpoint and earlystop setting
filepath = '/content/drive/My Drive/DL_Model.hdf5'
callbacks_list = [tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_accuracy', mode='max',
                                                     save_best_only=True, save_weights_only=False, verbose=1),
                  tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=15, verbose=1),
                  tensorboard_callback]
#Model Fitting
hist = model.fit(
    train_data_gen,
    steps_per_epoch=None,
    epochs=500,
    validation_data=val_data_gen,
    validation_steps=None,
    callbacks=callbacks_list
)
#Accuracy Print
train_acc = max(hist.history['accuracy'])
val_acc = max(hist.history['val_accuracy'])
train_loss = min(hist.history['loss'])
val_loss = min(hist.history['val_loss'])
print('Training accuracy is')
print(train_acc)
print('Validation accuracy is')
print(val_acc)
print('Training loss is')
print(train_loss)
print('Validation loss is')
print(val_loss)
#Load Tensorboard
%load_ext tensorboard
%tensorboard --logdir logs
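No answer was recorded here, but a back-of-the-envelope memory check suggests the batch size is the first thing to reduce (an assumption, not a confirmed fix): one batch of 32 float32 images at 1000x1000x3 is 32 * 1000 * 1000 * 3 * 4 bytes, roughly 384 MB, before any activations, and the first Conv2D's activations alone are 32 * 1000 * 1000 * 32 * 4 bytes, roughly 4 GB. A minimal sketch of the change, reusing the generator from the code above:

batch_size = 4  # illustrative value; tune to the largest size that fits in RAM
train_data_gen = Dataset_Image_Training.flow_from_directory(
    batch_size=batch_size,
    directory=train_dir,
    shuffle=True,
    target_size=(IMG_HEIGHT, IMG_WIDTH),
    class_mode='binary')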

Keras model failed to learn anything after changing to use tf.data api

I was trying to convert a simple Keras model to use tf.data api for data loading, but somehow the accuracy remains about 10% during the whole 10 epochs.
In comparison, the original code without using tf.data api can easily achieve about 98% accuracy. Did I do anything wrong?
The version using tf.data api
import math
import tensorflow as tf
import numpy as np
batch_size = 32
def load_data():
    mnist = tf.keras.datasets.mnist
    (train_data, train_label), (validation_data, validation_label) = mnist.load_data()
    train_data, validation_data = train_data / 255.0, validation_data / 255.0
    train_label = train_label.astype(np.float32)
    return train_data, train_label

def build_model():
    class MyModel(tf.keras.Model):
        def __init__(self):
            super(MyModel, self).__init__(name='my_model')
            self.flatten = tf.keras.layers.Flatten()
            self.dense_1 = tf.keras.layers.Dense(512, activation=tf.nn.relu)
            self.dropout = tf.keras.layers.Dropout(0.2)
            self.dense_2 = tf.keras.layers.Dense(10, activation=tf.nn.softmax)

        def call(self, inputs):
            x = self.flatten(inputs)
            x = self.dense_1(x)
            x = self.dropout(x)
            y = self.dense_2(x)
            return y

    model = MyModel()
    model.compile(optimizer=tf.train.AdamOptimizer(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
train_data, train_label = load_data()
train_sample_count = len(train_data)
train_dataset = tf.data.Dataset.from_tensor_slices((train_data, train_label))
train_dataset = train_dataset.batch(batch_size)
train_dataset = train_dataset.repeat()
model = build_model()
model.fit(
    train_dataset,
    epochs=10,
    steps_per_epoch=math.ceil(train_sample_count / batch_size)
)
The version without using tf.data api
# load_data and build_model are exactly same as those in the tf.data api version
train_data, train_label = load_data()
model = build_model()
model.fit(
    train_data,
    train_label,
    epochs=10
)