I just implemented an LSTM,
but I'm not sure if I interpreted the structure correctly.
In this context, is testPredict = model.predict(Xtest) the sequence of predicted values, and is therefore ultimately (after reversing the MinMaxScaler) the variable last_value = testPredict[-1] the prediction regarding the future?
from IPython.core.debugger import set_trace
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import time
import yfinance as yf
import sklearn
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import LSTM, Dense, Dropout, Flatten
from sklearn.metrics import mean_squared_error
from keras.layers import ConvLSTM2D
from keras.layers import Bidirectional
from keras.models import model_from_json
df = yf.download(tickers="BTC-USD", period="20wk", interval="60m")
df = df[["Close"]]
df["returns"] = df.Close.pct_change()
df["log_returns"] = np.log(1 + df["returns"])
df.dropna(inplace=True)
X = df[["Close", "log_returns"]].values
scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
X_scaled = scaler.transform(X)
y = [x[0] for x in X_scaled]
split = int(len(X_scaled) * 0.8)
X_train = X_scaled[:split]
X_test = X_scaled[split : len(X_scaled)]
y_train = y[:split]
y_test = y[split : len(y)]
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
n = 24 #analyze the last 24 prices
Xtrain = []
ytrain = []
Xtest = []
ytest = []
for i in range(n, len(X_train)):
    Xtrain.append(X_train[i - n : i, : X_train.shape[1]])
    ytrain.append(y_train[i])
for i in range(n, len(X_test)):
    Xtest.append(X_test[i - n : i, : X_test.shape[1]])
    ytest.append(y_test[i])
val = np.array(ytrain[0])
val = np.c_[val, np.zeros(val.shape)]
scaler.inverse_transform(val)
Xtrain, ytrain = (np.array(Xtrain), np.array(ytrain))
Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], Xtrain.shape[2]))
Xtest, ytest = (np.array(Xtest), np.array(ytest))
Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], Xtest.shape[2]))
model = Sequential()
model.add(LSTM(8, return_sequences=True, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
#model.add(Bidirectional(LSTM(8, return_sequences=True, input_shape=(Xtrain.shape[1], Xtrain.shape[2]))))
model.add(LSTM(4))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="adam")
model.fit(Xtrain, ytrain, epochs=100, validation_data=(Xtest, ytest), batch_size=16, verbose=1)
trainPredict = model.predict(Xtrain)
testPredict = model.predict(Xtest)
trainPredict = np.c_[trainPredict, np.zeros(trainPredict.shape)]
testPredict = np.c_[testPredict, np.zeros(testPredict.shape)]
trainPredict = scaler.inverse_transform(trainPredict)
trainPredict = [x[0] for x in trainPredict]
testPredict = scaler.inverse_transform(testPredict)
testPredict = [x[0] for x in testPredict]
trainScore = mean_squared_error([x[0][0] for x in Xtrain], trainPredict, squared=False)
#print("Train Score: %.2f RMSE" % (trainScore))
testScore = mean_squared_error([x[0][0] for x in Xtest], testPredict, squared=False)
#print("Test Score: %.2f RMSE" % (testScore))
########################################################################################################################
last_value = (testPredict[-1])
I will show you a picture-based result for illustration; it does not change the point that you verify a model by checking that the predicted labels and values match the known criteria (even in reversed order), and sufficient data is required for a working model.
Selecting a few random pictures from the dataset (roughly 6-7 per actor) in reversed order verifies that the model is working when it predicts the outcome for new input; that is all it shows.
The shape of the output can tell you which action to take, but it is not a prediction of the future. In your case, testPredict holds the model's output for each test window, so to read off a prediction you select the best match by its score, with or without softmax; you can also use np.argmax when working with sequence outputs (you need to look at the model's output shape).
To predict real-time data you need to feed in the current variables together with their history, as when building a report, because the model has learned how the values change within that window.
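To make the original question concrete: testPredict[-1] is the model's output for the last test window, not a value from the future. A minimal sketch (my addition, separate from the sample below) of how a genuine one-step-ahead forecast could be made with the question's own variables (model, scaler, X_scaled, n):
last_window = X_scaled[-n:]                     # most recent n rows: scaled Close and log_returns
last_window = np.expand_dims(last_window, 0)    # shape (1, n, 2), as the LSTM expects
next_scaled = model.predict(last_window)        # shape (1, 1), still in scaled units
next_padded = np.c_[next_scaled, np.zeros(next_scaled.shape)]   # pad so the 2-feature scaler can invert it
next_close = scaler.inverse_transform(next_padded)[0, 0]        # forecast for the next hour's Close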
[ Sample ]: an example with an explicit prediction target, where you can visually confirm that the network learns the mapping in both directions; running the network on new input in reversed order is only a sanity check for when you have no additional data.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(1)
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),
tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=50 )
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    predictions = predictions[-1:]
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
I am writing a character-recognition CNN. I have used the EMNIST dataset.
Kaggle Notebook link : https://www.kaggle.com/code/notshrirang/ocr-with-cnn
GitHub Notebook link : https://github.com/NotShrirang/MyOCR
My model does pretty well on the testing dataset, but when I use an image captured with my phone, it never predicts correctly.
What should I do? Please help.
Here is my code snippet for model architecture:
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(kernel_size=(8, 8),filters=128, input_shape=(28, 28, 1), activation="relu"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(kernel_size=(4, 4), filters=64, activation="relu"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(27, activation='softmax')
])
model.compile(
optimizer=tf.keras.optimizers.SGD(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy']
)
model.fit(X_train, y_train, epochs=8, validation_split = 0.2)
Output While training model:
Loss vs Val Loss Plot:
Evaluation:
evaluation = new_model.evaluate(X_test, y_test)
evaluation
I resized the images to (28x28).
Also, all images in training, testing and real data are converted to grayscale.
The dataset has all images with their matrix transposed. I have straightened them.
I tried normalizing the data, but it decreased the val_accuracy so I stopped normalizing.
I tried shuffling the data. It increased the val_accuracy so I kept it.
I tried increasing and decreasing the layers and epochs in the model. It made no difference except to the training time.
I added batch normalization; it only increased the time required to train the model.
There are many techniques to try, not only changing the model:
Increase the amount of data, which also creates more informative inputs.
Use feature-extraction functions and data preprocessing, such as MFCC or Fourier transforms, and input augmentation (blur, rotation, flips, padding, zoom, or random noise); see the sketch after this list.
Create multiple sets of data inputs and train with K-fold validation.
Use random data selection, save and reload the model, and adjust parameters with performance callbacks.
Compare between models, or concatenate models.
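A minimal augmentation sketch (my addition; the Random* preprocessing layers exist in recent TensorFlow versions, and the parameter values here are only illustrative):
import tensorflow as tf
augment = tf.keras.Sequential([
    tf.keras.layers.RandomRotation(0.1),   # small random rotations
    tf.keras.layers.RandomZoom(0.1),       # random zoom in/out
    tf.keras.layers.GaussianNoise(0.05),   # random pixel noise
    # RandomFlip("horizontal") also exists, but flips may not suit character data such as EMNIST
])
# Apply it only during training, e.g. by mapping over a tf.data pipeline:
# dataset = dataset.map(lambda x, y: (augment(x, training=True), y))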
Sample: my image-recognition template, useful when working through functions and discussing solutions.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import pandas as pd
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 13 (2) (3).xlsx', index_col=None, header=[0])
list_label = [ ]
list_Image = [ ]
list_file_actual = [ ]
list_label_actual = [ 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy' ]
for Index, Image, Label in variables.values:
    print( Label )
    # list_label.append( Label )
    image = tf.io.read_file( Image )
    image = tf.io.decode_image(image)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_Image.append(image)
    if Label == 0:
        list_label.append(0)
    else:
        list_label.append(9)
    # if Label == 0:
    #     list_label_actual.append('Candidt Kibt')
    # else:
    #     list_label_actual.append('Pikaploy')
list_label = tf.cast( list_label, dtype=tf.int32 )
list_label = tf.constant( list_label, shape=( 54, 1, 1 ) )
list_Image = tf.cast( list_Image, dtype=tf.int32 )
list_Image = tf.constant( list_Image, shape=( 54, 1, 32, 32, 3 ) )
# print( list_label_actual )
# print( list_label )
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices(( list_Image, list_label ))
list_Image = tf.constant( list_Image, shape=( 54, 32, 32, 3) ).numpy()
print( "===========================================" )
print( "type of variables: " )
print( type(variables) )
print( variables )
print( "variables.values: " )
print( variables.values )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Reshape((512, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.97 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'] )
model.save_weights(checkpoint_path)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=120, epochs=10000, callbacks=[custom_callback] )
plt.figure(figsize=(6, 6))
plt.title("Actors recognitions")
for i in range(30):
    img = tf.keras.preprocessing.image.array_to_img(
        list_Image[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
Output: they are actually Thai actors; the broadcasts were collected from the Internet.
I'm facing an issue with TensorBoard loading in Google Colab. I tried to uninstall it and then install it again, but with no success. I'm sharing the code and the error.
!pip install tensorboard
%load_ext tensorboard
log_folder = 'log1'
callbacks = TensorBoard(log_dir= log_folder, histogram_freq= 1)
model.fit(train_X, train_y, validation_data = (test_X, test_y),callbacks= callbacks,verbose= 0, epochs = 20)
%tensorboard --logdir = '/content/log1'
I tried without quotes as well, i.e. /content/log1
(screenshot of the error)
I tried reloading TensorBoard, and I tried uninstalling and then reinstalling it.
"=" between --logdir and <PATH> appears to be the problem here. Use something like --logdir log1
Also make sure the target directory exists and that you have access to it, since the logging and accumulating functions require it. One way to save CPU memory is to use disk space, which is why the summary functions only take effect after the writer updates.
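A minimal sketch (my addition) of making sure the Colab log folder from the question exists before training, so the writer has somewhere to accumulate summaries:
import os
log_folder = 'log1'
os.makedirs(log_folder, exist_ok=True)  # create the log directory if it does not already exist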
Sample: a callback with an update frequency, plus image summary examples.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
from datetime import datetime
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
list_image_greyscales = []
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_image_greyscales.append(tf.image.rgb_to_grayscale(image[:,:,0:3]))
    list_label.append(1)
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_image_greyscales.append(tf.image.rgb_to_grayscale(image[:,:,0:3]))
    list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
log_dir = os.path.dirname(checkpoint_path)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
tb_callback = tf.keras.callbacks.TensorBoard(log_dir, update_freq=1, histogram_freq=1)
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.95 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
# Creates a file writer for the log directory.
file_writer = tf.summary.create_file_writer(log_dir + "\\" + datetime.now().strftime("%Y%m%d-%H%M%S") )
# Using the file writer, log the reshaped image.
with file_writer.as_default():
    for i in range(10):
        tf.summary.image("Training data", tf.constant( list_image_greyscales[i], shape=(1,32,32,1) ), step=i)
        tf.summary.scalar("Training data label", data=float(i), step=i)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=10, callbacks=[tb_callback, custom_callback] )
model.save_weights(checkpoint_path)
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
### tensorboard --logdir="F:\models\checkpoint\test_tf_tensorboard_2\\"
Output: the TensorBoard training run and the logged data.
I fixed the error. The assignment operator was creating the problem; when I deleted the = it worked:
%tensorboard --logdir '/content/log1'
When I use a custom loss function with batch gradient descent, I get the error shown in the photos below after the first epoch.
The code runs fine with BinaryCrossentropy.
I am getting the error below:
optimizer.apply_gradients(zip(grads, model_2.trainable_weights))
No gradients provided for any variable: (['dense_22/kernel:0', 'dense_22/bias:0', 'dense_23/kernel:0', 'dense_23/bias:0', 'dense_24/kernel:0', 'dense_24/bias:0'],).
The code:
# importing necessary libraries and functions
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import InputLayer, GlobalAveragePooling2D, Dense, Dropout
from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import Mean, CategoricalAccuracy
import matplotlib.pyplot as plt
import keras.backend as K
import pandas as pd
import tensorflow_datasets as tfds
from collections import deque
from sklearn.model_selection import train_test_split #train test split
from sklearn.model_selection import StratifiedKFold #Stratifying the data (for test train split)
from sklearn.preprocessing import MinMaxScaler #data normalization with sklearn
import matplotlib.pyplot as plt
import math
lambda_par = tf.Variable(0.5)
def fairnessLoss(y_true,y_pred):
    print("HI",y_true,y_pred)
    cse_min = cse_maj = tf.Variable(0.0)
    n_min = n_maj = tf.Variable(0.0)
    print(y_pred.shape[0])
    for i in range(y_pred.shape[0]):
        print(i)
        if(y_true[i][0]==1):
            cse_min.assign_add(tf.math.log(y_pred[i][0]))
            n_min.assign_add(1.0)
        else:
            cse_maj.assign_add(tf.math.log(1-y_pred[i][0]))
            n_maj.assign_add(1.0)
    print("First step")
    tem1 = tf.divide(cse_min,n_min)
    tem2 = tf.divide(cse_maj,n_maj)
    fe = tf.Variable(tem1)
    fe.assign_add(-tem2)
    fe = tf.math.multiply(fe,fe)
    ans = tf.Variable(0.0)
    ans.assign_add(cse_min)
    ans.assign_add(cse_maj)
    ans.assign_add(tf.math.multiply(lambda_par,fe))
    return ans
model = tf.keras.Sequential([
tf.keras.layers.Dense(8, activation=tf.keras.activations.sigmoid), # hidden layer 1, ReLU activation
tf.keras.layers.Dense(8, activation=tf.keras.activations.sigmoid),
tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid)
])
batch_size=len(train_X)
train_yy = []
for i in range(len(train_y)):
    train_yy.append([train_y[i]])
train_dataset = tf.data.Dataset.from_tensor_slices((train_X, train_yy))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# # Prepare the validation dataset.
# val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
# val_dataset = val_dataset.batch(batch_size)
train_acc_metric = keras.metrics.BinaryAccuracy()
val_acc_metric = keras.metrics.BinaryAccuracy()
epochs = 500
# Instantiate an optimizer to train the model.
optimizer = keras.optimizers.Adam()
# Instantiate a loss function.
loss_fn = keras.losses.BinaryCrossentropy(from_logits=True)
# storing variables to plot loss and accuracy
losses = []
accuracy = []
for epoch in range(epochs):
    print("\nStart of epoch %d" % (epoch,))
    epoch_loss_avg = Mean()
    # Iterate over the batches of the dataset.
    for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
        # Open a GradientTape to record the operations run
        # during the forward pass, which enables auto-differentiation.
        with tf.GradientTape() as tape:
            # Run the forward pass of the layer.
            # The operations that the layer applies
            # to its inputs are going to be recorded
            # on the GradientTape.
            logits = model(x_batch_train, training=True)  # Logits for this minibatch
            # Compute the loss value for this minibatch.
            loss_value = fairnessLoss(y_batch_train, logits)
        # Use the gradient tape to automatically retrieve
        # the gradients of the trainable variables with respect to the loss.
        grads = tape.gradient(loss_value, model.trainable_weights)
        # Run one step of gradient descent by updating
        # the value of the variables to minimize the loss.
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        epoch_loss_avg.update_state(loss_value)
        train_acc_metric.update_state(y_batch_train, logits)
        losses.append(epoch_loss_avg.result())
        accuracy.append(train_acc_metric.result())
        # Log every 200 batches.
        if step % 200 == 0:
            print(
                "Training loss (for one batch) at step %d: %.4f"
                % (step, float(loss_value))
            )
            print("Seen so far: %s samples" % ((step + 1) * batch_size))
    print(train_acc_metric.result())
    train_acc_metric.reset_states()
Photo of the error-1
Photo of the error-2
Losses and optimizers play parallel but different roles in the statistics: try a different optimizer, or vary its learning rate, to see what is really happening.
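In your code the error comes from fairnessLoss itself: cse_min, cse_maj, n_min and n_maj are tf.Variables updated with assign_add, and variable assignments are not differentiable, so tape.gradient finds no path from the returned value back to the model weights. A minimal sketch (my addition, separate from the sample below) of the same formula written with plain tensor ops, assuming binary labels in y_true[:, 0]:
lambda_par = tf.constant(0.5)
def fairness_loss(y_true, y_pred):
    # Differentiable rewrite: tensor ops only, no tf.Variable / assign_add.
    y_true = tf.cast(y_true, y_pred.dtype)
    eps = 1e-7                                    # avoid log(0)
    is_min = tf.equal(y_true[:, 0], 1.0)          # mask for the minority class
    log_p = tf.math.log(y_pred[:, 0] + eps)
    log_q = tf.math.log(1.0 - y_pred[:, 0] + eps)
    cse_min = tf.reduce_sum(tf.boolean_mask(log_p, is_min))
    cse_maj = tf.reduce_sum(tf.boolean_mask(log_q, tf.logical_not(is_min)))
    n_min = tf.reduce_sum(tf.cast(is_min, y_pred.dtype)) + eps
    n_maj = tf.reduce_sum(tf.cast(tf.logical_not(is_min), y_pred.dtype)) + eps
    fe = tf.square(cse_min / n_min - cse_maj / n_maj)   # fairness penalty
    return cse_min + cse_maj + lambda_par * fe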
Sample: GradientTape applying values to tf.Variables. The loss function is what you change or apply, and it measures the logic you provided, while the optimizer is how you move toward the goal you set.
Dataset: an image-categories problem, with images and labels for the categories.
Index Image Label
1 F:\datasets\downloads\Actors\train\Candidt Kibt\01.tif 0
2 F:\datasets\downloads\Actors\train\Candidt Kibt\02.tif 0
19 F:\datasets\downloads\Actors\train\Pikaploy\01.tif 1
Code: for testing the Tape and Gradients only.
import os
from os.path import exists
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
num_iter = 1000
train_generator_batch_size = 1
batch_size = 1
WIDTH = 256
HEIGHT = 256
CHANNEL = 3
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Definition / Class
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def create_image_generator( ):
    variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 7.xlsx', index_col=None, header=[0], dtype=str)
    train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        validation_split=0.2,
    )
    train_image_ds = train_generator.flow_from_dataframe(
        dataframe = variables,
        directory=None,
        x_col= 'Image',
        y_col= 'Label',
        weight_col=None,
        target_size=( WIDTH, HEIGHT ),
        color_mode='rgb',
        classes=None,
        class_mode='categorical', ####
        batch_size=train_generator_batch_size,
        shuffle=True,
        seed=None,
        save_to_dir=None,
        save_prefix='',
        save_format='png',
        subset=None,
        interpolation='nearest',
        validate_filenames=True,
    )
    return train_image_ds
class gradient_tape_optimizer( ):
    def __init__ ( self, model, num_iter, content_iter, batch_size ):
        self.num_iter = num_iter
        self.content_iter = content_iter
        self.style_iter = content_iter
        self.batch_size = batch_size
        self.model = model
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False,
            reduction=tf.keras.losses.Reduction.AUTO,
            name='sparse_categorical_crossentropy' )
        self.optimizer = tf.keras.optimizers.Nadam( learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )

    def _compute_mean_std( self, feats : tf.Tensor, eps=1e-8 ):
        """
        feats: Features should be in shape N x H x W x C
        """
        mean = tf.math.reduce_mean(feats, axis=[1,2], keepdims=True)
        std = tf.math.reduce_std(feats, axis=[1,2], keepdims=True) + eps
        return mean, std

    def criterion( self, stylized_img : tf.Tensor, style_img : tf.Tensor, t : tf.Tensor ):
        stylized_content_feats = self.model.encode(stylized_img)
        stylized_feats = self.model.encode(stylized_img, return_all=True)
        style_feats = self.model.encode(style_img, return_all=True)
        content_loss = self.mse_loss(t, stylized_content_feats)
        style_loss = 0
        for f1, f2 in zip(stylized_feats, style_feats):
            m1, s1 = self._compute_mean_std(f1)
            m2, s2 = self._compute_mean_std(f2)
            style_loss += self.mse_loss(m1, m2) + self.mse_loss(s1, s2)
        return content_loss + self.style_weight * style_loss

    def train( self ):
        step = 0
        while step < self.num_iter:
            content_batch = self.content_iter.get_next()
            if content_batch[0].shape[1] != self.batch_size:
                content_batch = self.content_iter.get_next()
            style_batch = self.style_iter.get_next()
            if style_batch[0].shape[1] != self.batch_size:
                style_batch = self.style_iter.get_next()
            current_label = tf.constant( content_batch[1], shape=( 2, 1 ) ).numpy()
            loss_value = tf.Variable( 10.0 )
            with tf.GradientTape() as tape:
                result = self.model( inputs=tf.constant( content_batch[0], shape=( 1, WIDTH, HEIGHT, CHANNEL ) ) )
                result = tf.constant( result, shape=( 2, 1 ) )
                predict_label = tf.Variable( tf.constant( self.model.trainable_weights[len(self.model.trainable_weights) - 1], shape=( 2, 1 ) ) )
                loss_value = self.loss( result.numpy(), current_label )
                loss_value = tf.Variable( tf.constant( loss_value, shape=( 1, ) ).numpy() )
                tape.watch( loss_value )
            gradients = tape.gradient( loss_value, loss_value )
            self.optimizer.apply_gradients(zip(gradients, self.model.trainable_weights))
            # log and save every 200 batches
            if step % 200 == 0:
                if result[tf.math.argmax(result).numpy()[0]][0] > 0 :
                    print(f'Training loss (for one batch) at step {step}: {self.loss} value {result[tf.math.argmax(result).numpy()[0]]}')
                else :
                    print(f'Training loss (for one batch) at step {step}: {self.loss} value {result[abs( 1 - tf.math.argmax(result).numpy()[0]) ]}')
                print(f'Seen so far: {(step+1)*self.batch_size} samples')
                self.model.save_weights(checkpoint_path)
            step += 1
        print("Finished training...")
        self.model.save_weights(checkpoint_path)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Dataset
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 7.xlsx', index_col=None, header=[0], dtype=str)
train_image_ds = tf.data.Dataset.from_generator(
create_image_generator,
output_types=None,
output_shapes=None,
args=None,
output_signature=(
tf.TensorSpec(shape=( 1, WIDTH, HEIGHT, CHANNEL ), dtype=tf.float32, name=None), tf.TensorSpec(shape=(1, 2), dtype=tf.float32, name=None),
),
name='train_image_ds'
)
train_image_ds = train_image_ds.batch( 1 )
iterator = iter( train_image_ds )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( WIDTH, HEIGHT, CHANNEL )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 127 * 127)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(2),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=0.0000001,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
gradient_tape_optimizer = gradient_tape_optimizer( model, num_iter, iterator, batch_size )
result = gradient_tape_optimizer.train()
input( '...' )
Result: the loss changes very little, because this custom optimizer loop is simply a bare algorithm.
2022-10-15 14:23:57.141863: I tensorflow/stream_executor/cuda/cuda_dnn.cc:384] Loaded cuDNN version 8100
Training loss (for one batch) at step 0: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.06285592]
Seen so far: 1 samples
Training loss (for one batch) at step 200: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.05492945]
Seen so far: 201 samples
Training loss (for one batch) at step 400: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.05577546]
Seen so far: 401 samples
Training loss (for one batch) at step 600: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.06180618]
Seen so far: 601 samples
Training loss (for one batch) at step 800: <keras.losses.SparseCategoricalCrossentropy object at 0x00000238B5054550> value [0.05990243]
Seen so far: 801 samples
Finished training...
...
So I'm doing a project on empty-shelf detection in a store, with alerts sent through SNS, and I'm not able to find any source on how to complete it. I'm coding on Google Colab.
I trained my images with YOLO and TensorFlow, and I have a working live feed showing me the object detection. But now I want to detect an empty shelf when the items are removed, and then send an alert to the given number or account.
Can anyone help me achieve this, or suggest a way to compare two images, such as the planogram and the captured feed, and then send an alert for the missing item?
Thanks.
I can answer for the image-categorization task, but for SNS you need to request the interface permissions and follow its specification methods.
You can find code on the Internet, but we are also still experimenting, and the specification is important because in two-sided communication both sides expect the same message definitions.
Some message servers may delay or crash the forwarded communication when you are not sending correctly formed messages.
We will help with the image-categorization task.
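For the SNS side, a minimal sketch (my addition, not part of the sample below) assuming an existing AWS SNS topic; the region and topic ARN here are placeholders:
import boto3
sns = boto3.client("sns", region_name="us-east-1")
def alert_if_empty(shelf_is_empty, item_name):
    # Publish a notification only when the detector reports an empty shelf.
    if shelf_is_empty:
        sns.publish(
            TopicArn="arn:aws:sns:us-east-1:123456789012:shelf-alerts",  # placeholder ARN
            Subject="Empty shelf detected",
            Message="Item missing from shelf: " + item_name,
        )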
[ Sample ]: once the communication target is set up (the target server may expect you to create folders or queues), you can make use of this code sample.
import tensorflow as tf
import tensorflow_io as tfio
import pandas as pd
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
list_label_actual = [ 'Candidt Kibt', 'Pikaploy' ]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Dataset
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 7.xlsx', index_col=None, header=[0])
list_label = [ ]
list_Image = [ ]
list_file_actual = [ ]
for Index, Image, Label in variables.values:
    print( Label )
    list_label.append( Label )
    image = tf.io.read_file( Image )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_Image.append(image)
list_label = tf.cast( list_label, dtype=tf.int32 )
list_label = tf.constant( list_label, shape=( 33, 1, 1 ) )
list_Image = tf.cast( list_Image, dtype=tf.int32 )
list_Image = tf.constant( list_Image, shape=( 33, 1, 32, 32, 4 ) )
dataset = tf.data.Dataset.from_tensor_slices(( list_Image, list_label ))
list_Image = tf.constant( list_Image, shape=( 33, 32, 32, 4) ).numpy()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Reshape((256, 32 * 32)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(196, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(196)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(2),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.97 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'] )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=50, callbacks=[custom_callback] )
plt.figure(figsize=(6,6))
plt.title("Actors recognitions")
for i in range(len(list_Image)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_Image[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
[ Output ]:
I am trying to train a U-Net model with the following parameters:
droput_: 0.2,
activation_: sigmoid,
activation_inner_: relu,
learning_rate_: 0.0001,
epsilon_: 1e-07,
loss : mse,
metric : rmse,
kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01), bias_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)
I ran the model and got the following plot for loss and metric.
Blue = Training,
Orange = Validation
Please help me interpret the loss (MSE) and metric (RMSE) graphs.
Is it overfitting?
Does the metric plot look ok?
If the data is varied enough, the curves change only a little as the loss decreases, with only a slight effect on the mean squared error; over a longer time they keep showing the same behaviour, and that simply reflects the samples.
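A small check (my addition) of how the loss and metric relate, since the RMSE metric reported in the plot is simply the square root of the MSE loss on the same batch:
import numpy as np
import tensorflow as tf
y_true = np.array([[0.0], [1.0], [2.0]], dtype=np.float32)
y_pred = np.array([[0.5], [0.5], [2.5]], dtype=np.float32)
mse = tf.keras.losses.MeanSquaredError()(y_true, y_pred).numpy()
rmse = tf.keras.metrics.RootMeanSquaredError()
rmse.update_state(y_true, y_pred)
print(mse, rmse.result().numpy(), np.sqrt(mse))  # the metric equals sqrt(loss) here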
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import h5py
from tensorflow_examples.models.pix2pix import pix2pix
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
OUTPUT_CLASSES = 3
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
if not exists(database_buffer_dir) :
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def unet_model(output_channels:int):
    inputs = tf.keras.layers.Input(shape=[32, 32, 3])
    # inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    # Downsampling through the model
    skips = down_stack(inputs)
    x = skips[-1]
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])
    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(
        filters=output_channels, kernel_size=3, strides=2,
        padding='same')  # 64x64 -> 128x128
    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x, name="U-net")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
validation_split=0.2)
train_generator.fit(x_train)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3], include_top=False)
base_model = tf.keras.applications.MobileNetV2(input_shape=[32, 32, 3], include_top=False)
# Use the activations of these layers
layer_names = [
'block_1_expand_relu', # 64x64
'block_3_expand_relu', # 32x32
'block_6_expand_relu', # 16x16
'block_13_expand_relu', # 8x8
'block_16_project', # 4x4
]
base_model_outputs = [base_model.get_layer(name).output for name in layer_names]
# Create the feature extraction model
down_stack = tf.keras.Model(inputs=base_model.input, outputs=base_model_outputs)
down_stack.trainable = False
up_stack = [
pix2pix.upsample(512, 3), # 4x4 -> 8x8
pix2pix.upsample(256, 3), # 8x8 -> 16x16
pix2pix.upsample(128, 3), # 16x16 -> 32x32
pix2pix.upsample(64, 3), # 32x32 -> 64x64
]
unet_model = unet_model( output_channels=OUTPUT_CLASSES )
unet_model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredError( reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_error' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
unet_model.compile(optimizer=optimizer, loss=lossfn, metrics=[ tf.keras.metrics.RootMeanSquaredError( name='root_mean_squared_error' ) ])
# metric : rmse,
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    unet_model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = unet_model.fit( train_generator.flow(x_train, y_train, batch_size=1, subset='training'), epochs=5, steps_per_epoch=1 )
unet_model.save_weights(checkpoint_path)
# print( history.history )
# {'loss': [25.04973030090332], 'root_mean_squared_error': [5.004970550537109]}
plt.plot(history.history['loss'])
plt.plot(history.history['root_mean_squared_error'])
plt.legend(['loss', 'root_mean_squared_error'])
plt.autumn()
plt.show()
plt.close()
input('...')
[ Output ]: