I'm facing an issue with TensorBoard loading in Google Colab. I tried to uninstall it and then install it again, but no success. I'm sharing the code and the error.
!pip install tensorboard
%load_ext tensorboard

from tensorflow.keras.callbacks import TensorBoard

log_folder = 'log1'
callbacks = TensorBoard(log_dir=log_folder, histogram_freq=1)
model.fit(train_X, train_y, validation_data=(test_X, test_y), callbacks=callbacks, verbose=0, epochs=20)
%tensorboard --logdir = '/content/log1'
I tried without quotes as well, i.e. /content/log1.
[screenshot of the TensorBoard error]
I tried reloading TensorBoard, and I also tried uninstalling and then reinstalling it.
"=" between --logdir and <PATH> appears to be the problem here. Use something like --logdir log1
Also make sure the target directory required by the logging and accumulating functions exists and is writable. Writing summaries to disk is one way to save memory, which is why the summary functions only show their results after each update is flushed.
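For example, a minimal way to make sure that directory exists before the callback writes to it (reusing the question's log_folder):

import os

log_folder = 'log1'
os.makedirs(log_folder, exist_ok=True)  # create the log directory if it does not exist yet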
Sample: a TensorBoard callback with an update frequency, plus image and scalar summaries written with a file writer.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
from datetime import datetime
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
list_image_greyscales = []
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_image_greyscales.append(tf.image.rgb_to_grayscale(image[:,:,0:3]))
    list_label.append(1)
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_image_greyscales.append(tf.image.rgb_to_grayscale(image[:,:,0:3]))
    list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
log_dir = os.path.dirname(checkpoint_path)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
tb_callback = tf.keras.callbacks.TensorBoard(log_dir, update_freq=1, histogram_freq=1)
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.95 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
# Creates a file writer for the log directory.
file_writer = tf.summary.create_file_writer(log_dir + "\\" + datetime.now().strftime("%Y%m%d-%H%M%S") )
# Using the file writer, log the reshaped image.
with file_writer.as_default():
    for i in range(10):
        tf.summary.image("Training data", tf.constant( list_image_greyscales[i], shape=(1,32,32,1) ), step=i)
        tf.summary.scalar("Training data label", data=float(i), step=i)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=10, callbacks=[tb_callback, custom_callback] )
model.save_weights(checkpoint_path)
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
### tensorboard --logdir="F:\models\checkpoint\test_tf_tensorboard_2\\"
Output: TensorBoard showing the training run and the logged data.
I fixed the error. The assignment operator was creating the problem; when I deleted the = it worked:
%tensorboard --logdir '/content/log1'
Related
I am writing a character-recognition CNN, trained on the EMNIST dataset.
Kaggle Notebook link : https://www.kaggle.com/code/notshrirang/ocr-with-cnn
GitHub Notebook link : https://github.com/NotShrirang/MyOCR
My model does pretty well on the testing dataset, but when I use an image I captured with my phone it never predicts correctly.
What should I do? Please help.
Here is my code snippet for model architecture:
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(kernel_size=(8, 8),filters=128, input_shape=(28, 28, 1), activation="relu"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(kernel_size=(4, 4), filters=64, activation="relu"),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.AveragePooling2D(pool_size=(2, 2)),
tf.keras.layers.Dense(32, activation='relu'),
tf.keras.layers.BatchNormalization(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(27, activation='softmax')
])
model.compile(
optimizer=tf.keras.optimizers.SGD(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['accuracy']
)
model.fit(X_train, y_train, epochs=8, validation_split = 0.2)
Output while training the model:
Loss vs Val Loss Plot:
Evaluation:
evaluation = new_model.evaluate(X_test, y_test)
evaluation
I resized the images to (28x28).
Also, all images in training, testing and real data are converted to grayscale.
The dataset has all images with their matrix transposed. I have straightened them.
I tried normalizing the data, but it decreased the val_accuracy so I stopped normalizing.
I tried shuffling the data. It increased the val_accuracy so I kept it.
I tried increasing and decreasing the layers and epochs in the model. It made no difference except for the training time.
I added batch normalization; it only increased the time required to train the model.
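For reference, the inference-time preprocessing described above amounts to something like this (a sketch; photo.png is a placeholder file name, and the pixel range and foreground/background polarity must match what the network saw during training):

import tensorflow as tf

img = tf.io.decode_image(tf.io.read_file("photo.png"), channels=3)  # placeholder file name
img = tf.image.rgb_to_grayscale(img)     # match the grayscale training data
img = tf.image.resize(img, [28, 28])     # match the EMNIST input size
img = tf.expand_dims(img, 0)             # add the batch dimension -> (1, 28, 28, 1)
prediction = model.predict(img)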
There are many techniques to try, not only the model architecture:
Increase the size of the dataset, which also gives the model more significant inputs.
Use feature-extraction functions and data preprocessing such as MFCC or Fourier transforms, and input transformations (blur, rotation, flipping, padding, zoom, or random noise), as sketched below.
Create multiple sets of data inputs and train with K-fold validation.
Use random data selection, save and reload the model, or adjust parameters with performance callbacks.
Compare between models, or concatenate models.
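For instance, a minimal sketch of the input transformations mentioned above with Keras preprocessing layers (the layer choices and factors are illustrative, not tuned values):

import tensorflow as tf

augment = tf.keras.Sequential([
    tf.keras.layers.RandomRotation(0.1),          # small rotations
    tf.keras.layers.RandomZoom(0.1),              # random zoom in/out
    tf.keras.layers.RandomTranslation(0.1, 0.1),  # shifts with padding
    tf.keras.layers.GaussianNoise(0.05),          # random noise
])

augmented_batch = augment(tf.cast(X_train[:32], tf.float32), training=True)  # apply only during training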
Sample: my image-recognition template, useful as a starting point for tasks like this.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import pandas as pd
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 13 (2) (3).xlsx', index_col=None, header=[0])
list_label = [ ]
list_Image = [ ]
list_file_actual = [ ]
list_label_actual = [ 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy' ]
for Index, Image, Label in variables.values:
    print( Label )
    # list_label.append( Label )
    image = tf.io.read_file( Image )
    image = tf.io.decode_image(image)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_Image.append(image)
    if Label == 0:
        list_label.append(0)
    else:
        list_label.append(9)
    # if Label == 0:
    #     list_label_actual.append('Candidt Kibt')
    # else:
    #     list_label_actual.append('Pikaploy')
list_label = tf.cast( list_label, dtype=tf.int32 )
list_label = tf.constant( list_label, shape=( 54, 1, 1 ) )
list_Image = tf.cast( list_Image, dtype=tf.int32 )
list_Image = tf.constant( list_Image, shape=( 54, 1, 32, 32, 3 ) )
# print( list_label_actual )
# print( list_label )
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices(( list_Image, list_label ))
list_Image = tf.constant( list_Image, shape=( 54, 32, 32, 3) ).numpy()
print( "===========================================" )
print( "type of variables: " )
print( type(variables) )
print( variables )
print( "variables.values: " )
print( variables.values )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dropout(0.3),
tf.keras.layers.Reshape((512, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.97 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'] )
model.save_weights(checkpoint_path)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=120, epochs=10000, callbacks=[custom_callback] )
plt.figure(figsize=(6, 6))
plt.title("Actors recognitions")
for i in range(30):
    img = tf.keras.preprocessing.image.array_to_img(
        list_Image[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
Output: they are actually Thai actors; the images were collected from broadcasts on the Internet.
I'm doing a project on empty-shelf detection in a store, with alerts sent through SNS, and I'm not able to find any source on how to complete it. I'm coding on Google Colab.
I trained my images with YOLO and TensorFlow, and I have a working live feed showing the object detection. Now I want it to detect an empty shelf when the items are removed and then send an alert to a given number or account.
Can anyone help me achieve this, or suggest a way to compare two images (e.g. the planogram and the captured feed) and then send an alert about the missing item?
Thanks.
I can answer the image-categorization part; for SNS you need to request interface access or follow its specified methods.
You can find example code on the Internet, but the specification matters: two-sided communication expects both sides to use the same definitions.
Some message servers may delay or drop forwarded communication when you do not send correctly formed messages.
Below is help with the image-categorization task.
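For the alert itself, a minimal sketch of publishing a message through Amazon SNS with boto3 (the region, topic ARN and message text are placeholders; credentials and permissions have to be configured separately):

import boto3

sns = boto3.client("sns", region_name="us-east-1")  # placeholder region
sns.publish(
    TopicArn="arn:aws:sns:us-east-1:123456789012:shelf-alerts",  # placeholder topic ARN
    Subject="Empty shelf detected",
    Message="Item missing from shelf A3 compared to the planogram.",
)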
[ Sample ]: the communication targets usually expect you to create folders or queues on the target server; for the image side you can use this code sample.
import tensorflow as tf
import tensorflow_io as tfio
import pandas as pd
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
list_label_actual = [ 'Candidt Kibt', 'Pikaploy' ]
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Dataset
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
variables = pd.read_excel('F:\\temp\\Python\\excel\\Book 7.xlsx', index_col=None, header=[0])
list_label = [ ]
list_Image = [ ]
list_file_actual = [ ]
for Index, Image, Label in variables.values:
    print( Label )
    list_label.append( Label )
    image = tf.io.read_file( Image )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_Image.append(image)
list_label = tf.cast( list_label, dtype=tf.int32 )
list_label = tf.constant( list_label, shape=( 33, 1, 1 ) )
list_Image = tf.cast( list_Image, dtype=tf.int32 )
list_Image = tf.constant( list_Image, shape=( 33, 1, 32, 32, 4 ) )
dataset = tf.data.Dataset.from_tensor_slices(( list_Image, list_label ))
list_Image = tf.constant( list_Image, shape=( 33, 32, 32, 4) ).numpy()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Reshape((256, 32 * 32)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(196, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(196)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(2),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if( logs['accuracy'] >= 0.97 ):
            self.model.stop_training = True
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.000001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'] )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=50, callbacks=[custom_callback] )
plt.figure(figsize=(6,6))
plt.title("Actors recognitions")
for i in range(len(list_Image)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_Image[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    score = tf.nn.softmax(predictions[0])
    plt.subplot(6, 6, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
[ Output ]:
I just implemented an LSTM, but I'm not sure if I interpreted the structure right.
In this context, is testPredict = model.predict(Xtest) the last value of the sequence, and therefore (after reversing the MinMaxScaler) is last_value = testPredict[-1] the prediction regarding the future?
from IPython.core.debugger import set_trace
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import time
import yfinance as yf
import sklearn
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import LSTM, Dense, Dropout, Flatten
from sklearn.metrics import mean_squared_error
from keras.layers import ConvLSTM2D
from keras.layers import Bidirectional
from keras.models import model_from_json
df = yf.download(tickers="BTC-USD", period="20wk", interval="60m")
df = df[["Close"]]
df["returns"] = df.Close.pct_change()
df["log_returns"] = np.log(1 + df["returns"])
df.dropna(inplace=True)
X = df[["Close", "log_returns"]].values
scaler = MinMaxScaler(feature_range=(0, 1)).fit(X)
X_scaled = scaler.transform(X)
y = [x[0] for x in X_scaled]
split = int(len(X_scaled) * 0.8)
X_train = X_scaled[:split]
X_test = X_scaled[split : len(X_scaled)]
y_train = y[:split]
y_test = y[split : len(y)]
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
n = 24 #analyze the last 24 prices
Xtrain = []
ytrain = []
Xtest = []
ytest = []
for i in range(n, len(X_train)):
    Xtrain.append(X_train[i - n : i, : X_train.shape[1]])
    ytrain.append(y_train[i])
for i in range(n, len(X_test)):
    Xtest.append(X_test[i - n : i, : X_test.shape[1]])
    ytest.append(y_test[i])
val = np.array(ytrain[0])
val = np.c_[val, np.zeros(val.shape)]
scaler.inverse_transform(val)
Xtrain, ytrain = (np.array(Xtrain), np.array(ytrain))
Xtrain = np.reshape(Xtrain, (Xtrain.shape[0], Xtrain.shape[1], Xtrain.shape[2]))
Xtest, ytest = (np.array(Xtest), np.array(ytest))
Xtest = np.reshape(Xtest, (Xtest.shape[0], Xtest.shape[1], Xtest.shape[2]))
model = Sequential()
model.add(LSTM(8, return_sequences=True, input_shape=(Xtrain.shape[1], Xtrain.shape[2])))
#model.add(Bidirectional(LSTM(8, return_sequences=True, input_shape=(Xtrain.shape[1], Xtrain.shape[2]))))
model.add(LSTM(4))
model.add(Dropout(0.2))
model.add(Dense(1))
model.compile(loss="mean_squared_error", optimizer="adam")
model.fit(Xtrain, ytrain, epochs=100, validation_data=(Xtest, ytest), batch_size=16, verbose=1)
trainPredict = model.predict(Xtrain)
testPredict = model.predict(Xtest)
trainPredict = np.c_[trainPredict, np.zeros(trainPredict.shape)]
testPredict = np.c_[testPredict, np.zeros(testPredict.shape)]
trainPredict = scaler.inverse_transform(trainPredict)
trainPredict = [x[0] for x in trainPredict]
testPredict = scaler.inverse_transform(testPredict)
testPredict = [x[0] for x in testPredict]
trainScore = mean_squared_error([x[0][0] for x in Xtrain], trainPredict, squared=False)
#print("Train Score: %.2f RMSE" % (trainScore))
testScore = mean_squared_error([x[0][0] for x in Xtest], testPredict, squared=False)
#print("Test Score: %.2f RMSE" % (testScore))
########################################################################################################################
last_value = (testPredict[-1])
I will show a picture of the result to aid understanding; it does not change the fact that the labels and values match the criteria, but the order is reversed to verify the result. Sufficient data is required for a working model.
Selecting random pictures from the dataset (about 6-7 per actor) in reversed order verifies that the model works when predicting on new incoming input; that is all.
A symmetric shape can tell you which action to respond with, but it is not a prediction of the future. In this case, to get the prediction result you select the best match from the scores or a softmax; without a softmax you can also use np.argmax on the sequence output (you need to look at the model's output shape).
To predict on real-time data you need to feed in the current variables plus the history, as when you create a report; the model learns how the values change within its scope!
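To relate this to the question's code, a minimal sketch of turning the most recent window into a next-step forecast (reusing the question's Xtest, model and scaler; the zero column is only there so inverse_transform sees the two features it was fitted on):

import numpy as np

last_window = Xtest[-1:]                                  # the most recent n=24 rows, shape (1, 24, 2)
next_scaled = model.predict(last_window)                  # scaled next-step Close, shape (1, 1)
padded = np.c_[next_scaled, np.zeros_like(next_scaled)]   # pad the log_returns column for the scaler
next_close = scaler.inverse_transform(padded)[0, 0]       # predicted Close in price units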
[ Sample ]: an example where, from the prediction target, you can visually see that the network learns correctly in both directions and can be used on new input; reversed order is only a weak check when you have no other data to examine.
import os
from os.path import exists
import tensorflow as tf
import tensorflow_io as tfio
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
[PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU')]
None
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
print(physical_devices)
print(config)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
PATH = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Pikaploy', '*.tif')
PATH_2 = os.path.join('F:\\datasets\\downloads\\Actors\\train\\Candidt Kibt', '*.tif')
files = tf.data.Dataset.list_files(PATH)
files_2 = tf.data.Dataset.list_files(PATH_2)
list_file = []
list_file_actual = []
list_label = []
list_label_actual = [ 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Pikaploy', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt', 'Candidt Kibt' ]
for file in files.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(1)
for file in files_2.take(5):
    image = tf.io.read_file( file )
    image = tfio.experimental.image.decode_tiff(image, index=0)
    list_file_actual.append(image)
    image = tf.image.resize(image, [32,32], method='nearest')
    list_file.append(image)
    list_label.append(9)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
log_dir = checkpoint_dir
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
dataset = tf.data.Dataset.from_tensor_slices((tf.constant(tf.cast(list_file, dtype=tf.int64), shape=(10, 1, 32, 32, 4), dtype=tf.int64),
tf.constant(list_label, shape=(10, 1, 1), dtype=tf.int64)))
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 4 )),
tf.keras.layers.Normalization(mean=3., variance=2.),
tf.keras.layers.Normalization(mean=4., variance=6.),
tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
tf.keras.layers.MaxPooling2D((2, 2)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Reshape((128, 225)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96, return_sequences=True, return_state=False)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(96)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(192, activation='relu'),
tf.keras.layers.Dense(10),
])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
learning_rate=0.00001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
name='Nadam'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=False,
reduction=tf.keras.losses.Reduction.AUTO,
name='sparse_categorical_crossentropy'
)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit( dataset, batch_size=100, epochs=50 )
plt.figure(figsize=(5,2))
plt.title("Actors recognitions")
for i in range(len(list_file)):
    img = tf.keras.preprocessing.image.array_to_img(
        list_file[i],
        data_format=None,
        scale=True
    )
    img_array = tf.keras.preprocessing.image.img_to_array(img)
    img_array = tf.expand_dims(img_array, 0)
    predictions = model.predict(img_array)
    predictions = predictions[-1:]
    score = tf.nn.softmax(predictions[0])
    plt.subplot(5, 2, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(list_file_actual[i])
    plt.xlabel(str(round(score[tf.math.argmax(score).numpy()].numpy(), 2)) + ":" + str(list_label_actual[tf.math.argmax(score)]))
plt.show()
input('...')
I have a segmentation network trained for 2 classes and I am able to see accurate results. But when using Grad-CAM for the heatmap, I get good results for the last convolution layer for both classes, while I am having issues generating a heatmap for the second-to-last convolution layer for one of the classes (the other class's heatmap works fine).
**Last 5 layers**
convolution_layer(filters:8, kernel:3*3)
convolution_transpose_layer(filters:2, kernel:2*2)
convolution_layer(filters:2, kernel:3*3)
convolution_layer(filters:10, kernel:1*1)
activation_layer(softmax)
The heatmap is empty because all the pooled gradients are negative (due to taking the mean over all the negative gradients w.r.t. the conv layer), which results in negative values in pooled_grads * convolution_output; ReLU is then applied, giving all zeros.
What does it mean for GradCAM to be all negative?
Why is it that all channels in the convolution lead to a "negative" contribution to the true output class?
I am following this paper for heatmaps on segmentation models: https://arxiv.org/pdf/2002.11434.pdf
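For reference, a minimal sketch of the pooled-gradient computation described above (model, conv_name, img_batch and class_idx are placeholders for your segmentation model, the layer being inspected, an input batch and the target class):

import tensorflow as tf

grad_model = tf.keras.Model(model.inputs, [model.get_layer(conv_name).output, model.output])
with tf.GradientTape() as tape:
    conv_out, preds = grad_model(img_batch)
    class_score = tf.reduce_sum(preds[..., class_idx])       # sum the class score over all pixels
grads = tape.gradient(class_score, conv_out)                  # gradients w.r.t. the feature maps
pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))          # one weight per channel; can be all negative
heatmap = tf.reduce_sum(conv_out[0] * pooled_grads, axis=-1)  # weighted sum of the channels
heatmap = tf.nn.relu(heatmap)                                 # all-negative weights * activations -> all zeros here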
I spent a bit of time making a GIF to explain it: the convolution was run over too much of the same input, or there was no contrast information, so the values merged to similar levels.
Convolution with padding extends those similarities, which is one cause, or it can pick up the contrast information. Before answering I went through your links, looked at the example output, and I think this is the heatmap you mean, just using the color system you specify.
(I removed those conv layers and used only one layer to examine and speed up the output behaviour. The heatmap works, and it is not always negative when using convs; "not negative" here means the image is not negative when you use images at their true dimensions.)
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import h5py
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
if not exists(database_buffer_dir) :
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train = tf.constant( x_train[0], shape=( 1, 32, 32, 3 ) )
y_train = tf.constant( y_train[0], shape=( 1, 1, 1, 1 ) )
plt.imshow( tf.constant( x_train, shape=( 32, 32, 3 ) ).numpy() )
plt.show()
plt.close()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=( 32, 32, 3 ), name="Input_01"),
tf.keras.layers.Conv2D(1, (1, 1), activation='relu', name="Conv2D_00"),
# tf.keras.layers.Conv2D(8, (3, 3), activation='relu', name="Conv2D_01"),
# tf.keras.layers.Conv2DTranspose(2, (2, 2), activation='relu', name="Conv2DTranspose_01"),
# tf.keras.layers.Conv2D(2, (3, 3), activation='relu', name="Conv2D_02"),
# tf.keras.layers.Conv2D(10, (1, 1), activation='relu', name="Conv2D_03"),
])
model.add(tf.keras.layers.Dense(1))
model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Callback
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
class custom_callback(tf.keras.callbacks.Callback):
    def on_train_end(self, logs=None):
        print(self.model.inputs)
        feature_extractor = tf.keras.Model(inputs=self.model.inputs, outputs=[layer.output for layer in self.model.layers])
        img = tf.keras.preprocessing.image.array_to_img(
            tf.constant(feature_extractor(x_train)[0], shape=(32, 32, 1)),
            data_format=None,
            scale=True
        )
        plt.imshow( img )
        plt.show()
        plt.close()
        input('Press Any Key!')
custom_callback = custom_callback()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
optimizer = tf.keras.optimizers.Adam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False, name='Adam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=False, reduction=tf.keras.losses.Reduction.AUTO, name='sparse_categorical_crossentropy' )
lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit(x_train, y_train, epochs=25 ,validation_data=(x_train, y_train), callbacks=[custom_callback])
input('...')
[ Output ]:
I am trying to train a U-Net model with the following parameters:
droput_: 0.2,
activation_: sigmoid,
activation_inner_: relu,
learning_rate_: 0.0001,
epsilon_: 1e-07,
loss : mse,
metric : rmse,
kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01), bias_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01)
I ran the model and got the following plot for loss and metric.
Blue = Training,
Orange = Validation
Please help me interpret the loss (MSE) and metric (RMSE) graphs.
Is it overfitting?
Is the metric plot OK?
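One thing that may help when reading the two plots: the RMSE metric is just the square root of the MSE error term, while the reported loss additionally includes the l1_l2 regularization penalties, so the loss curve can sit above the squared metric. A quick sanity check (a sketch, assuming the metric is registered under its default name in history):

import numpy as np

mse_loss = np.array(history.history['loss'])                  # MSE plus the l1_l2 penalty terms
rmse = np.array(history.history['root_mean_squared_error'])   # pure error metric, no regularization
print(np.sqrt(mse_loss) - rmse)   # roughly the contribution of the regularizers per epoch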
The data is unique enough: the values change a bit as the loss decreases, with only a slight effect on the mean squared error, and over a longer time it keeps the same behaviour. That is what the sample below shows.
[ Sample ]:
import os
from os.path import exists
import tensorflow as tf
import h5py
from tensorflow_examples.models.pix2pix import pix2pix
import matplotlib.pyplot as plt
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Variables
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
OUTPUT_CLASSES = 3
database_buffer = "F:\\models\\buffer\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
database_buffer_dir = os.path.dirname(database_buffer)
checkpoint_path = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\TF_DataSets_01.h5"
checkpoint_dir = os.path.dirname(checkpoint_path)
loggings = "F:\\models\\checkpoint\\" + os.path.basename(__file__).split('.')[0] + "\\loggings.log"
if not exists(checkpoint_dir) :
    os.mkdir(checkpoint_dir)
    print("Create directory: " + checkpoint_dir)
if not exists(database_buffer_dir) :
    os.mkdir(database_buffer_dir)
    print("Create directory: " + database_buffer_dir)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
Functions
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
def unet_model(output_channels:int):
    inputs = tf.keras.layers.Input(shape=[32, 32, 3])
    # inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    # Downsampling through the model
    skips = down_stack(inputs)
    x = skips[-1]
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])
    # This is the last layer of the model
    last = tf.keras.layers.Conv2DTranspose(
        filters=output_channels, kernel_size=3, strides=2,
        padding='same') #64x64 -> 128x128
    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x, name="U-net")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
DataSet
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
validation_split=0.2)
train_generator.fit(x_train)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Initialize
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3], include_top=False)
base_model = tf.keras.applications.MobileNetV2(input_shape=[32, 32, 3], include_top=False)
# Use the activations of these layers
layer_names = [
'block_1_expand_relu', # 64x64
'block_3_expand_relu', # 32x32
'block_6_expand_relu', # 16x16
'block_13_expand_relu', # 8x8
'block_16_project', # 4x4
]
base_model_outputs = [base_model.get_layer(name).output for name in layer_names]
# Create the feature extraction model
down_stack = tf.keras.Model(inputs=base_model.input, outputs=base_model_outputs)
down_stack.trainable = False
up_stack = [
pix2pix.upsample(512, 3), # 4x4 -> 8x8
pix2pix.upsample(256, 3), # 8x8 -> 16x16
pix2pix.upsample(128, 3), # 16x16 -> 32x32
pix2pix.upsample(64, 3), # 32x32 -> 64x64
]
unet_model = unet_model( output_channels=OUTPUT_CLASSES )
unet_model.summary()
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam( learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name='Nadam' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
lossfn = tf.keras.losses.MeanSquaredError( reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_error' )
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
unet_model.compile(optimizer=optimizer, loss=lossfn, metrics=[ tf.keras.metrics.RootMeanSquaredError( name='root_mean_squared_error' ) ])
# metric : rmse,
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: FileWriter
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if exists(checkpoint_path) :
    unet_model.load_weights(checkpoint_path)
    print("model load: " + checkpoint_path)
    input("Press Any Key!")
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = unet_model.fit( train_generator.flow(x_train, y_train, batch_size=1, subset='training'), epochs=5, steps_per_epoch=1 )
unet_model.save_weights(checkpoint_path)
# print( history.history )
# {'loss': [25.04973030090332], 'root_mean_squared_error': [5.004970550537109]}
plt.plot(history.history['loss'])
plt.plot(history.history['root_mean_squared_error'])
plt.legend(['loss', 'root_mean_squared_error'])
plt.autumn()
plt.show()
plt.close()
input('...')
[ Output ]: