I built a CNN in Colab and saved the model at every epoch. I exported the .h5 file and am now trying to load the model. Here's the main error:
ValueError: Unknown layer: ModelWrapper. Please ensure this object is passed to the custom_objects argument. See https://www.tensorflow.org/guide/keras/save_and_serialize#registering_the_custom_object for details.
Here's the code I used to train the model:
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
import matplotlib.pyplot as plt
import time
import tensorflow as tf
import levenberg_marquardt as lm
archivo = '/content/Binary System Water - NaCl.xlsx'
df = pd.read_excel(archivo, sheet_name='Sin Dependencia de T')
Dep = df['Depresión °C ']
Indep = df['molSal/KgH2O']
Dep = np.array(Dep)
Dep = np.delete(Dep,-1)
#Dep = Dep.reshape(69,1)
Indep = np.array(Indep)
Indep = np.delete(Indep,-1)
#Indep = Indep.reshape(69,1)
model = Sequential()
model.add(Dense(8, input_dim=1, activation = 'relu'))
model.add(Dense(1, activation='linear'))
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['mae', 'mse'])
model_wrapper = lm.ModelWrapper(
    tf.keras.models.clone_model(model))
model_wrapper.compile(
    optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
    loss=lm.MeanSquaredError())
print("Using Levenberg-Marquardt")
model_wrapper.fit(Indep, Dep, epochs=100)
In = np.linspace(0,5.1,num=30)
plt.scatter(Indep, Dep, label="reference")
plt.plot(In, model_wrapper.predict(In), 'r--', label="lm")
from keras.models import load_model
model_wrapper.save('path_to_my_model.h5')
new_model = load_model('path_to_my_model.h5')
I also tried this code:
# serialize the model to JSON
model_json = model_wrapper.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize the weights to HDF5
# (note this saves the base model's weights, not model_wrapper's)
model.save_weights("model.h5")
print("Model saved to disk")
from keras.models import model_from_json
json_file = open('model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("model.h5")
print("Model loaded from disk")
I have a problem with my CNN model made using TensorFlow. The goal is to predict the classes of satellite images, corresponding to the type of clouds (data extracted from the Kaggle competition "Planet: Understanding the Amazon from Space"). There are 4 classes: clear, cloudy, partly cloudy and haze.
Everything works fine until I try to test the model on individual images. Then it always predicts the same 2 classes and nothing else. I noticed that if I run the model again, it may predict 2 other classes among the 4. The model was trained for 10 epochs, which gave an accuracy of 0.8717.
Here is my code:
import numpy as np
import pandas as pd
import cv2
from tqdm import tqdm
import h5py
import os
os.listdir("/kaggle/input/")
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense,MaxPooling2D,Conv2D,Flatten,Dropout,Activation
from tensorflow.keras.layers import BatchNormalization
from sklearn import svm
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from oauth2client.client import GoogleCredentials
import csv
#from keras.optimizers import RMSprop
from tensorflow.keras import Input, Model
batch_size = 128
img_width = 256
img_height = 256
train_data = ImageDataGenerator(
rescale = 1./255,
validation_split = 0.25)
train_generator = train_data.flow_from_directory(
'../input/clouds',
target_size=(img_height, img_width),
color_mode='rgb',
batch_size=batch_size,
shuffle = True,
class_mode="categorical",
subset = 'training'
)
valid_generator = train_data.flow_from_directory(
'../input/clouds',
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical',
subset = 'validation'
)
num_classes = 4
model = Sequential([
    Input(shape=[img_width, img_height, 3]),
    Conv2D(128, 4, activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 4, activation='relu'),
    MaxPooling2D(),
    Conv2D(32, 4, activation='relu'),
    MaxPooling2D(),
    Conv2D(16, 4, activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(64, activation='relu'),
    Dense(num_classes, activation='softmax')
])
model.compile(optimizer = "adam",
loss = 'categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_generator, validation_data = valid_generator, epochs = 10)
img_to_predict = cv2.imread('/kaggle/input/clouds-test/clouds_test/test_3877_6013089.jpg') #an augmented image from original dataset
img_to_predict = cv2.cvtColor(img_to_predict, cv2.COLOR_BGR2RGB)
img_to_predict = np.expand_dims(cv2.resize(img_to_predict, (256,256)), axis = 0)
res = model.predict(img_to_predict)
label_map = (train_generator.class_indices)
print(label_map)
print(list(label_map)[np.argmax(res, axis = -1)[0]])
Thank you for your help.
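One detail worth double-checking in the code above (an observation from the posted code, not a confirmed diagnosis): the generators rescale training pixels with rescale=1./255, but the cv2 image passed to predict is left in the 0-255 range, so the network sees inputs on a very different scale at test time. A minimal sketch of matching the training preprocessing at inference:

# match ImageDataGenerator(rescale=1./255) before predicting
img_to_predict = img_to_predict.astype('float32') / 255.0
res = model.predict(img_to_predict)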
I am working on a fine-grained classification task to classify car models, so I have used transfer learning with ResNet50. As far as I can tell, it performs fine during training, but when I try new images it always predicts a single class. Below is my code.
For training:
from tensorflow.keras.layers import Input, Lambda, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
IMAGE_SIZE = [224, 224]
train_path = 'Datasets/train'
valid_path = 'Datasets/test'
resnet = ResNet50(input_shape = IMAGE_SIZE + [3], weights='imagenet', include_top = False)
for layer in resnet.layers:
    layer.trainable = False
folders = glob('Datasets/train/*') #training folders
x = Flatten()(resnet.output)
prediction = Dense(len(folders), activation='softmax')(x)
model = Model(inputs = resnet.input, outputs = prediction)
model.compile(
loss = 'categorical_crossentropy',
optimizer = 'adam',
metrics = ['accuracy']
)
train_datagen = ImageDataGenerator(rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('Datasets/train',
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
test_set = test_datagen.flow_from_directory('Datasets/test',
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical')
r = model.fit_generator(
training_set,
validation_data=test_set,
epochs=200,
steps_per_epoch=len(training_set),
validation_steps=len(test_set)
)
from tensorflow.keras.models import load_model
model.save('model_updateV1.h5')
y_pred = model.predict(test_set)
import numpy as np
y_pred = np.argmax(y_pred, axis=1)
For Trying New Images:
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np
from tensorflow.keras.applications.resnet50 import preprocess_input
model = load_model('model_updateV1.h5')
img = image.load_img('Datasets/test/mercedes/45.jpg', target_size=(224,224))
x = image.img_to_array(img)
x = x/255.
x = np.expand_dims(x, axis = 0)
img_data = preprocess_input(x)
img_data.shape
model.predict(img_data)
a = np.argmax(model.predict(img_data), axis=1)
a
I think your problem is that you are rescaling the images twice. You have the code
x = x/255
and then you expand the dimensions, which is fine. However, you then have
img_data = preprocess_input(x)
The preprocess_input function applies its own transformation (for ResNet50 it zero-centers each channel using the ImageNet means; some other applications rescale to between -1 and +1 with x = x/127.5 - 1). Either way, your pixel values end up transformed twice. So just delete the line
x = x/255
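For completeness, a sketch of the inference block with a single transform. One assumption worth flagging: the training generators above used rescale=1./255 and no preprocess_input, so applying that same scaling at inference (rather than keeping preprocess_input) matches what the network actually saw during training:

from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
import numpy as np

model = load_model('model_updateV1.h5')
img = image.load_img('Datasets/test/mercedes/45.jpg', target_size=(224, 224))
x = image.img_to_array(img)
x = x / 255.0                  # the same scaling the training generator applied
x = np.expand_dims(x, axis=0)  # add the batch dimension
a = np.argmax(model.predict(x), axis=1)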
I have an error in the following code. The error occurs in the second part of the code; in the first part I am declaring my dataset, layers, etc.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
data=pd.read_excel('/content/dataset.xlsx')
data.head()
data.plot(kind='scatter', x='fiyat', y='yil',alpha = 0.5,color = 'red')
plt.xlabel('price') # label = name of label
plt.ylabel('year')
plt.title('Fiyat ve yil Scatter Plot')
data.plot(kind='scatter', x='fiyat', y='km',alpha = 0.5,color = 'grey')
plt.xlabel('price') # label = name of label
plt.ylabel('km')
plt.title('Fiyat ve km Scatter Plot')
data.plot(kind='scatter', x='fiyat', y='motor_gucu_hp',alpha = 0.5,color = 'green')
plt.xlabel('price') # label = name of label
plt.ylabel('machine power')
plt.title('fiyat ve motor_gucu_hp Scatter Plot')
# Importing the dataset
X = data.iloc[:, data.columns != 'fiyat']
y = data.fiyat
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.preprocessing import StandardScaler
from matplotlib import pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
from sklearn.model_selection import train_test_split
# define base model
def baseline_model():
    # create model
    model = Sequential()
    model.add(Dense(30, input_dim=120, kernel_initializer='normal', activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mse',
                  optimizer='adam',
                  metrics=['mae'])
    return model
model = baseline_model()
model.summary()
And I get the error here, at the model.fit call:
import tensorflow as tf
from tensorflow import keras
import numpy as np
# Display training progress by printing a single dot for each completed epoch
EPOCHS = 500
# Store training stats
history = model.fit(X_train, y_train, epochs=EPOCHS,
batch_size=16, verbose=0)
ValueError: Input 0 of layer sequential_2 is incompatible with the layer: expected axis -1 of input shape to have value 120 but received input with shape (None, 47)
I get the error above. Can you help? What can I do?
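The mismatch is between the hard-coded input_dim=120 and the 47 columns the scaled training matrix actually has. A minimal sketch of the usual fix, deriving the input size from the data rather than hard-coding it (this assumes X_train from the split above):

def baseline_model(n_features):
    model = Sequential()
    # size the input layer from the data instead of hard-coding 120
    model.add(Dense(30, input_dim=n_features, kernel_initializer='normal', activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(120, activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
    return model

model = baseline_model(X_train.shape[1])  # 47 columns in this dataset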
This code reads in a set of training and testing guitar JPG images for the neural net to learn from and test on.
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import random
DATADIR = "C:/Users/TheKid/Data/DataMiningProject/DataSet"
CATEGORIES = ["Fender_Jazzmaster", "Gibson_ES"]
CATEGORIES2 = ["Test"]
for category in CATEGORIES:
    path = os.path.join(DATADIR, category)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)

IMG_SIZE = 70
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
training_data = []
def create_training_data():
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        class_num = CATEGORIES.index(category)
        for img in os.listdir(path):
            img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
            new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
            training_data.append([new_array, class_num])
create_training_data()
print(len(training_data))
random.shuffle(training_data)
X = []
y = []
for features, label in training_data:
    X.append(features)
    y.append(label)
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
for category in CATEGORIES2:
    path2 = os.path.join(DATADIR, category)
    for img in os.listdir(path2):
        img_array2 = cv2.imread(os.path.join(path2, img), cv2.IMREAD_GRAYSCALE)

IMG_SIZE = 70
new_array2 = cv2.resize(img_array2, (IMG_SIZE, IMG_SIZE))
testing_data = []
def create_testing_data():
    for category in CATEGORIES2:
        path2 = os.path.join(DATADIR, category)
        class_num2 = CATEGORIES2.index(category)
        for img in os.listdir(path2):
            img_array2 = cv2.imread(os.path.join(path2, img), cv2.IMREAD_GRAYSCALE)
            new_array2 = cv2.resize(img_array2, (IMG_SIZE, IMG_SIZE))
            testing_data.append([new_array2, class_num2])
create_testing_data()
print(len(testing_data))
random.shuffle(testing_data)
X2 = []
y2 = []
for features, label in testing_data:
    X2.append(features)
    y2.append(label)
X2 = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
import pickle
pickle_out = open("X.pickle" , "wb")
pickle.dump(X, pickle_out)
pickle_out.close()
pickle_out = open("y.pickle" , "wb")
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open("X.pickle", "rb")
X = pickle.load(pickle_in)
pickle_out = open("X2.pickle" , "wb")
pickle.dump(X2, pickle_out)
pickle_out.close()
pickle_out = open("y2.pickle" , "wb")
pickle.dump(y2, pickle_out)
pickle_out.close()
pickle_in = open("X2.pickle", "rb")
X = pickle.load(pickle_in)
This next bit of code takes in the pickle files saved by the previous code and is supposed to use the Keras Tuner search function to run different variants of the neural net with different numbers of conv layers, layer sizes, etc., so I can choose the most efficient version. But when run, this error gets thrown:
ValueError: Data cardinality is ambiguous:
x sizes: 1312
y sizes: 12
Please provide data which shares the same first dimension.
The shapes of all the variables are:
x_train = (1312, 70, 70, 1)
y_train = (1312,)
x_test = (1312, 70, 70, 1)
y_test = (12,)
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.callbacks import TensorBoard
import numpy as np
import time
import pickle
import matplotlib.pyplot as plt
from tensorflow import keras
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters
pickle_in = open("X.pickle","rb")
x_train = pickle.load(pickle_in)
pickle_in = open("y.pickle","rb")
y_train = pickle.load(pickle_in)
pickle_in = open("X2.pickle","rb")
x_test = pickle.load(pickle_in)
pickle_in = open("y2.pickle","rb")
y_test = pickle.load(pickle_in)
x_train=np.array(x_train/255.0)
y_train=np.array(y_train)
x_test=np.array(x_test/255.0)
y_test=np.array(y_test)
LOG_DIR = f"{int(time.time())}"
def build_model(hp):
    model = keras.models.Sequential()
    model.add(Conv2D(hp.Int("input_units", 32, 256, 32), (3, 3), input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for i in range(hp.Int("n_layers", 1, 4)):
        model.add(Conv2D(hp.Int(f"conv-{i}_units", 32, 256, 32), (3, 3)))
        model.add(Activation('relu'))
    model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
    model.add(Dense(10))
    model.add(Activation("softmax"))
    model.compile(optimizer="adam",
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    return model
tuner = RandomSearch(
build_model,
objective = "val_accuracy",
max_trials = 1,
executions_per_trial = 1,
directory = LOG_DIR)
tuner.search(x=x_train,
y=y_train,
epochs=1,
batch_size=64,
validation_data=(x_test,y_test))
with open(f"tuner_{int(time.time())}.pkl", "wb") as f:
pickle.dump(tuner, f)
tuner = pickle.load(open(""))
print(tuner.get_best_hyperparameters()[0].values)
How would I go about resolving this error? It seems like a matrix formatting issue to me, but I have little experience dealing with a problem like this.
As the error message and the shapes of the data (x_test and y_test) clearly suggest, you have 1312 rows in x_test but only 12 rows in y_test, and you are feeding this data to validation_data=(x_test, y_test).
Pass the same number of rows for x_test and y_test in validation_data and this should fix your error.
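A likely culprit in the preprocessing code posted above (an observation from the code, not something stated in the answer): the test array X2 is built from X rather than X2, and X2.pickle is then loaded back into X. A sketch of the corrected lines:

# build the test array from the test features, not the training ones
X2 = np.array(X2).reshape(-1, IMG_SIZE, IMG_SIZE, 1)

# ...and load X2.pickle back into X2, not X
pickle_in = open("X2.pickle", "rb")
X2 = pickle.load(pickle_in)

With that change, x_test and y_test both have 12 rows and the cardinality check passes.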
I have created a project using Keras and TensorFlow. I used the NSL-KDD dataset and coded my project in Python. I also used the SGD optimizer.
I would like to fit a model, then evaluate it, and then check its accuracy (so I can compare it to the results from machine learning).
Here is my complete code below; please review it.
import tensorflow as tf
from keras import backend as K
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(0)
model_version = "2"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('KDD_Dataset.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 41:42].values
# Encoding categorical data X
from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
X[:,0] = labelencoder_X.fit_transform(X[:,0])
X[:,1] = labelencoder_X.fit_transform(X[:,1])
X[:,2] = labelencoder_X.fit_transform(X[:,2])
#
from sklearn.preprocessing import OneHotEncoder
onehotencoder_0 = OneHotEncoder(categorical_features=[0])
onehotencoder_1 = OneHotEncoder(categorical_features=[1])
onehotencoder_2 = OneHotEncoder(categorical_features=[2])
X = onehotencoder_0.fit_transform(X).toarray()
X = onehotencoder_1.fit_transform(X).toarray()
X = onehotencoder_2.fit_transform(X).toarray()
# Encoding categorical data y
from sklearn.preprocessing import LabelEncoder
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
max(y)
# Splitting the dataset into the Training set and Test set
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.2,
random_state = 0)
# create the model
model = Sequential()
model.add(Dense(41, input_dim=8, init='uniform', activation='relu'))
model.add(Dense(20, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
# compile the model
model.compile(loss='binary_crossentropy', optimizer=sgd,metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=200, batch_size=5, verbose=0)
See Dense(41, input_dim=8, init='uniform', activation='relu').
The model you defined expects 8 input features, but your inputs have 45 features, so they do not match. You have to either build the model with 45 input features to match the data, or cut the input down to 8 features to match your model.
Change the line
model.add(Dense(41, input_dim=8, init='uniform', activation='relu'))
to
model.add(Dense(42, input_dim=42, init='uniform', activation='relu'))
and change optimizer=sgd to optimizer='sgd' (the variable sgd is never defined).
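Putting the two suggestions together, a minimal sketch with input_dim derived from the data rather than hard-coded, since the one-hot encoding determines how many columns X ends up with (this assumes X_train from the split above and keeps the post's older Keras init=/nb_epoch style):

# size the first layer from the data so input_dim always matches X_train
n_features = X_train.shape[1]

model = Sequential()
model.add(Dense(42, input_dim=n_features, init='uniform', activation='relu'))
model.add(Dense(20, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))

# pass the optimizer by name instead of the undefined variable sgd
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=200, batch_size=5, verbose=0)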