Train multiple neural nets in parallel on CPU in keras - tensorflow

I'd like to train tens of small neural networks in parallel on the CPU
in Keras with the TensorFlow backend.
By default TensorFlow splits the batches over the cores when training a single neural network, but my average core utilization is only around 50%.
So it seems like a good idea to assign the complete training of one neural network to one core, so that less data has to be moved around.
I can't figure out how to specify this.
Also note that the neural networks have different architectures, so combining everything into a single graph would lead to sparser matrices and slower execution.

There are some key points to making this work:
Use processes, not threads. Threads give you asynchronous but not truly parallel execution (because of Python's GIL), so only one CPU core would be used.
For practical purposes, building, compiling and fitting a neural net should all happen in the same process.
For each process, a separate TensorFlow graph and session need to be initialized.
After training the nets, you will likely want to serialize them for later use. It's important to use Keras' model.save(file_name), not regular pickling.
Implementation:
Extend the Python Process class:
from keras.layers import Dense
from keras.models import Sequential
from multiprocessing import Process, Queue
import tensorflow as tf
from train_val_set import TrainValSet
class NNProcess(Process):
    def __init__(self, process_id: int, nr_nets: int, ret_queue: Queue):
        super(NNProcess, self).__init__()
        self.process_id = process_id
        self.neural_nets = []
        self.train_val_set = None
        self.nr_nets = nr_nets
        self.ret_queue = ret_queue

    def set_train_val(self, train_val_set: TrainValSet):
        self.train_val_set = train_val_set

    def get_session_config(self):
        num_cores = 1
        num_CPU = 1
        num_GPU = 0
        config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
                                inter_op_parallelism_threads=num_cores,
                                allow_soft_placement=False,
                                device_count={'CPU': num_CPU, 'GPU': num_GPU})
        return config

    def run(self):
        print("process " + str(self.process_id) + " starting...")
        with tf.Session(graph=tf.Graph(), config=self.get_session_config()) as session:
            self.init_nets()
            self.compile()
            self.fit_nets(self.train_val_set)
            for i in range(0, self.nr_nets):
                # saved with Keras' model.save(), not pickled (see key points above)
                file_name = self.neural_nets[i].name + "_" + str(i) + ".h5"
                self.neural_nets[i].save(file_name)
                self.ret_queue.put(file_name)
        print("process " + str(self.process_id) + " finished.")

    def compile(self):
        for neural_net in self.neural_nets:
            neural_net.compile(loss='categorical_crossentropy',
                               optimizer='sgd',
                               metrics=['accuracy'])

    def init_nets(self):
        for i in range(0, self.nr_nets):
            model = Sequential()
            model.add(Dense(units=64, activation='relu', input_dim=100))
            model.add(Dense(units=10, activation='softmax'))
            self.neural_nets.append(model)

    def fit_nets(self, train_val_set: TrainValSet):
        for i in range(0, self.nr_nets):
            # epochs and batch_size are placeholder hyperparameters
            self.neural_nets[i].fit(train_val_set.x_train, train_val_set.y_train,
                                    validation_data=(train_val_set.x_val, train_val_set.y_val),
                                    epochs=10, batch_size=32)
Helper class:
from pandas import DataFrame


class TrainValSet:
    def __init__(self, df_train: DataFrame, df_val: DataFrame):
        self.x_train, self.y_train = self.get_x_y(df_train)
        self.x_val, self.y_val = self.get_x_y(df_val)

    def get_x_y(self, df: DataFrame):
        X = df.iloc[:, 0:-1].values
        y = df.iloc[:, -1].values
        return X, y
main file:
import pandas as pd
from multiprocessing import Manager
import tensorflow as tf
from keras import backend as K
from train_val_set import TrainValSet
from nn_process import NNProcess
def load_train_val_test_datasets(dataset_dir: str, dataset_name: str):
    df_train = pd.read_csv(dataset_dir + dataset_name + "/" + dataset_name + "_train.csv", header=None)
    df_val = pd.read_csv(dataset_dir + dataset_name + "/" + dataset_name + "_val.csv", header=None)
    df_test = pd.read_csv(dataset_dir + dataset_name + "/" + dataset_name + "_test.csv", header=None)
    return df_train, df_val, df_test


# config for prediction and evaluation only
def get_session_config(num_cores):
    num_CPU = 1
    num_GPU = 0
    config = tf.ConfigProto(intra_op_parallelism_threads=num_cores,
                            inter_op_parallelism_threads=num_cores,
                            allow_soft_placement=True,
                            device_count={'CPU': num_CPU, 'GPU': num_GPU})
    return config


def train_test(nr_nets: int, nr_processes: int):
    # 'data/' is a placeholder dataset_dir; adjust to your directory layout
    df_train, df_val, df_test = load_train_val_test_datasets('data/', 'MNIST')
    train_val_set = TrainValSet(df_train, df_val)
    nets_per_proc = int(nr_nets / nr_processes)
    nn_queue = Manager().Queue()
    processes = []
    for i in range(0, nr_processes):
        nn_process = NNProcess(i, nets_per_proc, nn_queue)
        nn_process.set_train_val(train_val_set)
        processes.append(nn_process)
    for nn_process in processes:
        nn_process.start()
    for nn_process in processes:
        nn_process.join()
    tf_session = tf.Session(config=get_session_config(4))
    K.set_session(tf_session)
    # ...
    # load neural nets from files
    # do predictions
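For the final two comment lines, a minimal sketch of what the loading/prediction step could look like, assuming the file names that the worker processes put on nn_queue and Keras' load_model (x_test is a placeholder for your prepared test features):
from keras.models import load_model

# drain the queue of file names written by the worker processes
trained_nets = []
while not nn_queue.empty():
    trained_nets.append(load_model(nn_queue.get()))

# x_test is a placeholder for your prepared test features
# predictions = [net.predict(x_test) for net in trained_nets]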

embedding layer for several categories and regression

I found [this][1] and created this running POC code:
import tensorflow as tf
from tensorflow import keras
import numpy as np
def get_embedding_size(cat_data):
    no_of_unique_cat = len(np.unique(cat_data))
    return int(min(np.ceil((no_of_unique_cat) / 2), 50))
# 3 numerical variables
num_data = np.random.random(size=(10,3))
# 2 categorical variables
cat_data_1 = np.random.randint(0,4,10)
cat_data_2 = np.random.randint(0,5,10)
target = np.random.random(size=(10,1))
no_unique_categories_category_1 = len(np.unique(cat_data_1))
embedding_size_category_1 = get_embedding_size(cat_data_1)
inp_cat_data = keras.layers.Input(shape=(no_unique_categories_category_1,))
# 3 columns
inp_num_data = keras.layers.Input(shape=(num_data.shape[1],))
emb = keras.layers.Embedding(input_dim=no_unique_categories_category_1, output_dim=embedding_size_category_1)(inp_cat_data)
flatten = keras.layers.Flatten()(emb)
# Concatenate two layers
conc = keras.layers.Concatenate()([flatten, inp_num_data])
dense1 = keras.layers.Dense(3, activation=tf.nn.relu,)(conc)
# Creating output layer
out = keras.layers.Dense(1, activation=None)(dense1)
model = keras.Model(inputs=[inp_cat_data, inp_num_data], outputs=out)
model.compile(optimizer='adam',
              loss=keras.losses.mean_squared_error,
              metrics=[keras.metrics.mean_squared_error])
one_hot_encoded_cat_data_1 = np.eye(cat_data_1.max()+1)[cat_data_1]
model.fit([one_hot_encoded_cat_data_1, num_data], target)
I wonder how one could add the additional categorical variable cat_data_2? I am also wondering why one-hot encoding is still used here. Isn't the whole point of embeddings to make that unnecessary? Thanks!
The learned embedding weights can be inspected with:
model.layers[1].get_weights()[0]
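For what it's worth, here is a minimal sketch of one way to do both: give each categorical variable its own Input/Embedding pair and feed the raw integer codes (no one-hot encoding). It reuses the names from the POC above; the layer sizes and the model2 name are just placeholders:
inp_cat_1 = keras.layers.Input(shape=(1,))   # integer codes for cat_data_1
inp_cat_2 = keras.layers.Input(shape=(1,))   # integer codes for cat_data_2
inp_num = keras.layers.Input(shape=(num_data.shape[1],))
# input_dim must cover the largest code, so max()+1 is used instead of the unique count
emb_1 = keras.layers.Embedding(input_dim=int(cat_data_1.max() + 1),
                               output_dim=get_embedding_size(cat_data_1))(inp_cat_1)
emb_2 = keras.layers.Embedding(input_dim=int(cat_data_2.max() + 1),
                               output_dim=get_embedding_size(cat_data_2))(inp_cat_2)
conc = keras.layers.Concatenate()([keras.layers.Flatten()(emb_1),
                                   keras.layers.Flatten()(emb_2),
                                   inp_num])
dense1 = keras.layers.Dense(3, activation=tf.nn.relu)(conc)
out = keras.layers.Dense(1, activation=None)(dense1)
model2 = keras.Model(inputs=[inp_cat_1, inp_cat_2, inp_num], outputs=out)
model2.compile(optimizer='adam', loss=keras.losses.mean_squared_error)
# the integer arrays are passed directly, no one-hot encoding needed
model2.fit([cat_data_1.reshape(-1, 1), cat_data_2.reshape(-1, 1), num_data], target)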
[1]: https://mmuratarat.github.io/2019-06-12/embeddings-with-numeric-variables-Keras

Abysmal tf.GradientTape performance compared to tf.gradients() for computing jacobians

SOLUTION BELOW:
Scenario:
I am trying to compute the jacobian of a user-defined function many, many times in a loop. I am able to do this with TF 2's GradientTape as well as the older session-based tf.gradients() method. The problem is that GradientTape is terribly slow (about 100x slower) than tf.gradients(). It has features I'd like to use (batch_jacobian, Hessian support, etc.), but if it's 100x slower I can't use it.
The Question:
It's not clear to me whether I'm simply misusing GradientTape, or whether it will always be slower because it has to re-differentiate the provided function every time it is called (my suspicion). I'm asking for tips to fix my use of GradientTape, or for confirmation that it will always be fundamentally slower than tf.gradients() by orders of magnitude.
Related Questions:
Repeated use of GradientTape for multiple Jacobian calculations - same scenario, unanswered
Does `GradientTape` need to re-differentiate each evaluation of a derivative? - same scenario, unanswered
using one GradientTape with global context - loosely related, having trouble applying that solution to my scenario
Fully contained minimum example to compare GradientTape and tf.gradients():
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
import numpy as np
# from tensorflow.python.ops.parallel_for.gradients import jacobian, batch_jacobian
import timeit
class FunctionCaller(object):
    def __init__(self, func, nX, dtype=tf.float64, useSessions=True):
        if useSessions:
            disable_eager_execution()
        self.func = func
        self.nX = nX
        self.useSessions = useSessions
        self.dtype = dtype
        self.sess = tf.compat.v1.Session() if useSessions else None
        if not useSessions:
            return

        #
        # we are in session mode, so build the graph and take the batch-jacobian of the function's outputs
        #
        xTensor = tf.compat.v1.placeholder(dtype, shape=[None, nX])
        # add function to graph and guarantee its output shape
        func_tensor = tf.reshape(func(xTensor), [-1, nX])
        # take the gradient for each output, one at a time, and stack the results back together
        each_output = tf.unstack(func_tensor, nX, axis=1)
        jac_x = tf.stack([tf.gradients(output, xTensor, unconnected_gradients='zero')[0]
                          for output in each_output], axis=1)
        # record these tensors so we can use them later with session.run()
        self.xTensor = xTensor
        self.func_tensor = func_tensor
        self.jac_func_tensor = jac_x

    def jac(self, x_i):
        if self.useSessions:
            return self.sess.run(self.jac_func_tensor, {self.xTensor: x_i})
        else:
            return self._useGradientTape(x_i)

    # THIS FUNCTION IS SUPER INEFFICIENT.
    def _useGradientTape(self, x_i):
        with tf.GradientTape(persistent=True) as g:
            xTensor = tf.Variable(x_i, dtype=self.dtype)  # is this my problem??? i recreate x every time?
            y = tf.reshape(self.func(xTensor), [-1, self.nX])
        jac_x_at_i = g.batch_jacobian(y, xTensor)
        # del g
        return jac_x_at_i.numpy()

    def __del__(self):
        if self.sess is not None:
            self.sess.close()


def main():
    @tf.function
    def Xdot(x_i):
        x_0, x_1, x_2 = tf.split(x_i, 3, axis=1)
        return tf.concat([x_2 * tf.sin(x_2), x_2 * tf.cos(x_2), x_2], axis=1)

    nT = 20
    nX = 3
    # create some trash data
    x_i = np.arange(nT * nX).reshape([-1, nX])
    nTrials = 100

    # try the eager version first
    caller_eager = FunctionCaller(Xdot, nX, useSessions=False)
    start_time = timeit.default_timer()
    for _ in range(nTrials):
        jac_eager = caller_eager.jac(x_i)
    elapsed = timeit.default_timer() - start_time
    print("eager code took {} sec: {} sec/trial".format(elapsed, elapsed / nTrials))

    # now try the sessions version
    caller_sessions = FunctionCaller(Xdot, nX, useSessions=True)
    start_time = timeit.default_timer()
    caller_sessions.jac(x_i)  # call it once to do its graph building stuff?
    for _ in range(nTrials):
        jac_session = caller_sessions.jac(x_i)
    elapsed = timeit.default_timer() - start_time
    print("session code took {} sec: {} sec/trial".format(elapsed, elapsed / nTrials))

    residual = np.max(np.abs(jac_eager - jac_session))
    print('residual between eager and session trials is {}'.format(residual))


if __name__ == "__main__":
    main()
EDIT - SOLUTION:
xdurch0 pointed out below that I should wrap _useGradientTape() in a @tf.function, something I was unsuccessful with before for other reasons. Once I did that, I had to move xTensor's definition outside the @tf.function wrapper by making it a member variable and using assign().
With all this done, I find that GradientTape (for this simple example) is now on the same order of magnitude as tf.gradients(). When running enough trials (~1E5), it's twice as fast as tf.gradients(). Awesome!
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
import numpy as np
import timeit
class FunctionCaller(object):
    def __init__(self, func, nT, nX, dtype=tf.float64, useSessions=True):
        if useSessions:
            disable_eager_execution()
        self.func = func
        self.nX = nX
        self.useSessions = useSessions
        self.dtype = dtype
        self.sess = tf.compat.v1.Session() if useSessions else None
        if not useSessions:
            # you should be able to create without an initial value, but tf is demanding one
            # despite what the docs say. bug?
            # tf.Variable(initial_value=None, shape=[None, nX], validate_shape=False, dtype=self.dtype)
            self.xTensor = tf.Variable([[0] * nX] * nT, dtype=self.dtype)  # x needs to be properly sized once
            return

        #
        # we are in session mode, so build the graph and take the batch-jacobian of the function's outputs
        #
        xTensor = tf.compat.v1.placeholder(dtype, shape=[None, nX])
        # add function to graph and guarantee its output shape
        func_tensor = tf.reshape(func(xTensor), [-1, nX])
        # take the gradient for each output, one at a time, and stack the results back together
        each_output = tf.unstack(func_tensor, nX, axis=1)
        jac_x = tf.stack([tf.gradients(output, xTensor, unconnected_gradients='zero')[0]
                          for output in each_output], axis=1)
        # record these tensors so we can use them later with session.run()
        self.xTensor = xTensor
        self.func_tensor = func_tensor
        self.jac_func_tensor = jac_x

    def jac(self, x_i):
        if self.useSessions:
            return self.sess.run(self.jac_func_tensor, {self.xTensor: x_i})
        else:
            return self._useGradientTape(x_i).numpy()

    @tf.function  # THIS IS CRUCIAL
    def _useGradientTape(self, x_i):
        with tf.GradientTape(persistent=True) as g:
            self.xTensor.assign(x_i)  # you need to create the variable once outside the graph
            y = tf.reshape(self.func(self.xTensor), [-1, self.nX])
        jac_x_at_i = g.batch_jacobian(y, self.xTensor)
        # del g
        return jac_x_at_i

    def __del__(self):
        if self.sess is not None:
            self.sess.close()


def main():
    @tf.function
    def Xdot(x_i):
        x_0, x_1, x_2 = tf.split(x_i, 3, axis=1)
        return tf.concat([x_2 * tf.sin(x_2), x_2 * tf.cos(x_2), x_2], axis=1)

    nT = 20
    nX = 3
    # create some trash data
    x_i = np.random.random([nT, nX])
    nTrials = 1000  # i find that for nTrials<=1E3 eager is slower, it's faster for >=1E4, and TWICE as fast for >=1E5

    # try the eager version first
    caller_eager = FunctionCaller(Xdot, nT, nX, useSessions=False)
    start_time = timeit.default_timer()
    for _ in range(nTrials):
        jac_eager = caller_eager.jac(x_i)
    elapsed = timeit.default_timer() - start_time
    print("eager code took {} sec: {} sec/trial".format(elapsed, elapsed / nTrials))

    # now try the sessions version
    caller_sessions = FunctionCaller(Xdot, nT, nX, useSessions=True)
    start_time = timeit.default_timer()
    for _ in range(nTrials):
        jac_session = caller_sessions.jac(x_i)
    elapsed = timeit.default_timer() - start_time
    print("session code took {} sec: {} sec/trial".format(elapsed, elapsed / nTrials))

    residual = np.max(np.abs(jac_eager - jac_session))
    print('residual between eager and session trials is {}'.format(residual))


if __name__ == "__main__":
    main()

Error when using tensorflow HMC to marginalise GPR hyperparameters

I would like to use TensorFlow (version 2) with Gaussian process regression
to fit some data, and I found the Google Colab example online here [1].
I have turned some of this notebook into the minimal example below.
When using MCMC to marginalize the hyperparameters, the code sometimes fails with the following error. Has anyone seen this before, or does anyone know how to get around it?
tensorflow.python.framework.errors_impl.InvalidArgumentError: Input matrix is not invertible.
[[{{node mcmc_sample_chain/trace_scan/while/body/_168/smart_for_loop/while/body/_842/dual_averaging_step_size_adaptation___init__/_one_step/transformed_kernel_one_step/mh_one_step/hmc_kernel_one_step/leapfrog_integrate/while/body/_1244/leapfrog_integrate_one_step/maybe_call_fn_and_grads/value_and_gradients/value_and_gradient/gradients/leapfrog_integrate_one_step/maybe_call_fn_and_grads/value_and_gradients/value_and_gradient/PartitionedCall_grad/PartitionedCall/gradients/JointDistributionNamed/log_prob/JointDistributionNamed_log_prob_GaussianProcess/log_prob/JointDistributionNamed_log_prob_GaussianProcess/get_marginal_distribution/Cholesky_grad/MatrixTriangularSolve}}]] [Op:__inference_do_sampling_113645]
Function call stack:
do_sampling
[1] https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Process_Regression_In_TFP.ipynb#scrollTo=jw-_1yC50xaM
Note that some of the code below is a bit redundant in places, but it should
be able to reproduce the error.
Thanks!
import time
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tfb = tfp.bijectors
tfd = tfp.distributions
tfk = tfp.math.psd_kernels
tf.enable_v2_behavior()
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#%pylab inline
# Configure plot defaults
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['grid.color'] = '#666666'
#%config InlineBackend.figure_format = 'png'
def sinusoid(x):
    return np.sin(3 * np.pi * x[..., 0])


def generate_1d_data(num_training_points, observation_noise_variance):
    """Generate noisy sinusoidal observations at a random set of points.

    Returns:
        observation_index_points, observations
    """
    index_points_ = np.random.uniform(-1., 1., (num_training_points, 1))
    index_points_ = index_points_.astype(np.float64)
    # y = f(x) + noise
    observations_ = (sinusoid(index_points_) +
                     np.random.normal(loc=0,
                                      scale=np.sqrt(observation_noise_variance),
                                      size=(num_training_points)))
    return index_points_, observations_
# Generate training data with a known noise level (we'll later try to recover
# this value from the data).
NUM_TRAINING_POINTS = 100
observation_index_points_, observations_ = generate_1d_data(
num_training_points=NUM_TRAINING_POINTS,
observation_noise_variance=.1)
def build_gp(amplitude, length_scale, observation_noise_variance):
    """Defines the conditional dist. of GP outputs, given kernel parameters."""
    # Create the covariance kernel, which will be shared between the prior (which we
    # use for maximum likelihood training) and the posterior (which we use for
    # posterior predictive sampling)
    kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)
    # Create the GP prior distribution, which we will use to train the model
    # parameters.
    return tfd.GaussianProcess(
        kernel=kernel,
        index_points=observation_index_points_,
        observation_noise_variance=observation_noise_variance)


gp_joint_model = tfd.JointDistributionNamed({
    'amplitude': tfd.LogNormal(loc=0., scale=np.float64(1.)),
    'length_scale': tfd.LogNormal(loc=0., scale=np.float64(1.)),
    'observation_noise_variance': tfd.LogNormal(loc=0., scale=np.float64(1.)),
    'observations': build_gp,
})
x = gp_joint_model.sample()
lp = gp_joint_model.log_prob(x)
print("sampled {}".format(x))
print("log_prob of sample: {}".format(lp))
# Create the trainable model parameters, which we'll subsequently optimize.
# Note that we constrain them to be strictly positive.
constrain_positive = tfb.Shift(np.finfo(np.float64).tiny)(tfb.Exp())
amplitude_var = tfp.util.TransformedVariable(
    initial_value=1.,
    bijector=constrain_positive,
    name='amplitude',
    dtype=np.float64)

length_scale_var = tfp.util.TransformedVariable(
    initial_value=1.,
    bijector=constrain_positive,
    name='length_scale',
    dtype=np.float64)

observation_noise_variance_var = tfp.util.TransformedVariable(
    initial_value=1.,
    bijector=constrain_positive,
    name='observation_noise_variance_var',
    dtype=np.float64)

trainable_variables = [v.trainable_variables[0] for v in
                       [amplitude_var,
                        length_scale_var,
                        observation_noise_variance_var]]
# Use `tf.function` to trace the loss for more efficient evaluation.
@tf.function(autograph=False, experimental_compile=False)
def target_log_prob(amplitude, length_scale, observation_noise_variance):
    return gp_joint_model.log_prob({
        'amplitude': amplitude,
        'length_scale': length_scale,
        'observation_noise_variance': observation_noise_variance,
        'observations': observations_
    })
# Now we optimize the model parameters.
num_iters = 1000
optimizer = tf.optimizers.Adam(learning_rate=.01)
# Store the likelihood values during training, so we can plot the progress
lls_ = np.zeros(num_iters, np.float64)
for i in range(num_iters):
    with tf.GradientTape() as tape:
        loss = -target_log_prob(amplitude_var, length_scale_var,
                                observation_noise_variance_var)
    grads = tape.gradient(loss, trainable_variables)
    optimizer.apply_gradients(zip(grads, trainable_variables))
    lls_[i] = loss
print('Trained parameters:')
print('amplitude: {}'.format(amplitude_var._value().numpy()))
print('length_scale: {}'.format(length_scale_var._value().numpy()))
print('observation_noise_variance: {}'.format(observation_noise_variance_var._value().numpy()))
num_results = 100
num_burnin_steps = 50
sampler = tfp.mcmc.TransformedTransitionKernel(
    tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob,
        step_size=tf.cast(0.1, tf.float64),
        num_leapfrog_steps=8),
    bijector=[constrain_positive, constrain_positive, constrain_positive])

adaptive_sampler = tfp.mcmc.DualAveragingStepSizeAdaptation(
    inner_kernel=sampler,
    num_adaptation_steps=int(0.8 * num_burnin_steps),
    target_accept_prob=tf.cast(0.75, tf.float64))
initial_state = [tf.cast(x, tf.float64) for x in [1., 1., 1.]]
# Speed up sampling by tracing with `tf.function`.
@tf.function(autograph=False, experimental_compile=False)
def do_sampling():
    return tfp.mcmc.sample_chain(
        kernel=adaptive_sampler,
        current_state=initial_state,
        num_results=num_results,
        num_burnin_steps=num_burnin_steps,
        trace_fn=lambda current_state, kernel_results: kernel_results)
t0 = time.time()
samples, kernel_results = do_sampling()
t1 = time.time()
print("Inference ran in {:.2f}s.".format(t1-t0))
This can happen if you have multiple index points that are very close together, so you might consider using np.linspace instead of a random draw, or doing some post-filtering of your random draw. I would also suggest a slightly bigger jitter epsilon, maybe 1e-6.
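To make that concrete, a minimal sketch of both suggestions applied to the code above (the jitter keyword of tfd.GaussianProcess is the knob for that epsilon; 1e-6 is just the value suggested here):
# evenly spaced index points avoid near-duplicate rows in the kernel matrix
observation_index_points_ = np.linspace(-1., 1., NUM_TRAINING_POINTS)[..., np.newaxis].astype(np.float64)

def build_gp(amplitude, length_scale, observation_noise_variance):
    kernel = tfk.ExponentiatedQuadratic(amplitude, length_scale)
    return tfd.GaussianProcess(
        kernel=kernel,
        index_points=observation_index_points_,
        observation_noise_variance=observation_noise_variance,
        jitter=1e-6)  # extra diagonal term to keep the Cholesky factorization stable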

XGBoost + GridSearch: weird warning

Below is code I wrote for hyperparameter tuning of XGBoost using RandomizedSearchCV:
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, auc, confusion_matrix
from pprint import pprint
from xgboost import XGBClassifier
import numpy as np
import pandas as pd
import time

# instantiate XGBoost model
clf = XGBClassifier(missing=np.nan, nthreads=-1)

# Define scoring metrics
scorers = {
    'accuracy_score': make_scorer(accuracy_score),
    'precision_score': make_scorer(precision_score),
    'recall_score': make_scorer(recall_score)
}

param_grid_dummy = {
    "n_estimators": [25, 250],
    "max_depth": [3, 5],
    "learning_rate": [0.0005, 0.005],
}

def random_search_wrapper(refit_score='precision_score'):
    """
    Fits a RandomizedSearchCV classifier using refit_score for optimization
    and prints classifier performance metrics.
    """
    rf_random = RandomizedSearchCV(estimator=clf, param_distributions=param_grid_dummy, n_iter=3,
                                   scoring=scorers, refit=refit_score, cv=3,
                                   return_train_score=True, n_jobs=-1)
    rf_random.fit(X_train_df, Y_train)

    # make the predictions
    Y_pred = rf_random.predict(X_test_df)

    print('Best params for {}'.format(refit_score))
    print(rf_random.best_params_)

    # confusion matrix on test data
    print('\nConfusion matrix of the classifier optimized for {} on the test data: '.format(refit_score))
    print(pd.DataFrame(confusion_matrix(Y_test, Y_pred),
                       columns=['pred_neg', 'pred_pos'], index=['neg', 'pos']))
    return rf_random

# Optimize classifier for precision score
start = time.time()
rf_random_cl = random_search_wrapper(refit_score='precision_score')

# Print time
end = time.time()
print()
print((end - start)/60, "minutes")
I get a weird warning:
/anaconda3/lib/python3.7/site-packages/sklearn/preprocessing/label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.
if diff:
Can someone please help me understand what I am doing wrong here?
When I do a simple clf.fit(X_train_df, Y_train), it works perfectly fine.
This is an issue with the scikit-learn version; a few versions < 0.20.1 throw this error.
Your code is correct.
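A quick way to check whether you are on an affected release (the 0.20.1 threshold comes from the answer above; the upgrade command is the standard pip invocation):
import sklearn

print(sklearn.__version__)
# if this is older than 0.20.1, upgrading should silence the warning:
# pip install --upgrade scikit-learn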

Caffe always returns one label

I have trained a model with the caffe tools under bin, and now I am trying to do testing with a Python script: I read in an image, preprocess it myself (as I did for my training dataset) and load the pretrained weights into the net, but I almost always (99.99% of the time) receive the same result, 0, for every test image. I did consider that my model might be overfitting, but after training a few models I have come to realize the labels I get from predictions are most likely the cause. I have also increased dropout and taken random crops to overcome overfitting, and I have about 60K images for training. The dataset is also roughly balanced. I get between 77% and 87% accuracy during the evaluation step of training (depending on how I process the data, what architecture I use, etc.).
Excuse my super hacky code; I have been away from Caffe testing for some time, so I suspect the problem is how I pass the input data to the network, but I can't put my finger on it:
import h5py, os
import sys
sys.path.append("/home/X/Desktop/caffe-caffe-0.16/python")
from caffe.io import oversample
from caffe.io import resize_image
import caffe
from random import randint
import numpy as np
import cv2
import matplotlib.pyplot as plt
from collections import Counter as Cnt
meanImg = cv2.imread('/home/caffe/data/Ch/Final_meanImg.png')
model_def = '/home/X/Desktop/caffe-caffe-0.16/models/bvlc_googlenet/deploy.prototxt'
model_weights = '/media/X/DATA/SDet/Google__iter_140000.caffemodel'
# load the model
#caffe.set_mode_gpu()
#caffe.set_device(0)
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
with open( '/home/caffe/examples/sdet/SDet/test_random.txt', 'r' ) as T, open('/media/X/DATA/SDet/results/testResults.txt','w') as testResultsFile:
    readImgCounter = 0
    runningCorrect = 0
    runningAcc = 0.0
    #testResultsFile.write('filename'+' '+'prediction'+' '+'GT')
    lines = T.readlines()
    for i, l in enumerate(lines):
        sp = l.split(' ')
        video = sp[0].split('_')[0]
        impath = '/home/caffe/data/Ch/images/' + video + '/' + sp[0] + '.jpg'
        img = cv2.imread(impath)
        resized_img = resize_image(img, (255, 255))
        oversampledImages = oversample([resized_img], (224, 224))  # 5 crops x 2 mirror flips = return 10 images
        transposed_img = np.zeros((10, 3, 224, 224), dtype='f4')
        tp = np.zeros((1, 3, 224, 224), dtype='f4')
        predictedLabels = []
        for j in range(0, oversampledImages.shape[0] - 1):
            transposed_img[j] = oversampledImages[j].transpose((2, 0, 1))
            tp[0] = transposed_img[j]
            net.blobs['data'].data[0] = tp
            pred = net.forward(data=tp)
            predictedLabels.append(pred['prob'].argmax())
        print(predictedLabels)
        prediction, num_most_common = Cnt(predictedLabels).most_common(1)[0]
        print(prediction)
        readImgCounter = readImgCounter + 1
        if (prediction == int(sp[1])):
            runningCorrect = runningCorrect + 1
        runningAcc = runningCorrect / readImgCounter
        print('runningAcc:')
        print(runningAcc)
        print('-----------')
        print('runningCorrect:')
        print(runningCorrect)
        print('-----------')
        print('totalImgRead:')
        print(readImgCounter)
        print('-----------')
        testResultsFile.write(sp[0] + ' ' + str(prediction) + ' ' + sp[1])
        testResultsFile.write('\n')
I eventually fixed this problem. I am not 100% sure what did it, but it was most likely initializing the bias to 0 while learning.
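In case it helps anyone, a rough sketch of what "bias to 0" can look like when defining a layer with pycaffe's NetSpec (the layer names, sizes and LMDB path here are made up; the equivalent is a bias_filler block in the train prototxt):
import caffe
from caffe import layers as L, params as P

n = caffe.NetSpec()
# hypothetical data layer; in practice this points at your training LMDB
n.data, n.label = L.Data(source='train_lmdb', backend=P.Data.LMDB, batch_size=32, ntop=2)
n.conv1 = L.Convolution(n.data, num_output=64, kernel_size=3,
                        weight_filler=dict(type='xavier'),
                        bias_filler=dict(type='constant', value=0))  # bias initialized to 0
print(n.to_proto())  # emits the prototxt with the bias_filler block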