Two models of the same architecture with same weights giving different results - tensorflow

Problem
After copying weights from a pretrained model, I do not get the same output.
Description
The tf2cv repository provides pretrained models in TF2 for various backbones. Unfortunately, the codebase is of limited use to me because it uses subclassing via tf.keras.Model, which makes it very hard to extract intermediate outputs and gradients at will. I therefore embarked upon rewriting the code for the backbones using the functional API. After rewriting the resnet architecture code, I copied their weights into my model and saved them in SavedModel format. To test whether this was done correctly, I fed the same input to my model instance and theirs, and the results were different.
My approaches to debugging the problem
I checked the number of trainable and non-trainable parameters, and they are the same between my model instance and theirs.
I checked that all trainable weights have been copied, which they have.
My present line of thinking
I think it might be possible that weights have not been copied to the correct layers. For example: Layer X and Layer Y might have weights of the same shape, but during weight copying the weights of Layer Y might have gone into Layer X and vice versa. This is only possible if I have not mapped the layer names between the two models properly.
However, I have checked exhaustively and have not found any error so far.
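One way to make that check mechanical (a minimal sketch with a hypothetical helper; remap_dict refers to the name mapping built by remap_our_model_variables in the code below) is to compare every copied tensor by value, not just by count or shape:

import numpy as np

def verify_weight_copy(dst_model, src_model, remap_dict):
    # remap_dict maps a destination variable name to its source variable name.
    src_by_name = {v.name: v for v in src_model.weights}
    for dst_var in dst_model.weights:
        src_var = src_by_name[remap_dict[dst_var.name]]
        # Compare values, not shapes, so swapped same-shape layers are caught.
        if not np.allclose(dst_var.numpy(), src_var.numpy()):
            print('Mismatch:', dst_var.name, '<-', src_var.name)

Running this right after model.set_weights(new_weights) in the loop below would immediately flag any pair of same-shape layers whose weights ended up swapped.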
The Code
My code is attached below. Their (tf2cv) code for resnet can be found here.
Please note that resnet_orig in the following snippet is the same as here.
My converted code can be found here.
from vision.image import resnet as myresnet
from glob import glob
from loguru import logger
import tensorflow as tf
import resnet_orig
import re
import os
import numpy as np
from time import time
from copy import deepcopy
tf.random.set_seed(time())
models = [
    'resnet10',
    'resnet12',
    'resnet14',
    'resnetbc14b',
    'resnet16',
    'resnet18_wd4',
    'resnet18_wd2',
    'resnet18_w3d4',
    'resnet18',
    'resnet26',
    'resnetbc26b',
    'resnet34',
    'resnetbc38b',
    'resnet50',
    'resnet50b',
    'resnet101',
    'resnet101b',
    'resnet152',
    'resnet152b',
    'resnet200',
    'resnet200b',
]

def zipdir(path, ziph):
    # ziph is zipfile handle
    for root, dirs, files in os.walk(path):
        for file in files:
            ziph.write(os.path.join(root, file),
                       os.path.relpath(os.path.join(root, file),
                                       os.path.join(path, '..')))

def find_model_file(model_type):
    model_files = glob('*.h5')
    for m in model_files:
        if '{}-'.format(model_type) in m:
            return m
    return None

def remap_our_model_variables(our_variables, model_name):
    remapped = list()
    reg = re.compile(r'(stage\d+)')
    for var in our_variables:
        newvar = var.replace(model_name, 'features/features')
        stage_search = re.search(reg, newvar)
        if stage_search is not None:
            stage_search = stage_search[0]
            newvar = newvar.replace(stage_search,
                                    '{}/{}'.format(stage_search, stage_search))
        newvar = newvar.replace('conv_preact', 'conv/conv')
        newvar = newvar.replace('conv_bn', 'bn')
        newvar = newvar.replace('logits', 'output1')
        remapped.append(newvar)
    remap_dict = dict([(x, y) for x, y in zip(our_variables, remapped)])
    return remap_dict

def get_correct_variable(variable_name, trainable_variable_names):
    for i, var in enumerate(trainable_variable_names):
        if variable_name == var:
            return i
    logger.info('Uffff.....')
    return None

layer_regexp_compiled = re.compile(r'(.*)\/.*')
model_files = glob('*.h5')
a = np.ones(shape=(1, 224, 224, 3), dtype=np.float32)
inp = tf.constant(a, dtype=tf.float32)

for model_type in models:
    logger.info('Model is {}.'.format(model_type))
    model = eval('myresnet.{}(input_height=224,input_width=224,'
                 'num_classes=1000,data_format="channels_last")'.format(
                     model_type))
    model2 = eval('resnet_orig.{}(data_format="channels_last")'.format(
        model_type))
    model2.build(input_shape=(None, 224, 224, 3))
    model_name = find_model_file(model_type)
    logger.info('Model file is {}.'.format(model_name))
    original_weights = deepcopy(model2.weights)
    if model_name is not None:
        e = model2.load_weights(model_name, by_name=True, skip_mismatch=False)
        print(e)
        loaded_weights = deepcopy(model2.weights)
    else:
        logger.info('Pretrained model is not available for {}.'.format(
            model_type))
        continue
    diff = [np.mean(x.numpy() - y.numpy()) for x, y in zip(original_weights,
                                                           loaded_weights)]
    our_model_weights = model.weights
    their_model_weights = model2.weights
    assert (len(our_model_weights) == len(their_model_weights))
    our_variable_names = [x.name for x in model.weights]
    their_variable_names = [x.name for x in model2.weights]
    remap_dict = remap_our_model_variables(our_variable_names, model_type)
    new_weights = list()
    for i in range(len(our_model_weights)):
        our_name = model.weights[i].name
        remapped_name = remap_dict[our_name]
        source_index = get_correct_variable(remapped_name, their_variable_names)
        new_weights.append(model2.weights[source_index].value())
        logger.debug('Copying from {} ({}) to {} ({}).'.format(
            model2.weights[source_index].name,
            model2.weights[source_index].value().shape,
            model.weights[i].name,
            model.weights[i].value().shape))
    logger.info(len(new_weights))
    logger.info('Setting new weights')
    model.set_weights(new_weights)
    logger.info('Finished setting new weights.')
    their_output = model2(inp)
    our_output = model(inp)
    logger.info(np.max(their_output.numpy() - our_output.numpy()))
    logger.info(diff)  # This must be 0.0
    break

Related

How to avoid memory leakage in an autoregressive model within tensorflow

Recently, I have been training an LSTM with an attention mechanism for regression in TensorFlow 2.9, and I ran into a problem during training with model.fit():
At the beginning, the training time is okay, around 7s/step. However, it keeps increasing during training, and after a number of steps (around 1000) it reaches something like 50s/step. Here is part of the code for my model:
class AttentionModel(tf.keras.Model):
    def __init__(self, encoder_output_dim, dec_units, dense_dim, batch):
        super().__init__()
        self.dense_dim = dense_dim
        self.batch = batch
        encoder = Encoder(encoder_output_dim)
        decoder = Decoder(dec_units, dense_dim)
        self.encoder = encoder
        self.decoder = decoder

    def call(self, inputs):
        # Create a list to record the results
        tempt = list()
        encoder_output, encoder_state = self.encoder(inputs)
        new_features = np.zeros((self.batch, 1, 1))
        dec_initial_state = encoder_state
        for i in range(6):
            dec_inputs = DecoderInput(new_features=new_features,
                                      enc_output=encoder_output)
            dec_result, dec_state = self.decoder(dec_inputs, dec_initial_state)
            tempt.append(dec_result.logits)
            new_features = dec_result.logits
            dec_initial_state = dec_state
        result = tf.concat(tempt, 1)
        return result
In the official documentation for tf.function, I noticed: "Don't rely on Python side effects like object mutation or list appends".
Since I use a dynamic Python list with append() to record the intermediate variables, I guess a new tf.Graph is added each time during training. Is this the reason my training is getting slower and slower?
Additionally, what should I use instead of a Python list to avoid this (see the sketch after the error message below)? I have tried a numpy.zeros matrix, but it leads to another problem:
tempt = np.zeros(shape=(1,6))
...
for i in range(6):
    dec_inputs = DecoderInput(new_features=new_features, enc_output=encoder_output)
    dec_result, dec_state = self.decoder(dec_inputs, dec_initial_state)
    tempt[i] = dec_result.logits
...
Cannot convert a symbolic tf.Tensor (decoder/dense_3/BiasAdd:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported.
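A graph-friendly alternative is tf.TensorArray. Below is a minimal sketch of what the call method above could look like with it (untested against the original model; it reuses DecoderInput, self.encoder, self.decoder and self.batch from the snippet above and assumes dec_result.logits has shape (batch, 1, features)):

import tensorflow as tf

def call(self, inputs):
    encoder_output, encoder_state = self.encoder(inputs)
    # Graph-safe accumulator instead of a Python list or a NumPy array.
    logits_ta = tf.TensorArray(dtype=tf.float32, size=6)
    new_features = tf.zeros((self.batch, 1, 1))  # tf.zeros instead of np.zeros
    dec_initial_state = encoder_state
    for i in range(6):  # constant trip count, so the loop unrolls in the graph
        dec_inputs = DecoderInput(new_features=new_features,
                                  enc_output=encoder_output)
        dec_result, dec_state = self.decoder(dec_inputs, dec_initial_state)
        logits_ta = logits_ta.write(i, dec_result.logits)
        new_features = dec_result.logits
        dec_initial_state = dec_state
    # stack() yields (6, batch, 1, features); rearrange to (batch, 6, features)
    # to match the original tf.concat(tempt, 1).
    stacked = tf.squeeze(logits_ta.stack(), axis=2)
    return tf.transpose(stacked, [1, 0, 2])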

How to perform custom operations in between keras layers?

I have a neural network with one input and one output, and in between I need to perform a small operation. I have two inputs (from the same distribution, with either mean 0 or mean 1) which I need to feed to the neural network one at a time and compare the output for each input. After the comparison, I finally generate the prediction of the model. The implementation is as follows:
from tensorflow import keras
import tensorflow as tf
import numpy as np
#define network
x1 = keras.Input(shape=(1), name="x1")
x2 = keras.Input(shape=(1), name="x2")
model = keras.layers.Dense(20)
model1 = keras.layers.Dense(1)
x11 = model1(model(x1))
x22 = model1(model(x2))
After this I need to perform the following operation:
if x11 >= x22:
    Vm = x1
else:
    Vm = x2
Finally I need to do:
out = Vm - 0.5
out = keras.activations.sigmoid(out)
model = keras.Model([x1, x2], out)
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
    loss=tf.keras.losses.binary_crossentropy,
    metrics=['accuracy']
)
model.summary()
tf.keras.utils.plot_model(model)  # visualize model
I have normally distributed pairs of data with mean 0 and mean 1, generated as below:
#Generating training dataset
from scipy.stats import skewnorm
n=1000 #sample each
s = 1 # scale to change o/p range
X1_0 = skewnorm.rvs(a = 0 ,loc=0, size=n)*s; X1_1 = skewnorm.rvs(a = 0 ,loc=1, size=n)*s #Skewnorm function
X2_0 = skewnorm.rvs(a = 0 ,loc=0, size=n)*s; X2_1 = skewnorm.rvs(a = 0 ,loc=1, size=n)*s #Skewnorm function
X1_train = list(X1_0) + list(X1_1) #append both data
X2_train = list(X2_0) + list(X2_1) #append both data
y_train = [x for x in (0,1) for i in range(0, n)] #make Y for above conditions
#reshape to proper format
X1_train = np.array(X1_train).reshape(-1,1)
X2_train = np.array(X2_train).reshape(-1,1)
y_train = np.array(y_train)
#train model
model.fit([X1_train, X2_train], y_train, epochs=10)
I am not able to run the program if I include the operation
if x11 >= x22:
    Vm = x1
else:
    Vm = x2
between the layers. If I directly take the maximum of the outputs instead:
Vm = keras.layers.Maximum()([x11,x22])
the program works fine. But I need to select either x1 or x2 based on the values of x11 and x22.
The problem is probably that the comparison is performed while defining the structure of the model, when x11 and x22 do not yet have values (I guess). I am totally new to all of this, so I could not resolve it myself. I would greatly appreciate any help or suggestions. Thank you.
You can add this functionality via a Lambda layer.
Vm = tf.keras.layers.Lambda(lambda x: tf.where(x[0]>=x[1], x[2], x[3]))([x11, x22, x1, x2])
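For completeness, a minimal sketch of how that Lambda could slot into the model from the question (the Dense layers are renamed shared_hidden/shared_out here to avoid reusing the name model; untested against the original data):

import tensorflow as tf
from tensorflow import keras

x1 = keras.Input(shape=(1,), name="x1")
x2 = keras.Input(shape=(1,), name="x2")
shared_hidden = keras.layers.Dense(20)
shared_out = keras.layers.Dense(1)
x11 = shared_out(shared_hidden(x1))
x22 = shared_out(shared_hidden(x2))

# Elementwise select: where x11 >= x22 take x1, otherwise take x2.
Vm = keras.layers.Lambda(
    lambda t: tf.where(t[0] >= t[1], t[2], t[3]))([x11, x22, x1, x2])

out = keras.activations.sigmoid(Vm - 0.5)
model = keras.Model([x1, x2], out)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001),
              loss=keras.losses.binary_crossentropy,
              metrics=['accuracy'])
model.summary()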

TensorFlow-Keras generator: Turn off auto-sharding or switch auto_shard_policy to DATA

While training my model I ran into the issue described in the post Tensorflow - Keras: Consider either turning off auto-sharding or switching the auto_shard_policy to DATA to shard this dataset. My question now is: does the solution mentioned by @Graham501617 work with generators as well? Here is some dummy code for what I use so far:
class BatchGenerator(Sequence):
    def __init__(self, some_args):
        ...

    def __len__(self):
        num_batches_in_sequence = ...

    def __getitem__(self, _):
        data, labels = get_one_batch(self.some_args)
        return data, labels
In the main script I do something like:
train_generator = BatchGenerator(some_args)
valid_generator = BatchGenerator(some_args)

cross_device_ops = tf.distribute.HierarchicalCopyAllReduce(num_packs=2)
strategy = tf.distribute.MirroredStrategy(cross_device_ops=cross_device_ops)

with strategy.scope():
    model = some_model
    model.compile(some_args)

history = model.fit(
    x=train_generator,
    validation_data=valid_generator,
    ...
)
I would probably have to modify the __getitem__ function somehow, wouldn't I?
I appreciate your support!
You'd have to wrap your generator into a single function...
The example below assumes your data is stored as NumPy arrays (.npy), that each file already contains one mini-batch, that the files are labeled 0_x.npy, 1_x.npy, 2_x.npy, etc., and that both the data and label arrays are float64.
from pathlib import Path
import os
import tensorflow as tf
import numpy as np

# Your new generator as a function rather than an object you need to instantiate
def getNextBatch(stop, data_dir):
    i = 0
    data_dir = data_dir.decode('ascii')
    while True:
        while i < stop:
            x = np.load(str(Path(data_dir + "/" + str(i) + "_x.npy")))
            y = np.load(str(Path(data_dir + "/" + str(i) + "_y.npy")))
            yield x, y
            i += 1
        i = 0

# Make a dataset given the directory and strategy
def makeDataset(generator_func, dir, strategy=None):
    # Get amount of files (each mini-batch is one _x.npy / _y.npy pair)
    data_size = int(len([name for name in os.listdir(dir)
                         if os.path.isfile(os.path.join(dir, name))]) / 2)
    # Make a dataset from the generator. MAKE SURE TO SPECIFY THE DATA TYPE!!!
    ds = tf.data.Dataset.from_generator(generator_func, args=[data_size, dir],
                                        output_types=(tf.float64, tf.float64))
    # Turn off auto-sharding for this dataset
    options = tf.data.Options()
    options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
    ds = ds.with_options(options)
    # Optional: Make it a distributed dataset if you're using a strategy
    if strategy is not None:
        ds = strategy.experimental_distribute_dataset(ds)
    return ds

training_ds = makeDataset(getNextBatch, str(Path(data_dir + "/training")), None)
validation_ds = makeDataset(getNextBatch, str(Path(data_dir + "/validation")), None)

model.fit(training_ds,
          epochs=epochs,
          callbacks=callbacks,
          validation_data=validation_ds)
You might need to pass the number of steps per epoch in your fit() call, in which case you can use the generator you've already made.
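If you go that route, a minimal sketch (assuming the BatchGenerator from the question is still available to report the number of batches per epoch) could look like this:

batches_per_epoch = len(BatchGenerator(some_args))  # Sequence.__len__
model.fit(training_ds,
          epochs=epochs,
          steps_per_epoch=batches_per_epoch,
          callbacks=callbacks,
          validation_data=validation_ds,
          validation_steps=batches_per_epoch)  # needed because the generator loops forever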

Get logits of a trained Keras model [duplicate]

I am building a deconvolution network. I would like to add a layer to it which is the reverse of a softmax. I tried to write a basic Python function that returns the inverse of a softmax for a given matrix, put it in a TensorFlow Lambda layer, and added it to my model.
I get no error, but when I run a prediction I only get zeros at the output. When I don't add this layer to my network, the output is something other than zeros. This suggests the zeros come from my inv_softmax function, which must be wrong.
Can you enlighten me on how to proceed?
I define my function like this:
def inv_softmax(x):
    C = 0
    S = np.zeros((1,1,10))  # (1,1,10) is the shape of the data my layer will receive
    try:
        for j in range(np.max(np.shape(x))):
            C += np.exp(x[0,0,j])
        for i in range(np.max(np.shape(x))):
            S[0,0,i] = np.log(x[0,0,i] + C)
    except ValueError:
        print("ValueError in inv_softmax")
        pass
    S = tf.convert_to_tensor(S, dtype=tf.float32)
    return S
I add it to my network like this:
x = ...
x = layers.Lambda(lambda x: inv_softmax(x), name='inv_softmax', output_shape=[1,1,10])(x)
x = ...
If you need more of my code or other information, please ask.
Try this:
import math
import tensorflow as tf

def inv_softmax(x, C):
    return tf.math.log(x) + C

input = tf.keras.layers.Input(shape=(1,10))
x = tf.keras.layers.Lambda(lambda x: inv_softmax(x, math.log(10.)),
                           name='inv_softmax')(input)
model = tf.keras.Model(inputs=input, outputs=x)

a = tf.zeros([1, 1, 10])
a = tf.nn.softmax(a)
a = model(a)
print(a.numpy())
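For reference on why this works: softmax(z)_i = exp(z_i) / sum_j exp(z_j), so taking the log of the softmax output gives z_i - log(sum_j exp(z_j)). The logits are therefore only recoverable up to an additive constant C = log(sum_j exp(z_j)). In the toy check above the logits are all zero, the softmax output is 0.1 for each of the 10 classes, and C = log(10), so tf.math.log(0.1) + math.log(10.) gives back exactly zeros.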
Thanks, it works!
I put:
import keras.backend as K

def inv_softmax(x, C):
    return K.log(x) + K.log(C)

How to construct and reuse networks across two input branches of a network?

How to do something like this?
nn = get_networks()
A = nn(X_input)
B = nn(X_other_input)
C = A + B
model = ...
So that all the weights in nn are shared, and only the input (training) branches are different?
In pure TensorFlow you do this with
with tf.variable_scope('something', reuse=tf.AUTO_REUSE):
    # define stuff here
and by carefully naming your layers.
But basically you can not construct nn like this in the first place, because you can not pass a non-called layer to a layer call!
For example:
In [21]: tf.keras.layers.Dense(16)(tf.keras.layers.Dense(8))
...
AttributeError: 'Dense' object has no attribute 'shape'
UPDATE:
I have been accomplishing this by creating an uncompiled model as the sub-network. That "model" can then be passed to other network-creation functions. For example, if you have a functional equation that you want to solve, you might approximate the function with a network and then pass that network to the function, which is itself a network.
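A minimal sketch of that uncompiled-sub-model idea (hypothetical layer sizes and names; the point is that calling the same Model instance on two inputs shares its weights):

import tensorflow as tf
from tensorflow import keras

def get_network():
    # An uncompiled functional model that acts as the reusable sub-network nn.
    inp = keras.Input(shape=(8,))
    out = keras.layers.Dense(16, activation='relu')(inp)
    out = keras.layers.Dense(1)(out)
    return keras.Model(inp, out, name='nn')

nn = get_network()

X_input = keras.Input(shape=(8,), name='X_input')
X_other_input = keras.Input(shape=(8,), name='X_other_input')

A = nn(X_input)        # first branch
B = nn(X_other_input)  # second branch, same weights
C = keras.layers.Add()([A, B])  # the C = A + B from the question

model = keras.Model([X_input, X_other_input], C)
model.compile(optimizer='adam', loss='mse')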
It depends how you would like to reuse it, but the idea is to save your layers once initialized, and use them multiple times later.
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np

layers = {}

def net1(input):
    layers["l1"] = keras.layers.Dense(10)
    layers["l2"] = keras.layers.Dense(10)
    return layers["l1"](layers["l2"](keras.layers.Flatten()(input)))

def net2(input):
    return layers["l1"](layers["l2"](keras.layers.Flatten()(input)))

input1 = keras.layers.Input((2, 2))
input2 = keras.layers.Input((2, 2))

model1 = keras.Model(inputs=input1, outputs=net1(input1))
model1.compile(loss=keras.losses.mean_squared_error,
               optimizer=keras.optimizers.Adam())
model2 = keras.Model(inputs=input2, outputs=net2(input2))
model2.compile(loss=keras.losses.mean_squared_error,
               optimizer=keras.optimizers.Adam())

x = np.random.randint(0, 100, (50, 2, 2))
m1 = model1.predict(x)
m2 = model2.predict(x)
print(x[0])
print(m1[0])
print(m2[0])
Outputs are identical:
[ 10.114908 -13.074531 -8.671929 -59.03201 55.389366 1.3610549
-38.051434 8.355987 7.5310936 -27.717983 ]
[ 10.114908 -13.074531 -8.671929 -59.03201 55.389366 1.3610549
-38.051434 8.355987 7.5310936 -27.717983 ]