tf.placeholder inside a class in TF 2.0 - tensorflow

I'm trying to convert code I wrote in TF 1.0 to TF 2.0, and I'm having difficulty replacing the tf.placeholder inside a class. My code is the following:
class User:
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_true = tf.placeholder(tf.float32, [None, 10])
    W1 = tf.Variable(tf.random.truncated_normal([7840, 1], stddev=0.1))
    lambda_W = tf.Variable(tf.zeros([7840, 1]))
    W = tf.reshape(W1, [784, 10])
    ylogits = W * x
    y = tf.nn.softmax(ylogits)

    def __init__(self):
        pass
Is there a way to replace tf.placeholder inside the class to make code running in TF 2.0?

Firstly, I think you intended to have each of those objects created per instance of the class, not one for the whole class as it is now. I also think your product between W and x was meant to be a matrix product, not an element-wise product, which would not work with the given shapes:
class User:
    def __init__(self):
        self.x = tf.placeholder(tf.float32, shape=[None, 784])
        self.y_true = tf.placeholder(tf.float32, [None, 10])
        self.W1 = tf.Variable(tf.random.truncated_normal([7840, 1], stddev=0.1))
        self.lambda_W = tf.Variable(tf.zeros([7840, 1]))
        self.W = tf.reshape(self.W1, [784, 10])
        self.ylogits = self.x @ self.W  # matrix product: [None, 784] x [784, 10] -> [None, 10]
        self.y = tf.nn.softmax(self.ylogits)
To use it in TensorFlow 2.x, you would remove the placeholders and simply perform the operations on each new input, for example in a dedicated method:
class User:
    def __init__(self):
        self.W1 = tf.Variable(tf.random.truncated_normal([7840, 1], stddev=0.1))
        self.lambda_W = tf.Variable(tf.zeros([7840, 1]))

    def call(self, x):
        # Reshape here rather than in __init__ so W tracks the current value of W1
        W = tf.reshape(self.W1, [784, 10])
        ylogits = x @ W
        return tf.nn.softmax(ylogits)
You could use this as:
user1 = User()
x = ... # Get some data
y = user1.call(x)
Or if you like to be more "idiomatic", you could use __call__:
class User:
    def __init__(self):
        self.W1 = tf.Variable(tf.random.truncated_normal([7840, 1], stddev=0.1))
        self.lambda_W = tf.Variable(tf.zeros([7840, 1]))

    def __call__(self, x):
        W = tf.reshape(self.W1, [784, 10])
        ylogits = x @ W
        return tf.nn.softmax(ylogits)
And then you would do:
user1 = User()
x = ... # Get some data
y = user1(x)
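If you also want to train the variables, here is a minimal sketch of a TF 2.x training step with tf.GradientTape (this assumes a batch x of shape [None, 784], one-hot labels y_true of shape [None, 10], and cross-entropy as the loss; the SGD optimizer is just an illustration):

user1 = User()
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

with tf.GradientTape() as tape:
    y = user1(x)  # forward pass; the tape records the ops on W1
    loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y))

grads = tape.gradient(loss, [user1.W1])
optimizer.apply_gradients(zip(grads, [user1.W1]))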

Related

Why is the value of a variable inside a tensorflow graph not frozen?

I'm trying to save and load the graph of a tensorflow module that contains a tf.Variable as an internal variable.
Here is the code:
import tensorflow as tf

class MyModule(tf.Module):
    def __init__(self, v):
        self.v = v

    @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.int32),
                                  tf.TensorSpec(shape=None, dtype=tf.int32)])
    def __call__(self, x, v):
        self.v.assign(x * v, read_value=False)
        return self.v

x = tf.constant(tf.random.uniform(shape=[2, 1], maxval=3, dtype=tf.int32))
v = tf.Variable([[1], [2]])
module = MyModule(v)
#############################################
x = tf.constant(tf.random.uniform(shape=[3, 1], maxval=3, dtype=tf.int32))
v = tf.Variable([[1], [2], [3]])
module = MyModule(v)

tf.saved_model.save(module, "module")
imported = tf.saved_model.load("module")
x = tf.constant([80, 0, 20, 24, 321])
v = tf.Variable(3 * tf.ones_like(x), trainable=False)
result = imported(x, v)
print(result)
The output is this:
tf.Tensor([240 0 60 72 963], shape=(5,), dtype=int32)
My question is the following:
Given that the graph has been saved, why can the value of the variable self.v still be changed? Isn't it supposed to be frozen?

Tensorflow subclassing issue

I have a resnet model, defined by the following class:
class ModelResNet(tf.keras.models.Model):
    def __init__(self):
        super(ModelResNet, self).__init__()
        self.resBlock1 = ResBlock(num_filters=32)
        self.resBlock2 = ResBlock(num_filters=32)
        self.dense1 = tf.keras.layers.Dense(units=128, activation='relu')
        self.dense2 = tf.keras.layers.Dense(units=10, activation='softmax')

    def call(self, input_tensor, training=False):
        x = self.resBlock1(input_tensor, training=training)
        x = self.resBlock2(x, training=training)
        x = tf.keras.layers.Flatten()(x)
        # Option 1
        x = self.dense1(x)
        x = self.dense2(x)
        # Option 2
        # x = tf.keras.layers.Dense(units=128, activation='relu')(x)
        # x = tf.keras.layers.Dense(units=10, activation='softmax')(x)
        return x
When I call ModelResNet().fit(), everything works great!
But if I replace Option 1 with Option 2 in the call() method, I get the following error:
ValueError: tf.function-decorated function tried to create variables on non-first call.
python-BaseException
Not sure I understand where the issue comes from.
Thanks!
When calling:
x = tf.keras.layers.Dense(units=128, activation='relu')(x)
you are creating a new dense layer on every invocation, and it is destroyed as soon as you exit the call() method. That cannot work: the layer's weights never persist, so there is nothing to train.
TensorFlow is not able to build a graph of layers out of such calls, so it prohibits them.
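In other words, any layer that owns variables must be created once in __init__ (as your Option 1 does) and only applied in call(). A minimal sketch of the rule (the layer size here is just a placeholder):

class Good(tf.keras.Model):
    def __init__(self):
        super(Good, self).__init__()
        # created once: the weights live as long as the model does
        self.dense = tf.keras.layers.Dense(units=10)

    def call(self, x):
        # only *applied* here, so repeated traced calls create no new variables
        return self.dense(x)

Stateless layers such as Flatten own no variables, which is why the inline Flatten() call in your model does not trigger this error.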

how to get tensor in default graph

I wrote a model class. In its __init__ function I build a graph, and then I want to train with the tensors in that graph, but I don't know how to get hold of those tensors. ae is an instance of the Autoencoder class, which has methods such as partial_fit(). For example, I want to access ae and x in train_test().
class Model:
    def __init__(self, param):
        # handle params
        self.param = param
        # create & build graph
        self.graph = tf.Graph()
        self.init_graph = self.build_graph()
        # create session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        gpu_num = os.getpid() % 1  # cuda_gpu_count()
        config.gpu_options.visible_device_list = str(gpu_num)
        self.sess = tf.Session(config=config, graph=self.graph)

    def build_graph(self):
        with self.graph.as_default():
            # define the autoencoder
            x = tf.placeholder(tf.float32, [None, n_hidden2])
            ae = Autoencoder(n_layers=[n_inputs, n_hidden],
                             transfer_function=tf.nn.relu,
                             optimizer=tf.train.AdamOptimizer(learning_rate=lr))
            return tf.global_variables_initializer()

    def __del__(self):
        # explicitly release resources by closing and deleting session and graph
        self.sess.close()
        del self.sess
        del self.graph
        del self.param

    # train models and return the test accuracy
    def train_test(self, train_data, train_label, test_data, test_label):
        with self.graph.as_default():
            # Initialization
            sess = self.sess
            sess.run(self.init_graph)
            temp1 = ae.partial_fit()
            temp2 = x
I thought that because I use with self.graph.as_default(): in the train_test function, I could access those tensors directly, but it says those names are not defined.
Define them as instance attributes like this:
def build_graph(self):
    with self.graph.as_default():
        # define the autoencoder
        self.x = tf.placeholder(tf.float32, [None, n_hidden2])
        self.ae = Autoencoder(
            n_layers=[n_inputs, n_hidden], transfer_function=tf.nn.relu,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr))
        return tf.global_variables_initializer()
And then access these attributes using self:
def train_test(self, train_data, train_label, test_data, test_label):
    with self.graph.as_default():
        # Initialization
        sess = self.sess
        sess.run(self.init_graph)
        temp1 = self.ae.partial_fit()
        temp2 = self.x
Alternatively, you could retrieve the tensor by using graph.as_graph_element(), for example:
graph = tf.Graph()
with graph.as_default():
    x = tf.placeholder(tf.float32, shape=(None, 2), name='input')
    logits = tf.layers.dense(x, 2)

input_ = graph.as_graph_element('input',
                                allow_tensor=True,
                                allow_operation=True)
# `input_` is an operation that outputs the placeholder `x`
input_ = input_.outputs[0]
print(x == input_)  # True
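If you know the tensor's full name, graph.get_tensor_by_name() is a shorter route to the same thing; note the ':0' suffix, which selects the operation's first output:

input_ = graph.get_tensor_by_name('input:0')
print(x == input_)  # True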

Tensorflow 2.0 Keras Model subclassing

I'm trying to implement a simple UNet-like model using the model subclassing method. Here's my code:
import tensorflow as tf
from tensorflow import keras as K

class Enc_block(K.layers.Layer):
    def __init__(self, in_dim):
        super(Enc_block, self).__init__()
        self.conv_layer = K.layers.SeparableConv2D(in_dim, 3, padding='same', activation='relu')
        self.batchnorm_layer = K.layers.BatchNormalization()
        self.pool_layer = K.layers.SeparableConv2D(in_dim, 3, padding='same', strides=2, activation='relu')

    def call(self, x):
        x = self.conv_layer(x)
        x = self.batchnorm_layer(x)
        x = self.conv_layer(x)
        x = self.batchnorm_layer(x)
        return self.pool_layer(x), x

class Dec_block(K.layers.Layer):
    def __init__(self, in_dim):
        super(Dec_block, self).__init__()
        self.conv_layer = K.layers.SeparableConv2D(in_dim, 3, padding='same', activation='relu')
        self.batchnorm_layer = K.layers.BatchNormalization()

    def call(self, x):
        x = self.conv_layer(x)
        x = self.batchnorm_layer(x)
        x = self.conv_layer(x)
        x = self.batchnorm_layer(x)
        return x

class Bottleneck(K.layers.Layer):
    def __init__(self, in_dim):
        super(Bottleneck, self).__init__()
        self.conv_1layer = K.layers.SeparableConv2D(in_dim, 1, padding='same', activation='relu')
        self.conv_3layer = K.layers.SeparableConv2D(in_dim, 3, padding='same', activation='relu')
        self.batchnorm_layer = K.layers.BatchNormalization()

    def call(self, x):
        x = self.conv_1layer(x)
        x = self.batchnorm_layer(x)
        x = self.conv_3layer(x)
        x = self.batchnorm_layer(x)
        return x

class Output_block(K.layers.Layer):
    def __init__(self, in_dim):
        super(Output_block, self).__init__()
        self.logits = K.layers.SeparableConv2D(in_dim, 3, padding='same', activation=None)
        self.out = K.layers.Softmax()

    def call(self, x):
        x_logits = self.logits(x)
        x = self.out(x_logits)
        return x_logits, x

class UNetModel(K.Model):
    def __init__(self, in_dim):
        super(UNetModel, self).__init__()
        self.encoder_block = Enc_block(in_dim)
        self.bottleneck = Bottleneck(in_dim)
        self.decoder_block = Dec_block(in_dim)
        self.output_block = Output_block(in_dim)

    def call(self, inputs, training=None):
        x, x_skip1 = self.encoder_block(32)(inputs)
        x, x_skip2 = self.encoder_block(64)(x)
        x, x_skip3 = self.encoder_block(128)(x)
        x, x_skip4 = self.encoder_block(256)(x)
        x = self.bottleneck(x)
        x = K.layers.UpSampling2D(size=(2,2))(x)
        x = K.layers.concatenate([x, x_skip4], axis=-1)
        x = self.decoder_block(256)(x)
        x = K.layers.UpSampling2D(size=(2,2))(x)  # 56x56
        x = K.layers.concatenate([x, x_skip3], axis=-1)
        x = self.decoder_block(128)(x)
        x = K.layers.UpSampling2D(size=(2,2))(x)  # 112x112
        x = K.layers.concatenate([x, x_skip2], axis=-1)
        x = self.decoder_block(64)(x)
        x = K.layers.UpSampling2D(size=(2,2))(x)  # 224x224
        x = K.layers.concatenate([x, x_skip1], axis=-1)
        x = self.decoder_block(32)(x)
        x_logits, x = self.output_block(2)(x)
        return x_logits, x
I am getting the following error:
ValueError: Input 0 of layer separable_conv2d is incompatible with the layer: expected ndim=4, found ndim=0. Full shape received: []
I'm not sure if this is the correct way to implement a network in tf.keras. The idea was to implement encoder and decoder blocks by subclassing keras layers, and then subclass the Model.
Take a look at this line from the UNetModel class:
x, x_skip1 = self.encoder_block(32)(inputs)
where self.encoder_block() is defined by
self.encoder_block = Enc_block(in_dim)
encoder_block is an instance of a class. By writing self.encoder_block(32) you are invoking the __call__() method of the Enc_block class, which expects to receive a batch of image inputs of rank 4. Instead you're passing the integer 32, which has rank 0, and you get a ValueError that says exactly that: expected ndim=4, found ndim=0. What you probably intended to do is:
x, x_skip1 = self.encoder_block(inputs)
You repeat the same mistake in the subsequent lines as well. There is a further problem: you define the same in_dim for every custom layer:
self.encoder_block = Enc_block(in_dim)
self.bottleneck = Bottleneck(in_dim)
self.decoder_block = Dec_block(in_dim)
self.output_block = Output_block(in_dim)
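A sketch of what the constructor could look like instead, with one block per stage and its own width (these widths are read off your call() body, so treat them as assumptions):

def __init__(self):
    super(UNetModel, self).__init__()
    self.enc1 = Enc_block(32)
    self.enc2 = Enc_block(64)
    self.enc3 = Enc_block(128)
    self.enc4 = Enc_block(256)
    self.bottleneck = Bottleneck(256)
    self.dec4 = Dec_block(256)
    self.dec3 = Dec_block(128)
    self.dec2 = Dec_block(64)
    self.dec1 = Dec_block(32)
    self.output_block = Output_block(2)

In call() you would then write x, x_skip1 = self.enc1(inputs), then x, x_skip2 = self.enc2(x), and so on.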
The input shape of the Bottleneck layer should match the output shape of the Enc_block layer, and so on. I suggest you first understand a simple example before trying to implement more complicated ones. Take a look at this example, which has two custom layers:
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers

class CustomLayer1(layers.Layer):
    def __init__(self, outshape=4):
        super(CustomLayer1, self).__init__()
        self.outshape = outshape

    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(int(input_shape[1]), self.outshape),
                                      trainable=True)
        super(CustomLayer1, self).build(input_shape)

    def call(self, inputs):
        return tf.matmul(inputs, self.kernel)

class CustomLayer2(layers.Layer):
    def __init__(self):
        super(CustomLayer2, self).__init__()

    def call(self, inputs):
        return inputs / tf.reshape(tf.reduce_sum(inputs, 1), (-1, 1))
Now I will use both of these layers in the new CombinedLayers class:
class CombinedLayers(layers.Layer):
    def __init__(self, units=3):
        super(CombinedLayers, self).__init__()
        # `units` defines the number of units in the layer. It is the
        # output shape of `CustomLayer1`
        self.layer1 = CustomLayer1(units)
        # The input shape is inferred dynamically in the `build()`
        # method of the `CustomLayer1` class
        self.layer2 = CustomLayer1(units)
        # Some layers, such as this one, do not need to know the shape
        self.layer3 = CustomLayer2()

    def call(self, inputs):
        x = self.layer1(inputs)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
Note that the input shape of CustomLayer1 is inferred dynamically in the build() method. Now let's test it with some input:
x_train = [np.random.normal(size=(3, )) for _ in range(5)]
x_train_tensor = tf.convert_to_tensor(x_train)
combined = CombinedLayers(3)
result = combined(x_train_tensor)
result.numpy()
# array([[  0.50822063,  -0.0800476 ,   0.57182697],
#        [ -0.76052217,   0.50127872,   1.25924345],
#        [-19.5887986 ,   9.23529798,  11.35350062],
#        [ -0.33696137,   0.22741248,   1.10954888],
#        [  0.53079047,  -0.08941536,   0.55862488]])
This is how you should approach it. Create layers one by one. Each time you add a new layer, test everything with some input to verify that you are doing things correctly.

How to input csv data in an autoencoder

I am using the code below that implements an autoencoder. How can I feed the autoencoder with data for training and testing?
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

class Autoencoder(object):
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        network_weights = self._initialize_weights()
        self.weights = network_weights
        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])
        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
                                            initializer=tf.contrib.layers.xavier_initializer())
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X})

    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict={self.x: X})

    def generate(self, hidden=None):
        if hidden is None:
            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])

# I instantiate the Autoencoder class; 5 is the dimension of a raw input,
# 2 is the dimension of the hidden layer
autoencoder = Autoencoder(5, 2, transfer_function=tf.nn.softplus,
                          optimizer=tf.train.AdamOptimizer())
# I prepare my data
IRIS_TRAINING = "C:\\Users\\Desktop\\iris_training.csv"

# Feeding data to the Autoencoder ???
# Train and Test ??
How can I train this model with data from a csv file? I think I need to run something like _, c = sess.run([optimizer, cost], feed_dict={self.x: batch_of_data}) inside a loop over epochs, but I am struggling with it.
Check out Stanford CS20SI's tutorial.
https://github.com/chiphuyen/tf-stanford-tutorials/blob/master/examples/05_csv_reader.py
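If you just want to get training running without a TF input pipeline, here is a minimal sketch that loads the CSV with NumPy and feeds mini-batches to partial_fit(). Note that partial_fit() already runs the optimizer and cost ops for you, so no explicit sess.run() is needed. It assumes the file parses into 5 float columns per row (matching n_input=5) with one header row to skip; adjust delimiter and skiprows to your file:

import numpy as np

data = np.loadtxt(IRIS_TRAINING, delimiter=',', skiprows=1, dtype=np.float32)
n_epochs = 50
batch_size = 16

for epoch in range(n_epochs):
    np.random.shuffle(data)  # reshuffle rows each epoch
    for i in range(0, len(data), batch_size):
        batch = data[i:i + batch_size]
        cost = autoencoder.partial_fit(batch)
    print("epoch %d, last batch cost %.4f" % (epoch, cost))

# Testing works the same way, but through calc_total_cost (no weight updates);
# IRIS_TEST here is a hypothetical path to a held-out csv:
# test_data = np.loadtxt(IRIS_TEST, delimiter=',', skiprows=1, dtype=np.float32)
# print("total test cost:", autoencoder.calc_total_cost(test_data))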