Trainable variables in loss function - tensorflow

I am trying to understand how to add a trainable variable to my model. I wrote some code:
def custom_loss(y_true, y_pred, k1, k2, k3):
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred))
    cosine0 = cosine_dist(y_true[:MAX_BITS], y_pred[:MAX_BITS])
    error = custom___error(y_true, y_pred)
    loss = loss * k1 + cosine0 * k2 + k3 * error
    return loss

model.compile(loss=lambda y_true, y_pred: custom_loss(y_true, y_pred, k1, k2, k3),
              optimizer=optimizer,
              metrics=['accuracy', custom_error, custom___error])
But the variables k1, k2, k3 never change: how can I add them to the list of trainable variables?
k1: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.5>
k2: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.5>
k3: <tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.5>
Regards
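For reference, here is a minimal sketch of one way such coefficients can end up in the trainable variables list: register them as weights of the model itself (via add_weight) and compute the loss inside a custom train_step so the gradient tape sees them. This assumes TF 2.x; WeightedLossModel and all of its names are illustrative, not part of the original code.

import tensorflow as tf

class WeightedLossModel(tf.keras.Model):
    """Hypothetical wrapper that owns the loss coefficients as trainable weights."""
    def __init__(self, base_model, **kwargs):
        super().__init__(**kwargs)
        self.base_model = base_model
        # Registering the coefficients with add_weight makes them show up in
        # model.trainable_variables and therefore receive gradient updates.
        init = tf.keras.initializers.Constant(0.5)
        self.k1 = self.add_weight(name="k1", shape=(), initializer=init, trainable=True)
        self.k2 = self.add_weight(name="k2", shape=(), initializer=init, trainable=True)
        self.k3 = self.add_weight(name="k3", shape=(), initializer=init, trainable=True)

    def call(self, inputs, training=False):
        return self.base_model(inputs, training=training)

    def train_step(self, data):
        x, y_true = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            # custom_loss is the function defined above; the k's now flow
            # through the tape because they are tracked weights of this model.
            loss = custom_loss(y_true, y_pred, self.k1, self.k2, self.k3)
        grads = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        return {"loss": loss}

With this wrapping, model = WeightedLossModel(base_model), model.compile(optimizer=optimizer), model.fit(...) would update k1, k2, k3 along with the other weights. Note that unconstrained loss weights tend to be driven toward values that trivially shrink the loss, so some constraint or regularization on them is usually needed.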


memory increase when training in tensorflow 2.2.0 keras graph with different shapes

Below is a simple example:
import os
import psutil
import numpy as np
import tensorflow as tf  # needed for the tf.* calls below

process = psutil.Process(os.getpid())

class TestKeras3:
    def __init__(self):
        pass

    def build_graph(self):
        inputs = tf.keras.Input(shape=(None, None, 3), batch_size=1)
        x = tf.keras.layers.Conv2D(100, (2, 2), padding='SAME', name='x')(inputs)
        y = tf.reshape(x, (-1,))
        z = tf.multiply(y, y)
        model = tf.keras.models.Model(inputs=inputs, outputs=z)
        return model

    def train(self):
        model = self.build_graph()
        model.summary()
        size = np.arange(1000)
        for i in range(1000):
            inputs = tf.random.normal([1, size[999 - i], size[999 - i], 3])
            with tf.GradientTape() as tape:
                output = model(inputs)
                print(i, tf.shape(output), process.memory_info().rss)
and the output is:
i    output shape    memory (RSS, bytes)
979 tf.Tensor([40000], shape=(1,), dtype=int32) 2481123328
980 tf.Tensor([36100], shape=(1,), dtype=int32) 2481582080
981 tf.Tensor([32400], shape=(1,), dtype=int32) 2482122752
982 tf.Tensor([28900], shape=(1,), dtype=int32) 2482393088
983 tf.Tensor([25600], shape=(1,), dtype=int32) 2482933760
984 tf.Tensor([22500], shape=(1,), dtype=int32) 2483453952
985 tf.Tensor([19600], shape=(1,), dtype=int32) 2483793920
986 tf.Tensor([16900], shape=(1,), dtype=int32) 2484330496
987 tf.Tensor([14400], shape=(1,), dtype=int32) 2484871168
988 tf.Tensor([12100], shape=(1,), dtype=int32) 2485137408
989 tf.Tensor([10000], shape=(1,), dtype=int32) 2485665792
990 tf.Tensor([8100], shape=(1,), dtype=int32) 2486206464
991 tf.Tensor([6400], shape=(1,), dtype=int32) 2486579200
992 tf.Tensor([4900], shape=(1,), dtype=int32) 2487119872
993 tf.Tensor([3600], shape=(1,), dtype=int32) 2487390208
994 tf.Tensor([2500], shape=(1,), dtype=int32) 2487930880
995 tf.Tensor([1600], shape=(1,), dtype=int32) 2488463360
996 tf.Tensor([900], shape=(1,), dtype=int32) 2488811520
997 tf.Tensor([400], shape=(1,), dtype=int32) 2489335808
998 tf.Tensor([100], shape=(1,), dtype=int32) 2489868288
999 tf.Tensor([0], shape=(1,), dtype=int32) 2490241024
I found that every time the input size changes, memory consumption increases.
The Conv2D kernel in the model has a fixed shape of (2, 2, 3, 100). Does the model cache some tensors during the forward pass, causing memory to keep growing? If so, how can these resources be released during training? If not, what else could be the reason?
After trying many approaches, I solved this problem.
It seems that using raw tf ops inside a Keras graph causes a memory leak, which can be avoided by wrapping those ops in a tf.keras.layers.Layer subclass.
class ReshapeMulti(tf.keras.layers.Layer):
    def __init__(self):
        super(ReshapeMulti, self).__init__()

    def call(self, inputs):
        y = tf.reshape(inputs, (-1,))
        z = tf.multiply(y, y)
        return z

class TestKeras3:
    def __init__(self):
        pass

    def build_graph(self):
        inputs = tf.keras.Input(shape=(None, None, 3), batch_size=1)
        x = tf.keras.layers.Conv2D(100, (2, 2), padding='SAME', name='x')(inputs)
        # y = tf.reshape(x, (-1,))
        # z = tf.multiply(y, y)
        z = ReshapeMulti()(x)
        model = tf.keras.models.Model(inputs=inputs, outputs=z)
        return model

    def train(self):
        model = self.build_graph()
        model.summary()
        size = np.arange(1000)
        for i in range(1000):
            inputs = tf.random.normal([1, size[999 - i], size[999 - i], 3])
            with tf.GradientTape() as tape:
                output = model(inputs)
                print(i, tf.shape(output), process.memory_info().rss)

Calculate gradients of intermediate nodes in tensorflow eager execution

I use tensorflow eager execution to do the following calculation:
y = x^2
z = y + 2.
My goal is to calculate dz/dx and dz/dy (the gradients of z with respect to x and y):
dx, dy = GradientTape.gradient(z, [x, y]).
However, only dy is computed and dx is None. That is, only gradients with respect to tensors that z directly depends on are returned; the gradient with respect to x comes back as None.
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
[None, <tf.Tensor: id=11, shape=(), dtype=float32, numpy=1.0>]
The following is the full code.
from __future__ import absolute_import, division, print_function
import tensorflow as tf
tf.enable_eager_execution()
tfe = tf.contrib.eager
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "6"
import warnings
warnings.filterwarnings('ignore')

train_steps = 5
for i in range(train_steps):
    x = tf.contrib.eager.Variable(0.)
    with tf.GradientTape() as tape:
        y = tf.square(x)
        z = y + 2
    print(tape.gradient(z, [x, y]))
Any solution?
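One workaround sketch for at least reading both values, under the same TF 1.x eager setup as above (this works around the symptom rather than explaining it): make the tape persistent and query each source in a separate call.

x = tf.contrib.eager.Variable(0.)
with tf.GradientTape(persistent=True) as tape:
    y = tf.square(x)
    z = y + 2
# A persistent tape can be queried more than once, so the variable and the
# intermediate tensor can be asked for separately.
dz_dx = tape.gradient(z, x)  # expected 2*x = 0.0
dz_dy = tape.gradient(z, y)  # expected 1.0
print(dz_dx, dz_dy)
del tape  # release the persistent tape's resources explicitly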

ValueError: Error converting shape to a TensorShape: Dimension -5 must be >= 0

I have no idea how this error arises. I'm trying to change the input format to an RNN, and I have printed out the tensors in the original version (which works) and the modified version (which crashes).
FUNCTIONAL:
LABEL= Tensor("concat_1:0", shape=(?, 2), dtype=float32, device=/device:CPU:0) (?, 2)
inputs=Tensor("concat:0", shape=(?, 8), dtype=float32, device=/device:CPU:0)
x=[<tf.Tensor 'split:0' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:1' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:2' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:3' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:4' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:5' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:6' shape=(?, 1) dtype=float32>,
<tf.Tensor 'split:7' shape=(?, 1) dtype=float32>]
last outputs=Tensor("rnn/rnn/basic_lstm_cell/mul_23:0", shape=(?, 3), dtype=float32)
PREDICTION Tensor("add:0", shape=(?, 2), dtype=float32)
LOSS Tensor("mean_squared_error/value:0", shape=(), dtype=float32)
BROKEN:
X= 5 Tensor("Const:0", shape=(49, 10), dtype=float32, device=/device:CPU:0)
labels= Tensor("Const_5:0", shape=(49, 10), dtype=float32)
OUTPUTS Tensor("rnn/rnn/basic_lstm_cell/mul_14:0", shape=(49, 5), dtype=float32)
PREDICTIONS Tensor("add:0", shape=(49, 10), dtype=float32)
LABELS Tensor("Const_5:0", shape=(49, 10), dtype=float32)
LOSS Tensor("mean_squared_error/value:0", shape=(), dtype=float32)
Here is the code for the model, which is the same for each of them:
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
outputs, _ = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)
outputs = outputs[-1]
print('-->OUTPUTS', outputs)
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
predictions = tf.matmul(outputs, weight) + bias
print('-->PREDICTIONS', predictions)
print('-->LABELS', labels)
loss = tf.losses.mean_squared_error(labels, predictions)
print('-->LOSS', loss)
train_op = tf.contrib.layers.optimize_loss(loss=loss, global_step=tf.train.get_global_step(), learning_rate=0.01, optimizer="SGD")
eval_metric_ops = {"rmse": tf.metrics.root_mean_squared_error(labels, predictions)}
TL;DR: Use x = tf.split( x, 10, axis = -1 ) to split x before feeding it.
TS;WM:
The error probably happens at tf.nn.static_rnn(), in the second line of your code (it would have been nice had you posted the error line number):
outputs, _ = tf.nn.static_rnn(lstm_cell, x, dtype=tf.float32)
The "broken" version tries to feed a tensor with shape ( 49, 10 ) whereas the working version is feeding a list of 8 tensors with shape ( ?, 1 ). The documentation says:
inputs: A length T list of inputs, each a Tensor of shape [batch_size, input_size], or a nested tuple of such elements.
In the previous line you define the lstm_cell with tf.contrib.rnn.BasicLSTMCell.__init__() (presumably, because the import lines are omitted from your code), which has the num_units argument filled by LSTM_SIZE (which is, again, omitted from your code):
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
So you have to get your ducks in a row. x has to be a list of ( batch_size, 1 ) tensors, which you can achieve with tf.split():
x = tf.split( x, 10, axis = -1 )
where I presume 10 to be the length of the data you're trying to feed, just based on the output you pasted.
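For illustration, a small sketch of the shape change that tf.split performs here (assuming TF 1.x and the (49, 10) input from the broken version; the placeholder is only for demonstration):

x = tf.placeholder(tf.float32, shape=(49, 10))
x_list = tf.split(x, 10, axis=-1)      # a list of 10 tensors, each shaped (49, 1)
print(len(x_list), x_list[0].shape)    # 10 (49, 1)
# x_list now matches the "length T list of [batch_size, input_size] tensors"
# that tf.nn.static_rnn expects.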

how to convert string Tensor to Tensor float list?

So, my code is like this:
parsed_line = tf.decode_csv(line, [[0], [0], [""]])
print(parsed_line[0])
del parsed_line[0]
del parsed_line[0]
features = parsed_line
print(parsed_line[0])
then the result is
[<tf.Tensor 'DecodeCSV:0' shape=() dtype=int32>, <tf.Tensor 'DecodeCSV:1' shape=() dtype=int32>, <tf.Tensor 'DecodeCSV:2' shape=() dtype=string>]
and
[<Tensor("DecodeCSV:2", shape=(), dtype=string)>]
The CSV line I will give to this decode function is
1, 0, 0101010010101010101010
and I want to convert this "0101010010101010101010" into
[0,1,0,1,0,.........]
in TensorFlow, that is, from
[<Tensor("DecodeCSV:2", shape=(), dtype=string)>]
to
[<tf.Tensor 'DecodeCSV:0' shape=() dtype=int32>, <tf.Tensor 'DecodeCSV:1' shape=() dtype=int32>, ............]
Do you have any ideas about this?
You can do it this way using tf.string_split and tf.string_to_number:
import tensorflow as tf

line = tf.constant("1000111101", shape=(1,))
b = tf.string_split(line, delimiter="").values
c = tf.string_to_number(b, tf.int32)
print(c)
with tf.Session() as sess:
    print(sess.run(c))
[1 0 0 0 1 1 1 1 0 1]
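As a side note, the same idea can be sketched in TF 2.x with tf.strings.bytes_split and tf.strings.to_number (this is an assumption about the newer API, not part of the answer above):

import tensorflow as tf  # TF 2.x, eager execution

line = tf.constant("1000111101")
chars = tf.strings.bytes_split(line)           # 1-D string tensor of single characters
nums = tf.strings.to_number(chars, tf.int32)   # expected [1 0 0 0 1 1 1 1 0 1]
print(nums.numpy())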

Accessing implicit tensors in tensorflow

The code below creates some tensors implicitly. I wonder how I can view the values of these tensors:
<tf.Variable 'rnn/basic_lstm_cell/kernel:0' shape=(43, 160) dtype=float32_ref>
<tf.Variable 'rnn/basic_lstm_cell/bias:0' shape=(160,) dtype=float32_ref>
<tf.Variable 'rnn/basic_lstm_cell/kernel/Adagrad:0' shape=(43, 160) dtype=float32_ref>
<tf.Variable 'rnn/basic_lstm_cell/bias/Adagrad:0' shape=(160,) dtype=float32_ref>
<tf.Variable 'softmax/W/Adagrad:0' shape=(40, 10) dtype=float32_ref>
<tf.Variable 'softmax/b/Adagrad:0' shape=(10,) dtype=float32_ref>
Here is the code itself.
import tensorflow as tf
import numpy as np

VECTOR_SIZE = 3
SEQUENCE_LENGTH = 5
BATCH_SIZE = 7
STATE_SIZE = 40
NUM_CLASSES = 10
LEARNING_RATE = 0.1

x = tf.placeholder(tf.float32, [BATCH_SIZE, SEQUENCE_LENGTH, VECTOR_SIZE],
                   name='input_placeholder')
y = tf.placeholder(tf.int32, [BATCH_SIZE, SEQUENCE_LENGTH],
                   name='labels_placeholder')
init_state = tf.zeros([BATCH_SIZE, STATE_SIZE])
rnn_inputs = tf.unstack(x, axis=1)
y_as_list = tf.unstack(y, axis=1)

cell = tf.contrib.rnn.BasicLSTMCell(STATE_SIZE, state_is_tuple=True)
rnn_outputs, final_state = tf.contrib.rnn.static_rnn(cell, rnn_inputs,
                                                     initial_state=(init_state, init_state))
with tf.variable_scope('softmax'):
    W = tf.get_variable('W', [STATE_SIZE, NUM_CLASSES])
    b = tf.get_variable('b', [NUM_CLASSES], initializer=tf.constant_initializer(0.0))

logits = [tf.matmul(rnn_output, W) + b for rnn_output in rnn_outputs]
predictions = [tf.nn.softmax(logit) for logit in logits]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logit)
          for logit, label in zip(logits, y_as_list)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(LEARNING_RATE).minimize(total_loss)

X = np.ones([BATCH_SIZE, SEQUENCE_LENGTH, VECTOR_SIZE])
Y = np.ones([BATCH_SIZE, SEQUENCE_LENGTH])

saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
batch_total_loss = sess.run([total_loss, train_step],
                            feed_dict={x: X, y: Y})
save_path = saver.save(sess, "/tmp/model.ckpt")

for el in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
    print(el)
Use sess.run():
for el in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
    print(el)            # prints the variable's name, shape, and dtype
    print(sess.run(el))  # prints the variable's current value
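A small follow-up sketch (same graph and session as above): a single implicitly created variable can also be fetched by name instead of looping over the collection; the name here is taken from the printout in the question.

kernel = tf.get_default_graph().get_tensor_by_name('rnn/basic_lstm_cell/kernel:0')
print(sess.run(kernel))  # the (43, 160) LSTM kernel values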