Theano function equivalent in TensorFlow

I'm wondering, with regard to this topic:
I want to reproduce the updates mechanism of theano.function with this lazy TensorFlow construction:
class TensorFlowTheanoFunction(object):
    def __init__(self, inputs, outputs, session):
        self._inputs = inputs
        self._outputs = outputs
        self.session = session

    def __call__(self, *args, **kwargs):
        feeds = {}
        for (argpos, arg) in enumerate(args):
            feeds[self._inputs[argpos]] = arg
        return self.session.run(self._outputs, feeds)
If I want to pass an updates argument (as in Theano), how can I modify this lazy call?
I just want this to also work in TensorFlow:
self.new = theano.function([], [], updates=zip(old_params, params))

Just modifying Yaroslav's code from that thread to use tf.assign, with a control dependency to make sure the outputs are computed before the assignments happen:
import tensorflow as tf

class TensorFlowTheanoFunction(object):
    def __init__(self, inputs, outputs, updates=()):
        self._inputs = inputs
        self._outputs = outputs
        self._updates = updates

    def __call__(self, *args, **kwargs):
        feeds = {}
        for (argpos, arg) in enumerate(args):
            feeds[self._inputs[argpos]] = arg
        try:
            outputs_identity = [tf.identity(output) for output in self._outputs]
            output_is_list = True
        except TypeError:
            outputs_identity = [tf.identity(self._outputs)]
            output_is_list = False
        with tf.control_dependencies(outputs_identity):
            assign_ops = [tf.assign(variable, replacement)
                          for variable, replacement in self._updates]
        outputs_list = tf.get_default_session().run(
            outputs_identity + assign_ops, feeds)[:len(outputs_identity)]
        if output_is_list:
            return outputs_list
        else:
            assert len(outputs_list) == 1
            return outputs_list[0]

a = tf.placeholder(dtype=tf.int32)
b = tf.placeholder(dtype=tf.int32)
variable = tf.get_variable(
    "variable", shape=[], dtype=tf.int32, initializer=tf.zeros_initializer)
c = a + b + variable
d = a - b

sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())

f = TensorFlowTheanoFunction([a, b], [c, d], updates=[(variable, variable + 1)])
print(f(1, 2))
print(f(1, 2))
print(f(0, 2))

f = TensorFlowTheanoFunction([a, b], c, updates=[(variable, variable + 1)])
print(f(1, 2))
print(f(1, 2))
print(f(0, 2))
This updates the variable at each iteration:
[3, -1]
[4, -1]
[4, -2]
6
7
7
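(A hedged aside, not from the thread: in TF 2.x the same pattern can be written with tf.function and tf.Variable.assign_add; automatic control dependencies make the reads of the variable happen before the update, so no explicit tf.control_dependencies is needed. A minimal sketch:)

import tensorflow as tf  # assumes TF 2.x

v = tf.Variable(0)

@tf.function
def f(a, b):
    c = a + b + v      # reads v before the update (auto control dependencies)
    d = a - b
    v.assign_add(1)    # plays the role of `updates`
    return c, d

print(f(1, 2))  # (3, -1)
print(f(1, 2))  # (4, -1)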

Related

tf.keras.layers.Cropping2D for PyTorch

I'm currently working on a GAN that generates time series, and I want to tailor the generated series to the desired number of time steps. In TensorFlow I always used tf.keras.layers.Cropping2D for this. Is there an equivalent in PyTorch?
class cont_cond_cnn_generator(nn.Module):
    def __init__(self, nz=32, dim_embed=DIM_EMBED):
        dim_embed = DIM_EMBED
        super(cont_cond_cnn_generator, self).__init__()
        self.z_dim = nz
        self.dim_embed = dim_embed
        self.dense = nn.Linear(self.z_dim, 3 * GEN_SIZE*16, bias=True)
        self.dense = nn.Linear(self.z_dim, 4 * 4 * GEN_SIZE*16, bias=True)
        self.final = nn.Conv1d(GEN_SIZE, channels, 3, stride=1, padding=1, bias=bias)
        nn.init.xavier_uniform_(self.dense.weight.data, 1.)
        nn.init.xavier_uniform_(self.final.weight.data, 1.)
        self.genblock0 = ResBlockGenerator(in_channels=GEN_SIZE*16, out_channels=GEN_SIZE*8, dim_embed=dim_embed)  # 4 ---> 8; 2nd parameter originally GEN_SIZE*8
        self.genblock1 = ResBlockGenerator(GEN_SIZE*8, GEN_SIZE*4, dim_embed=dim_embed)  # 8 ---> 16; 2nd parameter originally GEN_SIZE*4
        self.genblock2 = ResBlockGenerator(GEN_SIZE*4, GEN_SIZE*2, dim_embed=dim_embed)  # 16 ---> 32; 2nd parameter originally GEN_SIZE*2
        self.genblock3 = ResBlockGenerator(GEN_SIZE*2, GEN_SIZE, dim_embed=dim_embed)  # 32 ---> 64; 2nd parameter originally GEN_SIZE
        self.final = nn.Sequential(
            nn.BatchNorm1d(GEN_SIZE),
            nn.ReLU(),
            self.final,
            nn.Sigmoid()
        )

    def forward(self, z, y):
        z = z.view(z.size(0), z.size(1))
        out = self.dense(z)
        out = out.view(-1, GEN_SIZE*16, 3)  # out = out.view(-1, GEN_SIZE*16, 4, 4)
        out = self.genblock0(out, y)
        out = self.genblock1(out, y)
        out = self.genblock2(out, y)
        out = self.genblock3(out, y)
        out = self.final(out)
        out = torchvision.transforms.functional.crop(out, 0, 0, 3, 2201)
        return out
I used torchvision.transforms.functional.crop() and it works. But is it a good method, or would you recommend another way to do it?
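For comparison, a minimal sketch (my own illustration, with made-up names) of a Cropping2D-like module for PyTorch NCHW tensors; plain slicing in forward does the same job as tf.keras.layers.Cropping2D:

import torch
import torch.nn as nn

class Cropping2D(nn.Module):
    """Crop (top, bottom) rows and (left, right) columns off NCHW tensors,
    mirroring tf.keras.layers.Cropping2D (a sketch, not a library class)."""
    def __init__(self, cropping=((1, 1), (1, 1))):
        super().__init__()
        (self.top, self.bottom), (self.left, self.right) = cropping

    def forward(self, x):
        h, w = x.size(2), x.size(3)
        return x[:, :, self.top:h - self.bottom, self.left:w - self.right]

x = torch.randn(8, 3, 32, 32)
print(Cropping2D(((2, 2), (4, 4)))(x).shape)  # torch.Size([8, 3, 28, 24])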

TypeError when trying to make a loop creating artificial neural networks

I am working on an artificial neural network which I have created via subclassing.
The subclassing looks like this:
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import scipy.stats as si
import sympy as sy
from sympy.stats import Normal, cdf
from sympy import init_printing

class DGMNet(tf.keras.Model):
    def __init__(self, n_layers, n_nodes, dimensions=1):
        """
        Parameters:
        - n_layers: number of layers
        - n_nodes: number of nodes in (inner) layers
        - dimensions: number of spatial dimensions
        """
        super().__init__()
        self.n_layers = n_layers
        self.initial_layer = DenseLayer(dimensions + 1, n_nodes, activation="relu")
        self.lstmlikelist = []
        for _ in range(self.n_layers):
            self.lstmlikelist.append(LSTMLikeLayer(dimensions + 1, n_nodes, activation="relu"))
        self.final_layer = DenseLayer(n_nodes, 1, activation=None)

    def call(self, t, x):
        X = tf.concat([t, x], 1)
        S = self.initial_layer.call(X)
        for i in range(self.n_layers):
            S = self.lstmlikelist[i].call({'S': S, 'X': X})
        result = self.final_layer.call(S)
        return result

class DenseLayer(tf.keras.layers.Layer):
    def __init__(self, n_inputs, n_outputs, activation):
        """
        Parameters:
        - n_inputs: number of inputs
        - n_outputs: number of outputs
        - activation: activation function
        """
        super(DenseLayer, self).__init__()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.W = self.add_weight(shape=(self.n_inputs, self.n_outputs),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(1, self.n_outputs),
                                 initializer='random_normal',
                                 trainable=True)
        self.activation = _get_function(activation)

    def call(self, inputs):
        S = tf.add(tf.matmul(inputs, self.W), self.b)
        S = self.activation(S)
        return S

class LSTMLikeLayer(tf.keras.layers.Layer):
    def __init__(self, n_inputs, n_outputs, activation):
        """
        Parameters:
        - n_inputs: number of inputs
        - n_outputs: number of outputs
        - activation: activation function
        """
        super(LSTMLikeLayer, self).__init__()
        self.n_outputs = n_outputs
        self.n_inputs = n_inputs
        self.Uz = self.add_variable("Uz", shape=[self.n_inputs, self.n_outputs])
        self.Ug = self.add_variable("Ug", shape=[self.n_inputs, self.n_outputs])
        self.Ur = self.add_variable("Ur", shape=[self.n_inputs, self.n_outputs])
        self.Uh = self.add_variable("Uh", shape=[self.n_inputs, self.n_outputs])
        self.Wz = self.add_variable("Wz", shape=[self.n_outputs, self.n_outputs])
        self.Wg = self.add_variable("Wg", shape=[self.n_outputs, self.n_outputs])
        self.Wr = self.add_variable("Wr", shape=[self.n_outputs, self.n_outputs])
        self.Wh = self.add_variable("Wh", shape=[self.n_outputs, self.n_outputs])
        self.bz = self.add_variable("bz", shape=[1, self.n_outputs])
        self.bg = self.add_variable("bg", shape=[1, self.n_outputs])
        self.br = self.add_variable("br", shape=[1, self.n_outputs])
        self.bh = self.add_variable("bh", shape=[1, self.n_outputs])
        self.activation = _get_function(activation)

    def call(self, inputs):
        S = inputs['S']
        X = inputs['X']
        Z = self.activation(tf.add(tf.add(tf.matmul(X, self.Uz), tf.matmul(S, self.Wz)), self.bz))
        G = self.activation(tf.add(tf.add(tf.matmul(X, self.Ug), tf.matmul(S, self.Wg)), self.bg))
        R = self.activation(tf.add(tf.add(tf.matmul(X, self.Ur), tf.matmul(S, self.Wr)), self.br))
        H = self.activation(tf.add(tf.add(tf.matmul(X, self.Uh), tf.matmul(tf.multiply(S, R), self.Wh)), self.bh))
        Snew = tf.add(tf.multiply(tf.subtract(tf.ones_like(G), G), H), tf.multiply(Z, S))
        return Snew

def _get_function(name):
    f = None
    if name == "tanh":
        f = tf.nn.tanh
    elif name == "sigmoid":
        f = tf.nn.sigmoid
    elif name == "relu":
        f = tf.nn.relu
    elif not name:
        f = tf.identity
    assert f is not None
    return f

# Sampling
def sampler(N1, N2, N3):
    np.random.seed(42)
    # Sampler #1: PDE domain
    t1 = np.random.uniform(low=T0, high=T, size=[N1, 1])
    s1 = np.random.uniform(low=S1, high=S2, size=[N1, 1])
    # Sampler #2: boundary condition
    t2 = np.zeros(shape=(1, 1))
    s2 = np.zeros(shape=(1, 1))
    # Sampler #3: initial/terminal condition
    t3 = T * np.ones((N3, 1))  # Terminal condition
    s3 = np.random.uniform(low=S1, high=S2, size=[N3, 1])
    return (t1, s1, t2, s2, t3, s3)

# Loss function
def loss(model, t1, x1, t2, x2, t3, x3):
    # Loss term #1: PDE
    V = model(t1, x1)
    V_t = tf.gradients(V, t1)[0]
    V_x = tf.gradients(V, x1)[0]
    V_xx = tf.gradients(V_x, x1)[0]
    f = V_t + r*x1*V_x + 0.5*sigma**2*x1**2*V_xx - r*V
    L1 = tf.reduce_mean(tf.square(f))
    # Loss term #2: boundary condition
    # L2 = tf.reduce_mean(tf.square(V))
    # Loss term #3: initial/terminal condition
    L3 = tf.reduce_mean(tf.square(model(t3, x3) - tf.math.maximum(x3 - K, 0)))
    return (L1, L3)

# Black-Scholes analytical solution
def analytical_solution(t, x):
    # C = S*N(d1) - K*e^(-rT)*N(d2)
    # S: spot price
    # K: strike price
    # T: time to maturity
    # r: interest rate
    # sigma: volatility of underlying asset
    d1 = (np.log(x / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = (np.log(x / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    call = (x * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))
    return call

# Set random seeds
np.random.seed(42)
tf.random.set_seed(42)

# Strike price
K = 0.5
# PDE parameters
r = 0.05      # Interest rate
sigma = 0.25  # Volatility
# Time limits
T0 = 0.0 + 1e-10  # Initial time
T = 1.0           # Terminal time
# Space limits
S1 = 0.0 + 1e-10  # Low boundary
S2 = 1.0          # High boundary
# Number of samples
NS_1 = 1000
NS_2 = 0
NS_3 = 100
t1, s1, t2, s2, t3, s3 = sampler(NS_1, NS_2, NS_3)
Now what I want to do is iterate over different parameters and create a new ANN for each iteration.
My plan was to do it in this way:
tf.compat.v1.disable_eager_execution()

t1_t = tf.compat.v1.placeholder(tf.float32, [None, 1])
x1_t = tf.compat.v1.placeholder(tf.float32, [None, 1])
t2_t = tf.compat.v1.placeholder(tf.float32, [None, 1])
x2_t = tf.compat.v1.placeholder(tf.float32, [None, 1])
t3_t = tf.compat.v1.placeholder(tf.float32, [None, 1])
x3_t = tf.compat.v1.placeholder(tf.float32, [None, 1])

volatility_list = [0.08]  # [0.08, 0.16, 0.18, 0.2, 0.28]
stages_list = [10]        # [10, 50, 100]
layers_list = [3]         # [3, 5, 7]
npl_list = [3]            # [3, 6, 9, 12, 15]

for sigma in volatility_list:
    for st in stages_list:
        for lay in layers_list:
            for npl in npl_list:
                # Neural network definition
                num_layers = lay
                nodes_per_layer = npl
                ann = DGMNet(num_layers, nodes_per_layer)
                L1_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
                loss_t = L1_t + L3_t
                # Optimizer parameters
                global_step = tf.Variable(1, trainable=False)
                starter_learning_rate = 0.001
                learning_rate = tf.compat.v1.train.exponential_decay(
                    starter_learning_rate, global_step, 100000, 0.96, staircase=True)
                optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_t)
                # Training parameters
                steps_per_sample = st
                sampling_stages = 100  # 2000
                # Plot tensors
                tplot_t = tf.compat.v1.placeholder(tf.float32, [None, 1], name="tplot_t")  # We name it to recover it later
                xplot_t = tf.compat.v1.placeholder(tf.float32, [None, 1], name="xplot_t")
                vplot_t = tf.identity(ann(tplot_t, xplot_t), name="vplot_t")  # Trick for naming the trained model
                # Training data holders
                sampling_stages_list = []
                elapsed_time_list = []
                loss_list = []
                L1_list = []
                L3_list = []
                # Train network!!
                init_op = tf.compat.v1.global_variables_initializer()
                sess = tf.compat.v1.Session()
                sess.run(init_op)
                for i in range(sampling_stages):
                    t1, x1, t2, x2, t3, x3 = sampler(NS_1, NS_2, NS_3)
                    start_time = time.clock()
                    for _ in range(steps_per_sample):
                        loss, L1, L3, _ = sess.run(
                            [loss_t, L1_t, L3_t, optimizer],
                            feed_dict={t1_t: t1, x1_t: x1, t2_t: t2, x2_t: x2, t3_t: t3, x3_t: x3})
                    end_time = time.clock()
                    elapsed_time = end_time - start_time
                    sampling_stages_list.append(i)
                    elapsed_time_list.append(elapsed_time)
                    loss_list.append(loss)
                    L1_list.append(L1)
                    L3_list.append(L3)
                    text = "Stage: {:04d}, Loss: {:e}, L1: {:e}, L3: {:e}, {:f} seconds".format(
                        i, loss, L1, L3, elapsed_time)
                    print(text)
                # Goodness of fit
                time_0 = 0
                listofzeros = [time_0] * 100
                prices_for_goodness = np.linspace(S1, S2, 100)
                goodness_list = []
                solution_goodness = analytical_solution(listofzeros, prices_for_goodness)
                ttt = time_0 * np.ones_like(prices_for_goodness.reshape(-1, 1))
                nn_goodness, = sess.run(
                    [vplot_t],
                    feed_dict={tplot_t: ttt, xplot_t: prices_for_goodness.reshape(-1, 1)})
                deviation_list = np.abs(solution_goodness - nn_goodness) / (T - T0)
                print("{0:.2f}%".format(np.average(deviation_list) * 100))
Unfortunately, as soon as the first iteration ends, I get a TypeError saying that a 'numpy.float32' object is not callable.
Error Traceback:
TypeError Traceback (most recent call last)
<ipython-input-14-bb14643d0c42> in <module>()
10
11
---> 12 L1_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
13 loss_t = L1_t + L3_t
14
TypeError: 'numpy.float32' object is not callable
I guess that the problem is with the creation of the placeholders; however, I am not sure how to solve it. Maybe one of you can help me.
Thanks in advance!
Chris
Did you create a variable called loss? It seems that the loss function is shadowed by a variable with the same name: inside the training loop, loss, L1, L3, _ = sess.run(...) rebinds loss to a numpy.float32, so on the next outer iteration Python tries to call that number as a function.
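A minimal sketch of that shadowing (hypothetical values, mirroring the training loop above); renaming the result of sess.run, e.g. to loss_val, avoids it:

import numpy as np

def loss(a, b):          # a function named `loss`, as in the question
    return a + b

loss = np.float32(1.5)   # the training loop rebinds the same name to a number
loss(1, 2)               # raises TypeError: 'numpy.float32' object is not callable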

Tensorflow 2: Nested TensorArray

What's wrong with this code? Edit: it works on CPU, but fails when run on GPU. It runs for a few iterations, then fails with one of these errors (GitHub issue here):
2019-12-02 12:59:29.727966: F tensorflow/core/framework/tensor_shape.cc:445] Check failed: end <= dims() (1 vs. 0)
Process finished with exit code -1073740791 (0xC0000409)
or
tensorflow.python.framework.errors_impl.InvalidArgumentError: Tried to set a tensor with incompatible shape at a list index. Item element shape: [3,3] list shape: [3]
[[{{node while/body/_1/TensorArrayV2Write/TensorListSetItem}}]] [Op:__inference_computeElement_73]
@tf.function
def computeElement_byBin():
    c = tf.TensorArray(tf.int64, size=1, infer_shape=False, element_shape=(3,))
    const = tf.cast(tf.constant([1, 2, 3]), tf.int64)
    c = c.write(0, const)
    c_c = c.concat()
    return c_c

@tf.function
def computeElement():
    c = tf.TensorArray(tf.int64, size=1, infer_shape=False, element_shape=(3,))
    for x in tf.range(50):
        byBinVariant = computeElement_byBin()
        c = c.write(0, byBinVariant)
    return c.concat()

k = 0
while True:
    k += 1
    r = computeElement()
    print('iteration: %s, result: %s' % (k, r))
I played around with it more and narrowed it down a bit:
@tf.function
def computeElement():
    tarr = tf.TensorArray(tf.int32, size=1, clear_after_read=False)
    tarr = tarr.write(0, [1])
    concat = tarr.concat()
    # PROBLEM HERE
    for x in tf.range(50):
        concat = tarr.concat()
    return concat
If you set tf.config.threading.set_inter_op_parallelism_threads(1) the bug goes away, which means it is related to the parallelization of the unrolled TensorFlow loop. Knowing that TensorFlow unrolls loops statically when iterating over a Python variable rather than a tensor, I could confirm that this code worked:
@tf.function
def computeElement(arr):
    tarr = tf.TensorArray(tf.int32, size=1)
    tarr = tarr.write(0, [1])
    concat = tarr.concat()
    a = 0
    while a < arr:
        concat = tarr.concat()
        a += 1
    return concat

k = 0
while True:
    k += 1
    r = computeElement(50)
So the solution for now is to loop over a Python variable rather than a tensor.
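Applied to the original computeElement (reusing computeElement_byBin from the question), that just means iterating over a Python range, which gets statically unrolled:

@tf.function
def computeElement():
    c = tf.TensorArray(tf.int64, size=1, infer_shape=False, element_shape=(3,))
    for x in range(50):  # Python int: statically unrolled, no parallel loop frames
        byBinVariant = computeElement_byBin()
        c = c.write(0, byBinVariant)
    return c.concat()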

Calling a basic LSTM cell within a custom Tensorflow cell

I'm trying to implement the MATCH LSTM from this paper: https://arxiv.org/pdf/1608.07905.pdf
I'm using TensorFlow. One part of the architecture is an RNN that uses the input and the previous state to compute an attention vector, which it applies to a context before concatenating the result with the inputs and sending them into an LSTM. To build the first part of this RNN, I wrote a custom cell for TensorFlow to call. But I'm not sure how to send the results into an LSTM. Is it possible to call the basic LSTM cell within the custom cell I'm writing? I tried this a few different ways but kept getting the error "'module' object has no attribute 'rnn_cell'" at the line where the LSTM cell is called. Any help would be much appreciated!
EDIT to add code:
import numpy as np
import tensorflow as tf

class MatchLSTMCell(tf.contrib.rnn.RNNCell):
    def __init__(self, state_size, question_tensor, encoded_questions, batch_size):
        self._state_size = state_size
        self.question_tensor = question_tensor
        self.encoded_questions = encoded_questions
        self.batch_size = batch_size

    @property
    def state_size(self):
        return self._state_size

    @property
    def output_size(self):
        return self._state_size

    def __call__(self, inputs, state, scope=None):
        scope = scope or type(self).__name__
        with tf.variable_scope(scope):
            W_p = tf.get_variable("W_p", dtype=tf.float64, shape=[self.state_size, self.state_size],
                                  initializer=tf.contrib.layers.xavier_initializer())
            W_r = tf.get_variable("W_r", dtype=tf.float64, shape=[self.state_size, self.state_size],
                                  initializer=tf.contrib.layers.xavier_initializer())
            b_p = tf.get_variable("b_p", dtype=tf.float64, shape=[self.state_size])
            w = tf.get_variable("w", dtype=tf.float64, shape=[1, self.state_size])
            b = tf.get_variable("b", dtype=tf.float64, shape=[])
            # print 'question tensor', np.shape(self.question_tensor)
            # print 'inputs', np.shape(inputs)
            # print 'insides', np.shape(tf.matmul(inputs, W_p) + tf.matmul(state, W_r) + b_p)
            G = tf.nn.tanh(
                tf.transpose(tf.transpose(self.question_tensor, perm=[1, 0, 2]) +
                             (tf.matmul(inputs, W_p) + tf.matmul(state, W_r) + b_p), perm=[1, 0, 2])
            )
            # print 'big G', np.shape(G)
            attention_list = []
            for i in range(self.batch_size):
                attention_matrix = tf.matmul(G[i, :, :], tf.transpose(w))
                attention_list.append(attention_matrix)
            attention_scores = tf.stack(attention_list)
            a = tf.nn.softmax(attention_scores + b)
            a = tf.reshape(a, [self.batch_size, -1])
            # print 'a shape is', np.shape(a)
            weighted_question_list = []
            for i in range(self.batch_size):
                attention_vector = tf.matmul(tf.reshape(a[i], [1, -1]), self.encoded_questions[i])
                weighted_question_list.append(attention_vector)
            weighted_questions = tf.stack(weighted_question_list)
            weighted_questions = tf.reshape(weighted_questions, [32, -1])
            # print 'weighted questions', np.shape(weighted_questions)
            z = tf.concat([inputs, weighted_questions], 1)
            lstm_cell = tf.nn.rnn_cell.LSTMCell(self.state_size)
            output, new_state = lstm_cell.__call__(z, state)
            return output, new_state
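(Side note on the error itself: across the TF 1.x releases the RNN cell classes moved between tf.nn.rnn_cell and tf.contrib.rnn, so "'module' object has no attribute 'rnn_cell'" usually means the import path doesn't match the installed version. A hedged sketch of the two spellings, with a made-up cell size:)

import tensorflow as tf

# One of these paths should exist, depending on the TF 1.x version
# (assumption: the AttributeError comes from a path the installed version lacks):
try:
    lstm_cell = tf.nn.rnn_cell.LSTMCell(64)   # older 0.x and 1.2+ releases
except AttributeError:
    lstm_cell = tf.contrib.rnn.LSTMCell(64)   # TF 1.0 / 1.1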
I'm also trying to reimplement Match-LSTM for SQuAD as an experiment.
I used MurtyShikhar's implementation as a reference. It works! However, he had to customize AttentionWrapper and used the existing BasicLSTM cell.
I also tried to create a Match_LSTM_cell by putting z and state as the (inputs, state) pair of a BasicLSTM:
def __call__(self, inputs, state):
    # c is not an output; c is a "memory keeper".
    # It is necessary to update new_c and pass it through the LSTM.
    c, h = state
    # ... calculate your z
    # ... inputs will be each token in the context (passage), respectively
    # ... calculate alpha_Q
    z = tf.concat([inputs, alpha_Q], axis=1)
    # This part is a reimplementation of BasicLSTM
    with vs.variable_scope("LSTM_core"):
        sigmoid = math_ops.sigmoid
        concat = _linear([z, h], dimension * 4, bias=True)
        i, j, f, o = array_ops.split(concat, num_or_size_splits=4, axis=1)
        new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
        new_h = self._activation(new_c) * sigmoid(o)
        new_state = (new_c, new_h)
    return new_h, new_state

Tensorflow: Calling externally set-up function in `tf.scan` (e.g using `tf.make_template`) results in error

I have an RNN-like structure with some building blocks (component neural networks) that are passed in by the user. Here is a minimal example:
import tensorflow as tf
tf.reset_default_graph()

def initialize(shape):
    init = tf.random_normal(shape, mean=0, stddev=0.1, dtype=tf.float32)
    return init

def test_rnn_with_external(input, hiddens, external_fct):
    """
    A simple rnn that makes the standard update, then
    feeds the new hidden state through some external
    function.
    """
    dim_in = input.get_shape().as_list()[-1]
    btsz = input.get_shape().as_list()[1]
    shape = (dim_in + hiddens, hiddens)
    _init = initialize(shape)
    W = tf.get_variable("rnn_w", initializer=_init)
    _init = tf.zeros([hiddens])
    b = tf.get_variable("rnn_b", initializer=_init)

    def _step(previous, input):
        concat = tf.concat(1, [input, previous])
        h_t = tf.tanh(tf.add(tf.matmul(concat, W), b))
        h_t = external_fct(h_t)
        return h_t

    h_0 = tf.zeros([btsz, hiddens])
    states = tf.scan(_step,
                     input,
                     initializer=h_0,
                     name="states")
    return states

# the external function, relying on the templating mechanism.
def ext_fct(hiddens):
    """
    """
    def tmp(input):
        shape = (hiddens, hiddens)
        _init = initialize(shape)
        W = tf.get_variable("ext_w", initializer=_init)
        b = 0
        return tf.add(tf.matmul(input, W), b, name="external")
    return tf.make_template(name_="external_fct", func_=tmp)

# run from here on
t = 5
btsz = 4
dim = 2
hiddens = 3
x = tf.placeholder(tf.float32, shape=(t, btsz, dim))
ext = ext_fct(hiddens)
states = test_rnn_with_external(x, hiddens, external_fct=ext)
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
Running this fails with an error ending in:
InvalidArgumentError: All inputs to node external_fct/ext_w/Assign must be from the same frame.
With "frame" I would associate an area on the stack, so I thought that maybe tf.make_template does something very weird and thus isn't usable here. The external function can be rewritten a bit and then called more directly, like so:
import tensorflow as tf
tf.reset_default_graph()

def initialize(shape):
    init = tf.random_normal(shape, mean=0, stddev=0.1, dtype=tf.float32)
    return init

def test_rnn_with_external(input, hiddens, external_fct):
    dim_in = input.get_shape().as_list()[-1]
    btsz = input.get_shape().as_list()[1]
    shape = (dim_in + hiddens, hiddens)
    _init = initialize(shape)
    W = tf.get_variable("rnn_w", initializer=_init)
    _init = tf.zeros([hiddens])
    b = tf.get_variable("rnn_b", initializer=_init)

    def _step(previous, input):
        """
        """
        concat = tf.concat(1, [input, previous])
        h_t = tf.tanh(tf.add(tf.matmul(concat, W), b))
        h_t = external_fct(h_t, hiddens)
        return h_t

    h_0 = tf.zeros([btsz, hiddens])
    states = tf.scan(_step,
                     input,
                     initializer=h_0,
                     name="states")
    return states

def ext_fct_new(input, hiddens):
    """
    """
    shape = (hiddens, hiddens)
    _init = initialize(shape)
    W = tf.get_variable("ext_w_new", initializer=_init)
    b = 0
    return tf.add(tf.matmul(input, W), b, name="external_new")

t = 5
btsz = 4
dim = 2
hiddens = 3
x = tf.placeholder(tf.float32, shape=(t, btsz, dim))
states = test_rnn_with_external(x, hiddens, external_fct=ext_fct_new)
sess = tf.InteractiveSession()
sess.run(tf.initialize_all_variables())
However, I still get the same error: InvalidArgumentError: All inputs to node ext_w_new/Assign must be from the same frame.
Of course, moving the contents of the external function into the _step part (and calling tf.get_variable beforehand) works. But then the flexibility (necessary in the original code) is gone.
What am I doing wrong? Any help/tips/pointers are greatly appreciated.
(Note: Asked this on github, too: https://github.com/tensorflow/tensorflow/issues/4478)
Using a tf.constant_initializer solves the problem. This is described here.
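A minimal sketch of what that fix might look like on the template version above (my own illustration): pass an initializer object plus an explicit shape to tf.get_variable instead of a concrete tensor, so no init tensor is created inside the scan frame:

def ext_fct(hiddens):
    def tmp(input):
        # An initializer object (rather than a pre-built tensor) lets the
        # variable's init op live outside the tf.scan loop frame;
        # tf.constant_initializer is what the fix above refers to.
        W = tf.get_variable("ext_w",
                            shape=(hiddens, hiddens),
                            initializer=tf.constant_initializer(0.1))
        return tf.add(tf.matmul(input, W), 0, name="external")
    return tf.make_template(name_="external_fct", func_=tmp)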