Use two sets of data for the likelihood in log_prob in TensorFlow Probability - tensorflow

I am new to tensorflow and trying to translate a STAN model into TFP. Here is my TFP model using JointDistributionCoroutineAutoBatched.
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions

def make_joint_distribution_coroutine(Depth, N_RNA):
    def model():
        ## c1 prior
        c1 = yield tfd.Gamma(concentration=1.1, rate=0.005)
        ## c2 prior
        c2 = yield tfd.Gamma(concentration=1.1, rate=0.005)
        ## s prior
        s = yield GammaModeSD(1, 1)
        ## theta prior
        theta = yield tfd.LogNormal(0, s)
        ## p prior
        p = yield BetaModeConc(0.1, c1)
        ## tfp bug, need to cast tensor to float32
        # theta = tf.cast(theta, tf.float32)
        # p = tf.cast(p, tf.float32)
        ## q formula
        q = (theta * p) / (1 - p + theta * p)
        ## qi prior
        qi = yield BetaModeConc(tf.repeat(q, N_RNA), c2)
        ## k likelihood (Binomial with probs qi)
        k = yield tfd.Binomial(tf.cast(Depth, tf.float32), qi)
        ## a likelihood (Binomial with probs p)
        a = yield tfd.Binomial(tf.cast(Depth, tf.float32), p)
    return tfd.JointDistributionCoroutineAutoBatched(model)
My model generates two different sets of observed data, a and k. If there were only a or only k, I could specify my log_prob function by
def joint_log_prob(*args):
    return joint.log_prob(*args, likelihood=data)
or
joint_log_prob = lambda *x: model.log_prob(x + (data,))
But my question is how to incorporate two different sets of data into one log_prob function? Thank you!

The simplest solution would just be specifying both. Assuming data is a tuple:
def joint_log_prob(*args):
    return joint.log_prob(*args, a=data[0], k=data[1])
or
joint_log_prob = lambda *x: model.log_prob(x + data)
You might also like to write:
joint_log_prob = joint.experimental_pin(a=.., k=..).unnormalized_log_prob
(See JointDistributionPinned)
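Not part of the original answer, but here is a minimal self-contained sketch of that pinning pattern. Note that addressing variables by keyword (a=..., k=...) only works if the yielded distributions are given names via name=; the toy model below is purely illustrative and is not the model from the question:
import tensorflow_probability as tfp
tfd = tfp.distributions

def toy_model():
    # toy joint: one latent p, two observed variables k and a
    p = yield tfd.Beta(2., 2., name='p')
    yield tfd.Binomial(total_count=10., probs=p, name='k')
    yield tfd.Binomial(total_count=10., probs=p, name='a')

joint = tfd.JointDistributionCoroutineAutoBatched(toy_model)

# pin both observed variables; what remains is an unnormalized density over p
pinned = joint.experimental_pin(k=3., a=7.)
target_log_prob = pinned.unnormalized_log_prob
print(target_log_prob(0.4))  # unnormalized log-density of p=0.4 given k=3 and a=7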

Related

How to specify integration of a quantity as an objective in Dymos

I am implementing the Bryson-Denham problem. The objective is:
$J = \frac{1}{2}\int_{0}^{1} u^2(t)\,dt$
and in the Dymos documentation, all explanations and examples specify the objective as a scalar value at loc=initial or loc=final. I could not find any example that uses the integral of some quantity over time as the objective function. Is this possible? How can I implement it?
FYI, Bryson-Denham problem is well-explained in this page:
https://www.gpops2.com/Examples/Bryson-Denham.html
Dymos will integrate any state you give it. In this case, you need to add a state for J and then also compute a state rate for it --- J_dot.
import openmdao.api as om
import dymos as dm


class BrysonDedhamODE(om.ExplicitComponent):

    def initialize(self):
        self.options.declare('num_nodes', types=int)

    def setup(self):
        nn = self.options['num_nodes']

        # inputs (state and control values at the collocation nodes)
        self.add_input('x', shape=nn)
        self.add_input('v', shape=nn)
        self.add_input('u', shape=nn)
        self.add_input('J', shape=nn)

        # state rates
        self.add_output('x_dot', shape=nn, tags=['dymos.state_rate_source:x'])
        self.add_output('v_dot', shape=nn, tags=['dymos.state_rate_source:v'])
        self.add_output('J_dot', shape=nn, tags=['dymos.state_rate_source:J'])

        # Ask OpenMDAO to compute the partial derivatives using complex-step
        # with a partial coloring algorithm for improved performance, and use
        # a graph coloring algorithm to automatically detect the sparsity pattern.
        self.declare_coloring(wrt='*', method='cs')

    def compute(self, inputs, outputs):
        v, u = inputs['v'], inputs['u']
        outputs['x_dot'] = v
        outputs['v_dot'] = u
        outputs['J_dot'] = 0.5 * u ** 2
p = om.Problem()
p.driver = om.pyOptSparseDriver()
p.driver.options['optimizer'] = 'SLSQP'
p.driver.declare_coloring()
traj = p.model.add_subsystem('traj', dm.Trajectory())
transcription = dm.Radau(num_segments=10, order=3, compressed=True)
phase0 = dm.Phase(ode_class=BrysonDedhamODE, transcription=transcription)
traj.add_phase('phase0', phase0)
phase0.set_time_options(fix_initial=True, fix_duration=True)
phase0.set_state_options("x", fix_initial=True, fix_final=True, lower=0, upper=2)
phase0.set_state_options("v", fix_initial=True, fix_final=True, lower=-2, upper=2)
phase0.set_state_options("J", fix_initial=False, fix_final=False,lower=-10, upper=10)
phase0.add_control('u', lower=-10, upper=10)
phase0.add_path_constraint('x', upper=1/9.)
phase0.add_objective('J', loc="final")
p.setup()
#initial conditions
p['traj.phase0.states:x'] = phase0.interp('x', [0,0])
p['traj.phase0.states:v'] = phase0.interp('v', [1,-1])
p['traj.phase0.t_duration'] = 1
p['traj.phase0.t_initial'] = 0
dm.run_problem(p, make_plots=True)
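Not part of the original answer: as a rough sanity check you could read the integrated objective back out of the timeseries after the run (the output path below assumes a Dymos version that uses the states: prefix, matching the interp calls above). For the 1/9 path bound, the analytic optimum of the Bryson-Denham problem is J = 4/(9·(1/9)) = 4.
# hypothetical check; the timeseries path may differ between Dymos versions
print(p.get_val('traj.phase0.timeseries.states:J')[-1])  # expect a value close to 4.0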

How to show the class distribution in Dataset object in Tensorflow

I am working on a multi-class classification task using my own images.
filenames = [] # a list of filenames
labels = [] # a list of labels corresponding to the filenames
full_ds = tf.data.Dataset.from_tensor_slices((filenames, labels))
This full dataset will then be shuffled and split into train, validation and test datasets:
full_ds_size = len(filenames)
full_ds = full_ds.shuffle(buffer_size=full_ds_size*2, seed=128) # seed is used for reproducibility
train_ds_size = int(0.64 * full_ds_size)
valid_ds_size = int(0.16 * full_ds_size)
train_ds = full_ds.take(train_ds_size)
remaining = full_ds.skip(train_ds_size)
valid_ds = remaining.take(valid_ds_size)
test_ds = remaining.skip(valid_ds_size)
Now I am struggling to understand how the classes are distributed in train_ds, valid_ds and test_ds. An ugly solution is to iterate over all the elements of each dataset and count the occurrences of each class. Is there a better way to do it?
My ugly solution:
def get_class_distribution(dataset):
    class_distribution = {}
    for element in dataset.as_numpy_iterator():
        label = element[1]
        if label in class_distribution:
            class_distribution[label] += 1
        else:
            class_distribution[label] = 1
    # sort dict by key
    class_distribution = collections.OrderedDict(sorted(class_distribution.items()))
    return class_distribution
train_ds_class_dist = get_class_distribution(train_ds)
valid_ds_class_dist = get_class_distribution(valid_ds)
test_ds_class_dist = get_class_distribution(test_ds)
print(train_ds_class_dist)
print(valid_ds_class_dist)
print(test_ds_class_dist)
The answer below assumes:
there are five classes.
labels are integers from 0 to 4.
It can be modified to suit your needs.
Define a counter function:
def count_class(counts, batch, num_classes=5):
    labels = batch['label']
    for i in range(num_classes):
        cc = tf.cast(labels == i, tf.int32)
        counts[i] += tf.reduce_sum(cc)
    return counts
Use the reduce operation:
initial_state = dict((i, 0) for i in range(5))
counts = train_ds.reduce(initial_state=initial_state,
                         reduce_func=count_class)
print([(k, v.numpy()) for k, v in counts.items()])
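A side note, not from the original answer: the snippet above assumes the dataset yields dictionaries with a 'label' key. For a dataset built from (filename, label) tuples as in the question, you would unpack the element instead, for example:
def count_class(counts, batch, num_classes=5):
    _, labels = batch  # dataset elements are (filename, label) tuples
    for i in range(num_classes):
        counts[i] += tf.reduce_sum(tf.cast(labels == i, tf.int32))
    return counts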
A solution inspired by user650654's answer, using only TensorFlow primitives (tf.unique_with_counts instead of a for loop):
In theory, this should have better performance and scale better to large datasets, batches, or class counts.
num_classes = 5

@tf.function
def count_class(counts, batch):
    y, _, c = tf.unique_with_counts(batch[1])
    return tf.tensor_scatter_nd_add(counts, tf.expand_dims(y, axis=1), c)

counts = train_ds.reduce(
    initial_state=tf.zeros(num_classes, tf.int32),
    reduce_func=count_class)
print(counts.numpy())
A similar and simpler version with numpy, which actually performed better for my simple use case:
count = np.zeros(num_classes, dtype=np.int32)
for _, labels in train_ds:
    y, _, c = tf.unique_with_counts(labels)
    count[y.numpy()] += c.numpy()
print(count)
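Another option, not from the original answers: if the labels are integers in a known range, a short numpy-based sketch with np.bincount avoids per-element Python work by batching first (num_classes is an assumption):
import numpy as np
import tensorflow as tf

num_classes = 5  # assumption: integer labels in [0, num_classes)
count = np.zeros(num_classes, dtype=np.int64)
for _, labels in train_ds.batch(1024):
    count += np.bincount(labels.numpy(), minlength=num_classes)
print(count)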

How to calculate entropy on float numbers over a tensor in Python Keras

I have been struggling with this and could not get it to work; I hope someone can help me.
I want to calculate the entropy of each row of a tensor. Because my data are float numbers, not integers, I think I need to use a binned histogram.
For example, a sample of my data is tensor = [[0.2, -0.1, 1], [2.09, -1.4, 0.9]].
Just for information, my model is seq2seq and written in Keras with the TensorFlow backend.
This is my code so far; I need to correct rev_entropy:
class entropy_measure(Layer):

    def __init__(self, beta, batch, **kwargs):
        self.beta = beta
        self.batch = batch
        self.uses_learning_phase = True
        self.supports_masking = True
        super(entropy_measure, self).__init__(**kwargs)

    def call(self, x):
        return K.in_train_phase(self.rev_entropy(x, self.beta, self.batch), x)

    def get_config(self):
        config = {'beta': self.beta}
        base_config = super(entropy_measure, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def rev_entropy(self, x, beta, batch):
        for i in x:
            i = pd.Series(i)
            p_data = i.value_counts()  # counts occurrence of each value
            entropy = entropy(p_data)  # get entropy from counts
            rev = 1 / (1 + entropy)
            return rev
        new_f_w_t = x * (rev.reshape(rev.shape[0], 1)) * beta
        return new_f_w_t
Any input is much appreciated:)
It looks like you have a series of questions that come together on this issue. I'll settle it here.
According to your code, you are calculating entropy in the form used by scipy.stats.entropy:
scipy.stats.entropy(pk, qk=None, base=None)
Calculate the entropy of a distribution for given probability values.
If only probabilities pk are given, the entropy is calculated as S = -sum(pk * log(pk), axis=0).
TensorFlow does not provide a direct API to calculate the entropy of each row of a tensor, so we need to implement the above formula ourselves.
import tensorflow as tf
import pandas as pd
from scipy.stats import entropy

a = [1.1, 2.2, 3.3, 4.4, 2.2, 3.3]
res = entropy(pd.value_counts(a))

_, _, count = tf.unique_with_counts(tf.constant(a))
# [1 2 2 1]
prob = count / tf.reduce_sum(count)
# [0.16666667 0.33333333 0.33333333 0.16666667]
tf_res = -tf.reduce_sum(prob * tf.log(prob))

with tf.Session() as sess:
    print('scipy version: \n', res)
    print('tensorflow version: \n', sess.run(tf_res))
scipy version:
1.329661348854758
tensorflow version:
1.3296613488547582
Then we define a helper function and perform the row-wise loop with tf.map_fn in your custom layer, following the code above.
def rev_entropy(self, x, beta, batch):
    def row_entropy(row):
        _, _, count = tf.unique_with_counts(row)
        prob = count / tf.reduce_sum(count)
        return -tf.reduce_sum(prob * tf.log(prob))

    value_ranges = [-10.0, 100.0]
    nbins = 50
    new_f_w_t = tf.histogram_fixed_width_bins(x, value_ranges, nbins)
    rev = tf.map_fn(row_entropy, new_f_w_t, dtype=tf.float32)
    new_f_w_t = x * 1 / (1 + rev) * beta
    return new_f_w_t
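For reference, and not part of the original answer: a standalone TF2-style sketch of the same per-row entropy (tf.log became tf.math.log in TF2); the value range and bin count are assumptions you would tune to your data:
import tensorflow as tf

def row_entropy(row):
    # entropy of the empirical distribution of values within one row
    _, _, count = tf.unique_with_counts(row)
    prob = tf.cast(count, tf.float32) / tf.cast(tf.reduce_sum(count), tf.float32)
    return -tf.reduce_sum(prob * tf.math.log(prob))

x = tf.constant([[0.2, -0.1, 1.0], [2.09, -1.4, 0.9]])
binned = tf.histogram_fixed_width_bins(x, [-10.0, 100.0], nbins=50)  # float values -> bin ids
entropies = tf.map_fn(row_entropy, tf.cast(binned, tf.float32),
                      fn_output_signature=tf.float32)
print(entropies)  # one entropy value per row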
Note that this layer will not produce a gradient that can propagate backwards, since the entropy is computed from statistical counts (the binning and counting ops are not differentiable). Maybe you need to rethink your hidden layer structure.

How to extract cell state from a LSTM at each timestep in Keras?

Is there a way in Keras to retrieve the cell state (i.e., the c vector) of an LSTM layer at every timestep of a given input?
It seems the return_state argument returns the last cell state after the computation is done, but I also need the intermediate ones. Also, I don't want to pass these cell states to the next layer; I only want to be able to access them.
Preferably using TensorFlow as backend.
Thanks
I was looking for a solution to this issue and, after reading the guidance for creating your own custom RNN cell in tf.keras (https://www.tensorflow.org/api_docs/python/tf/keras/layers/AbstractRNNCell), I believe the following is the most concise and easy-to-read way of doing this in TensorFlow 2:
import tensorflow as tf
from tensorflow.keras.layers import LSTMCell


class LSTMCellReturnCellState(LSTMCell):

    def call(self, inputs, states, training=None):
        real_inputs = inputs[:, :self.units]  # decouple [h, c]
        outputs, [h, c] = super().call(real_inputs, states, training=training)
        return tf.concat([h, c], axis=1), [h, c]


num_units = 512
test_input = tf.random.uniform([5, 100, num_units])
rnn = tf.keras.layers.RNN(LSTMCellReturnCellState(num_units),
                          return_sequences=True, return_state=True)

whole_seq_output, final_memory_state, final_carry_state = rnn(test_input)

print(whole_seq_output.shape)
>>> (5, 100, 1024)

# Hidden state sequence
h_seq = whole_seq_output[:, :, :num_units]  # (5, 100, 512)
# Cell state sequence
c_seq = whole_seq_output[:, :, num_units:]  # (5, 100, 512)
As mentioned in an above solution, you can see the advantage of this is that it can be easily wrapped into tf.keras.layers.RNN as a drop-in for the normal LSTMCell.
Here is a Colab Notebook with the code running as expected for tensorflow==2.6.0
I know it's pretty late, I hope this can help.
What you are asking is technically possible by modifying the LSTM cell's call method. I modified it so that it returns 4 dimensions instead of 3 when you set return_sequences=True.
Code
import tensorflow as tf
from keras import backend as K
from keras.layers import LSTMCell
from keras.layers.recurrent import _generate_dropout_mask


class Mod_LSTMCELL(LSTMCell):

    def call(self, inputs, states, training=None):
        if 0 < self.dropout < 1 and self._dropout_mask is None:
            self._dropout_mask = _generate_dropout_mask(
                K.ones_like(inputs),
                self.dropout,
                training=training,
                count=4)
        if (0 < self.recurrent_dropout < 1 and
                self._recurrent_dropout_mask is None):
            self._recurrent_dropout_mask = _generate_dropout_mask(
                K.ones_like(states[0]),
                self.recurrent_dropout,
                training=training,
                count=4)

        # dropout matrices for input units
        dp_mask = self._dropout_mask
        # dropout matrices for recurrent units
        rec_dp_mask = self._recurrent_dropout_mask

        h_tm1 = states[0]  # previous memory state
        c_tm1 = states[1]  # previous carry state

        if self.implementation == 1:
            if 0 < self.dropout < 1.:
                inputs_i = inputs * dp_mask[0]
                inputs_f = inputs * dp_mask[1]
                inputs_c = inputs * dp_mask[2]
                inputs_o = inputs * dp_mask[3]
            else:
                inputs_i = inputs
                inputs_f = inputs
                inputs_c = inputs
                inputs_o = inputs
            x_i = K.dot(inputs_i, self.kernel_i)
            x_f = K.dot(inputs_f, self.kernel_f)
            x_c = K.dot(inputs_c, self.kernel_c)
            x_o = K.dot(inputs_o, self.kernel_o)
            if self.use_bias:
                x_i = K.bias_add(x_i, self.bias_i)
                x_f = K.bias_add(x_f, self.bias_f)
                x_c = K.bias_add(x_c, self.bias_c)
                x_o = K.bias_add(x_o, self.bias_o)

            if 0 < self.recurrent_dropout < 1.:
                h_tm1_i = h_tm1 * rec_dp_mask[0]
                h_tm1_f = h_tm1 * rec_dp_mask[1]
                h_tm1_c = h_tm1 * rec_dp_mask[2]
                h_tm1_o = h_tm1 * rec_dp_mask[3]
            else:
                h_tm1_i = h_tm1
                h_tm1_f = h_tm1
                h_tm1_c = h_tm1
                h_tm1_o = h_tm1
            i = self.recurrent_activation(x_i + K.dot(h_tm1_i,
                                                      self.recurrent_kernel_i))
            f = self.recurrent_activation(x_f + K.dot(h_tm1_f,
                                                      self.recurrent_kernel_f))
            c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1_c,
                                                            self.recurrent_kernel_c))
            o = self.recurrent_activation(x_o + K.dot(h_tm1_o,
                                                      self.recurrent_kernel_o))
        else:
            if 0. < self.dropout < 1.:
                inputs *= dp_mask[0]
            z = K.dot(inputs, self.kernel)
            if 0. < self.recurrent_dropout < 1.:
                h_tm1 *= rec_dp_mask[0]
            z += K.dot(h_tm1, self.recurrent_kernel)
            if self.use_bias:
                z = K.bias_add(z, self.bias)

            z0 = z[:, :self.units]
            z1 = z[:, self.units: 2 * self.units]
            z2 = z[:, 2 * self.units: 3 * self.units]
            z3 = z[:, 3 * self.units:]

            i = self.recurrent_activation(z0)
            f = self.recurrent_activation(z1)
            c = f * c_tm1 + i * self.activation(z2)
            o = self.recurrent_activation(z3)

        h = o * self.activation(c)
        if 0 < self.dropout + self.recurrent_dropout:
            if training is None:
                h._uses_learning_phase = True
        return tf.expand_dims(tf.concat([h, c], axis=0), 0), [h, c]
Sample code
# create a cell
test = Mod_LSTMCELL(100)
# Input timesteps=10, features=7
in1 = Input(shape=(10,7))
out1 = RNN(test, return_sequences=True)(in1)
M = Model(inputs=[in1],outputs=[out1])
M.compile(keras.optimizers.Adam(),loss='mse')
ans = M.predict(np.arange(7*10,dtype=np.float32).reshape(1, 10, 7))
print(ans.shape)
# state_h
print(ans[0,0,0,:])
# state_c
print(ans[0,0,1,:])
First, this is not possible to do with tf.keras.layers.LSTM. You have to use LSTMCell instead, or subclass LSTM. Second, there is no need to subclass LSTMCell to get the sequence of cell states: LSTMCell already returns a list of the hidden state (h) and cell state (c) every time you call it.
For those not familiar with LSTMCell, it takes the current [h, c] tensors and the input at the current timestep (it cannot take a sequence of timesteps), and returns the activations and the updated [h, c].
Here is an example showing how to use LSTMCell to process a sequence of timesteps and return the accumulated cell states.
# example inputs
inputs = tf.convert_to_tensor(np.random.rand(3, 4), dtype='float32') # 3 timesteps, 4 features
h_c = [tf.zeros((1,2)), tf.zeros((1,2))] # must initialize hidden/cell state for lstm cell
h_c = tf.convert_to_tensor(h_c, dtype='float32')
lstm = tf.keras.layers.LSTMCell(2)
# example of how you accumulate cell state over repeated calls to LSTMCell
inputs = tf.unstack(inputs, axis=0)
c_states = []
for cur_inputs in inputs:
    out, h_c = lstm(tf.expand_dims(cur_inputs, axis=0), h_c)
    h, c = h_c
    c_states.append(c)
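If you want those accumulated cell states as a single tensor, a small follow-up (assuming the loop above):
c_seq = tf.stack(c_states, axis=0)  # shape (timesteps, batch, units), here (3, 1, 2)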
You can access the sequence of states of any RNN by setting return_sequences=True in the initializer, but note that for an LSTM this gives you the hidden states (h), not the cell states (c); return_state=True only adds the final states. You can find more information about this parameter here.
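A quick illustration of that limitation with a stock LSTM layer (the shapes are just an example):
lstm = tf.keras.layers.LSTM(2, return_sequences=True, return_state=True)
seq_h, last_h, last_c = lstm(tf.random.uniform((1, 3, 4)))
# seq_h holds the hidden state at every timestep; only the final cell state is exposed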

How to use scipy minimize when the constraints are dynamic?

I have the following optimization problem, where X and q are endogenous while the other variables are known.
I use scipy's minimize function to solve it. I have no problems with the bounds and constraints:
import numpy as np
import scipy.optimize as sco

# objective function
def objective(q, s):
    return -sumprod(q, s)

def sumprod(l1, l2):
    return sum([x * y for x, y in zip(*[l1, l2])])

# constraints
def cons_periodicflow_min(q):
    return q.sum() - qpmin
con1 = {'type': 'ineq', 'fun': cons_periodicflow_min}

def cons_periodicflow_max(q):
    return qpmax - q.sum()
con2 = {'type': 'ineq', 'fun': cons_periodicflow_max}

def cons_daily_reservoir(q):  # xmin,q,X,a,delta):
    return X + a - q - delta - xmin
con3 = {'type': 'ineq', 'fun': cons_daily_reservoir}

def cons_end_reservoir(q):  # xend,q,X,a,delta):
    return X[-1] + a[-1] - q[-1] - delta[-1] - xend
con4 = {'type': 'ineq', 'fun': cons_end_reservoir}

cons = [con1, con2, con3, con4]

# definition of the parameters
T = 3
q0 = np.zeros(T)
s0 = np.array([10, 10, 10])
qmin = [0, 0, 0]
qmax = [10, 10, 10]
delta = [1, 1, 1]
a = [2, 2, 2]
X = [10, 0, 0]
qpmax = 50
qpmin = 10
b = [(qmin[t], qmax[t]) for t in range(T)]

sol = sco.minimize(objective, q0, bounds=b, constraints=cons)
My only problem is that X depends on q, so I need to update X at each time step. Can I add this to the minimize function? If not, how can I do it?
EDIT:
I can express X in the following way (please don't mind the t / t+1 issues):
$X_{t+1} = X_t + a_t - q_t - \delta_t$
Therefore the constraint with $X_{min}$ can be rewritten as:
$X_0 + \sum_{\tau \le t}\left(a_\tau - q_\tau - \delta_\tau\right) \ge X_{min}$
Does it help to express the optimisation problem?
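(Not part of the original question, just a sketch of one possible direction: since X is fully determined by q through the recursion above, you can rebuild the reservoir trajectory from q inside the constraint functions, e.g. with a cumulative sum, so that scipy only optimizes over q. This assumes X[0], a, delta and xmin are defined as in the snippet above.)
import numpy as np
import scipy.optimize as sco

# Sketch: rebuild the reservoir trajectory from q inside the constraint,
# using the recursion X_{t+1} = X_t + a_t - q_t - delta_t.
def reservoir_levels(q):
    return X[0] + np.cumsum(np.asarray(a) - np.asarray(q) - np.asarray(delta))

def cons_daily_reservoir(q):
    # vector-valued inequality: every X_{t+1} must stay above xmin
    return reservoir_levels(q) - xmin

con3 = {'type': 'ineq', 'fun': cons_daily_reservoir}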