Create two 0-d tensors x and y randomly drawn from [-1, 1) - TensorFlow

x = tf.random_uniform([], -1, 1)
y = tf.random_uniform([], -1, 1)

def f1(): return x + y
def f2(): return x - y
def f3(): return 0

r = tf.case({tf.less(x, y): f1, tf.greater(x, y): f2}, default=f3, exclusive=True)
This is the task: return x + y if x < y, x - y if x > y, and 0 otherwise. I seem to be getting 'int' object has no attribute 'name'. Any suggestions?

Make f3 return a float instead of an integer.
This works for me:
x = tf.random_uniform([], -1, 1)
y = tf.random_uniform([], -1, 1)

def f1(): return x + y
def f2(): return x - y
def f3(): return 0.0

r = tf.case({tf.less(x, y): f1, tf.greater(x, y): f2}, default=f3, exclusive=True)
Edit:
While the code above works with recent TensorFlow versions, older versions require f3 to return a Tensor as well.
x = tf.random_uniform([], -1, 1)
y = tf.random_uniform([], -1, 1)

def f1(): return x + y
def f2(): return x - y
def f3(): return tf.constant(0.0)

r = tf.case({tf.less(x, y): f1, tf.greater(x, y): f2}, default=f3, exclusive=True)
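To sanity-check r, a minimal sketch (my own addition, assuming TensorFlow 1.x session semantics; x, y, and r are evaluated together in one run, so their values are consistent):
with tf.Session() as sess:
    for _ in range(3):
        print(sess.run([x, y, r]))  # e.g. [0.42, 0.87, 1.29] when x < y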

Just wrap your random numbers in the tf.sign() function:
import tensorflow as tf

x = tf.sign(tf.random_uniform((10,), -1, 1))
with tf.Session() as sess:
    print(sess.run(x))
and you will get something like: [ 1. 1. 1. -1. 1. -1. -1. -1. -1. 1.]. It works because:
Returns an element-wise indication of the sign of a number.
y = sign(x) = -1 if x < 0; 0 if x == 0 or tf.is_nan(x); 1 if x > 0.

Related

python function as cvxpy parameter for dynamic optimization (optimal control)

import numpy as np
import cvxpy as cp

def af(a, b):
    return np.array([[a, b], [b**2, b]])

np.random.seed(1)
n = 2
m = 2
T = 50
alpha = 0.2
beta = 3
# A = np.eye(n) - alpha * np.random.rand(n, n)
B = np.random.randn(n, m)
x_0 = beta * np.random.randn(n)

x = cp.Variable((n, T + 1))
u = cp.Variable((m, T))
A = cp.Parameter((2, 2))
cost = 0
constr = []
for t in range(T):
    cost += cp.sum_squares(x[:, t + 1]) + cp.sum_squares(u[:, t])
    A = af(*x[:, t])
    constr += [x[:, t + 1] == A @ x[:, t] + B @ u[:, t], cp.norm(u[:, t], "inf") <= 1]
# sums problem objectives and concatenates constraints.
constr += [x[:, T] == 0, x[:, 0] == x_0]
problem = cp.Problem(cp.Minimize(cost), constr)
problem.solve()
I want to use a Python function (a lambdified function) as a cvxpy parameter. I tried the approach above; please let me know whether cvxpy supports a Python function as a parameter. Thank you.
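For reference, a quick sketch of how a cp.Parameter is normally used (my own note, not an answer from the thread): a Parameter holds a constant value that is assigned through its .value attribute before calling solve(), so it cannot wrap a Python function of the decision variables:
import numpy as np
import cvxpy as cp

A = cp.Parameter((2, 2))
A.value = np.array([[1.0, 2.0], [4.0, 2.0]])  # e.g. af(1, 2) evaluated at numeric a, b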

How to set 'y > 0' formula in set_xlim of matplotlib?

I want to set the x-range according to the y-values when plotting, e.g. only where y > 0, but I'm not sure how to do this. Could you let me know how to set it?
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

df = pd.read_csv('file.csv')
x = np.array(df['A'])
y = np.array(df['B'])
z = np.array(df['C'])
x_for_ax1 = np.ma.masked_where((y < 0) | (y > 100), x)
fig, (ax2, ax1) = plt.subplots(ncols=1, nrows=2)
# the x-range of ax1 and ax2 is the same.
ax1.set_ylim([-10, 40])
ax2.set_ylim([-5, 5])
ax1.set_xlim([x_for_ax1.min(), x_for_ax1.max()])
ax2.set_xlim([x_for_ax1.min(), x_for_ax1.max()])
If you want to restrict the x-limits to the region where y lies within the y-axis limits, you can mask x where y is out of range and take the masked array's minimum and maximum.
In the example below, on the left both subplots get the x-limits where either y or z is in range. On the right, each subplot only gets the x-range where its own variable is in range.
For demonstration purposes, the example creates a data frame from some dummy data.
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

a = np.linspace(-1, 4, 500)
b = np.sin(a) * 100
c = np.cos(a) * 150
df = pd.DataFrame({'A': a, 'B': b, 'C': c})
x = np.array(df['A'])
y = np.array(df['B'])
z = np.array(df['C'])

fig, ((ax1, ax3), (ax2, ax4)) = plt.subplots(ncols=2, nrows=2)
ax1.set_xlabel('x')
ax2.set_xlabel('x')
ax3.set_xlabel('x')
ax4.set_xlabel('x')
ax1.set_ylabel('y')
ax3.set_ylabel('y')
ax2.set_ylabel('z')
ax4.set_ylabel('z')

ymin = 1
ymax = 100
zmin = 1
zmax = 150

# Mask x where the corresponding y (and/or z) is out of range.
x_for_ax1 = np.ma.masked_where(((y < ymin) | (y > ymax)) & ((z < zmin) | (z > zmax)), x)
x_for_ax3 = np.ma.masked_where((y < ymin) | (y > ymax), x)
x_for_ax4 = np.ma.masked_where((z < zmin) | (z > zmax), x)

ax1.plot(x, y)
ax3.plot(x, y)
ax1.set_ylim([ymin, ymax])
ax3.set_ylim([ymin, ymax])
ax2.plot(x, z)
ax4.plot(x, z)
ax2.set_ylim([zmin, zmax])
ax4.set_ylim([zmin, zmax])

# The min/max of a masked array ignore the masked-out entries.
ax1.set_xlim([x_for_ax1.min(), x_for_ax1.max()])
ax2.set_xlim([x_for_ax1.min(), x_for_ax1.max()])
ax1.set_title('x limited to y and z range')
ax2.set_title('x limited to y and z range')
ax3.set_xlim([x_for_ax3.min(), x_for_ax3.max()])
ax3.set_title('x limited to y range')
ax4.set_xlim([x_for_ax4.min(), x_for_ax4.max()])
ax4.set_title('x limited to z range')

plt.tight_layout(w_pad=1)
plt.show()

How to vectorise an integration function?

I'm a beginner with numpy and I want to vectorise this function. I don't quite understand what I need to do, but this is what I've come up with:
import numpy as np

n = 1000000
h = 1 / n
x = np.arange(1, n, 1)

def f(x):
    return x ** 3

def rec(x):
    result = np.zeros_like(x)  # note: inherits the integer dtype of x
    result[x < n] = f((x[x < n]) * h)
    return result

integral = 0.5 * h + h * rec(x)
print(integral)
I end up with an array of 0's. Could someone please point me in the right direction?
Try:
def trap(f, a, b, n):
    xs = np.linspace(a, b, n + 1)
    ys = f(xs)
    return (0.5 * ys[0] + 0.5 * ys[-1] + np.sum(ys[1:-1])) * (b - a) / n
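As a usage sketch (my own check, not part of the original answer), applying it to the integral attempted above, whose exact value is 1/4:
import numpy as np

print(trap(lambda x: x ** 3, 0, 1, 1000000))  # ~0.25, since the integral of x^3 from 0 to 1 is 1/4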

Evaluate a condition on each element of a vector y in tensorflow

I am trying to evaluate a condition on each element of a vector y, so that I get a vector whose i'th element tells me whether y[i] satisfies the condition. Is there any way to do this without using loops? So far, I have tried the following:
dim = 3
x = tf.placeholder(tf.float32, shape=[dim])
y = tf.log(x)
tf1 = tf.constant(1)
tf0 = tf.constant(0)
x_0 = tf.tile([x[0]], [dim])
delta = tf.cond(tf.equal(y, x_0), tf1, tf0)
sess = tf.Session()
a = np.ones((1, 3))
print(sess.run(delta, feed_dict={x: a}))
For a given input x, I want delta[i] to be 1 if y[i] = x[0] and 0 otherwise.
I get the error:
Shape must be of equal rank but are 0 and 1 for 'Select_2' (op: 'Select') with input shapes: [3], [], []
I am new to TensorFlow; any help would be appreciated!
It seems you get the error because you are trying to compare tensors with different shapes.
Here is working code:
import tensorflow as tf
import numpy as np
dim = 3
x = tf.placeholder(tf.float32, shape=(1, dim), name='ktf')
y = tf.log(x)
delta = tf.cast(tf.equal(y, x[0]), dtype=tf.int32)
sess = tf.Session()
a = np.ones((1, 3))
print(sess.run(delta, feed_dict={x: a}))
For your case, there is no need to use tf.cond; tf.equal does this without loops, and thanks to broadcasting there is no need to tile anything. Just use:
dim = 3
x = tf.placeholder(tf.float32, shape=[dim])
y = tf.log(x)
delta = tf.cast(tf.equal(y, x[0]), tf.float32)  # or an integer type
sess = tf.Session()
a = np.ones(dim)  # the fed value must match the placeholder's shape [dim]
print(sess.run(delta, feed_dict={x: a}))

batch process of graph_cnn in tensorflow

I want to use graph_cnn (Defferrard et al., 2016) for inputs with a varying number of nodes. The author provided example code (see graph_cnn). Below is what I think is the critical part of the code:
def chebyshev5(self, x, L, Fout, K):
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
    L = scipy.sparse.csr_matrix(L)
    L = graph.rescale_L(L, lmax=2)
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    # Transform to Chebyshev basis
    x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
    x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
    x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
        return tf.concat([x, x_], axis=0)  # K x M x Fin*N
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
        x = concat(x, x2)
        x0, x1 = x1, x2
    x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
    x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
    x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
    # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
    W = self._weight_variable([Fin*K, Fout], regularization=False)
    x = tf.matmul(x, W)  # N*M x Fout
    return tf.reshape(x, [N, M, Fout])  # N x M x Fout
Essentially, I think what this does can be simplified to something like
output = concat{ L^k x : k = 0, ..., K-1 } * W
x is the input, of size N x M x Fin;
L is an array of operators on x, each of size M x M, matching the corresponding sample;
W is the neural-network parameter tensor to be optimized, of size Fin x K x Fout;
N is the number of samples in a batch (fixed for any batch);
M is the number of nodes in the graph (variable across batches);
Fin is the number of input features (fixed for any batch);
Fout is the number of output features (fixed for any batch);
K is a constant representing the number of steps (hops) in the graph.
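To make the shorthand concrete, here is a small numpy sketch (my own illustration, with tiny made-up sizes) of the simplified formula for a single sample; it uses plain powers L^k as in the shorthand above, whereas the actual code uses the Chebyshev recurrence T_k(L) instead of raw powers:
import numpy as np

M, Fin, Fout, K = 5, 2, 3, 4
L = np.random.randn(M, M)    # one sample's graph operator
x = np.random.randn(M, Fin)  # one sample's node features
W = np.random.randn(Fin * K, Fout)

# stack L^k x for k = 0 .. K-1, then apply the shared filter weights
feats = np.concatenate([np.linalg.matrix_power(L, k) @ x for k in range(K)], axis=1)  # M x Fin*K
y = feats @ W  # M x Fout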
For a single example, the above code works. But since both x and L have variable sizes from sample to sample, I don't know how to make it work for a batch of samples.
tf.matmul currently (v1.4) only supports batch matrix multiplication on the lowest two dimensions, and only for dense tensors. If either input tensor is sparse, it raises a dimension-mismatch error. tf.sparse_tensor_dense_matmul cannot be applied to batch inputs either.
Therefore, my current solution is to move all the L-preparation steps to before the function call, pass L as a dense tensor (shape: [N, M, M]), and use tf.matmul to perform the batch matrix multiplication.
Here is my revised code:
def chebyshev5_batch(x, L, Fout, K, lyr_num):
    '''
    chebyshev5_batch
    Purpose:
        perform the graph filtering on the given layer
    Args:
        x: the batch of inputs for the given layer,
           dense tensor, size: [N, M, Fin]
        L: the batch of sorted Laplacians of the given layer (tf.Tensor),
           if in dense format, size: [N, M, M]
        Fout: the number of output features on the given layer
        K: the filter size or number of hops on the given layer
        lyr_num: the index of the original Laplacian layer (starting from 0)
    Output:
        y: the filtered output from the given layer
    '''
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)  # convert Dimensions to plain ints for the reshapes below
    # # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
    # L = scipy.sparse.csr_matrix(L)
    # L = graph.rescale_L(L, lmax=2)
    # L = L.tocoo()
    # indices = np.column_stack((L.row, L.col))
    # L = tf.SparseTensor(indices, L.data, L.shape)
    # L = tf.sparse_reorder(L)
    # # Transform to Chebyshev basis
    # x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
    # x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N

    def expand_concat(orig, new):
        new = tf.expand_dims(new, 0)  # 1 x N x M x Fin
        return tf.concat([orig, new], axis=0)  # (shape(orig)[0] + 1) x N x M x Fin

    # L:    N x M x M
    # x0:   N x M x Fin
    # L*x0: N x M x Fin
    x0 = x  # N x M x Fin
    stk_x = tf.expand_dims(x0, axis=0)  # 1 x N x M x Fin (eventually K x N x M x Fin, if K > 1)
    if K > 1:
        x1 = tf.matmul(L, x0)  # N x M x Fin
        stk_x = expand_concat(stk_x, x1)
    for kk in range(2, K):
        # Chebyshev recurrence; the factor 2 matches the original chebyshev5 above
        x2 = 2 * tf.matmul(L, x1) - x0  # N x M x Fin
        stk_x = expand_concat(stk_x, x2)
        x0 = x1
        x1 = x2
    # now stk_x has the shape of K x N x M x Fin;
    # transpose to the shape of N x M x Fin x K
    # (source positions:      1   2   3     0)
    stk_x_transp = tf.transpose(stk_x, perm=[1, 2, 3, 0])
    stk_x_forMul = tf.reshape(stk_x_transp, [N*M, Fin*K])
    # W = self._weight_variable([Fin*K, Fout], regularization=False)
    W_initial = tf.truncated_normal_initializer(0, 0.1)
    W = tf.get_variable('weights_L_' + str(lyr_num), [Fin*K, Fout], tf.float32, initializer=W_initial)
    tf.summary.histogram(W.op.name, W)
    y = tf.matmul(stk_x_forMul, W)
    y = tf.reshape(y, [N, M, Fout])
    return y
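A minimal usage sketch (my own illustration, not from the original post; assumes TF 1.x, and batch_size, num_nodes, num_features are hypothetical fixed sizes):
import numpy as np
import tensorflow as tf  # assumes TensorFlow 1.x

batch_size, num_nodes, num_features = 4, 10, 3  # hypothetical sizes for this batch
x_in = tf.placeholder(tf.float32, [batch_size, num_nodes, num_features])
# L is prepared (rescaled, densified) outside the function, as described above
L_in = tf.placeholder(tf.float32, [batch_size, num_nodes, num_nodes])
y_out = chebyshev5_batch(x_in, L_in, Fout=8, K=3, lyr_num=0)  # N x M x Fout

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    y_val = sess.run(y_out, feed_dict={
        x_in: np.random.randn(batch_size, num_nodes, num_features),
        L_in: np.random.randn(batch_size, num_nodes, num_nodes),
    })
    print(y_val.shape)  # (4, 10, 8)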