One dimensional sparse tensor - tensorflow

I am trying to pass a one-dimensional sparse vector to TensorFlow:
import tensorflow as tf
import numpy as np

x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)

with tf.Session() as sess:
    indices = np.array([0, 1], dtype=np.int64)
    values = np.array([1.5, 3.0], dtype=np.float32)
    shape = np.array([2], dtype=np.int64)
    print(sess.run(y, feed_dict={
        x: tf.SparseTensorValue(indices, values, shape)}))
This code throws the following error:
ValueError: Cannot feed value of shape (2,) for Tensor u'Placeholder_2:0', which has shape '(?, ?)'
Am I passing the shape wrong?

The indices need to have shape (2, 1): one row per nonzero element, where each row holds that element's coordinates (a single coordinate here, since the tensor is rank 1). So change the indices to indices = np.array([[0], [1]], dtype=np.int64). The code below works:
x = tf.sparse_placeholder(tf.float32)
y = tf.sparse_reduce_sum(x)

with tf.Session() as sess:
    indices = np.array([[0], [1]], dtype=np.int64)
    values = np.array([1.5, 3.0], dtype=np.float32)
    shape = np.array([2], dtype=np.int64)
    print(sess.run(y, feed_dict={
        x: tf.SparseTensorValue(indices, values, shape)}))

# Output:
# 4.5
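The same pattern generalizes: for a rank-N sparse tensor, indices must have shape (nnz, N) and the dense shape must have N entries. A minimal sketch of the rank-2 case (hypothetical 3x4 shape, same placeholder setup as above):

x2 = tf.sparse_placeholder(tf.float32)
y2 = tf.sparse_reduce_sum(x2)

with tf.Session() as sess:
    # Two nonzero entries in a 3x4 matrix, so indices has shape (2, 2)
    sp = tf.SparseTensorValue(indices=np.array([[0, 1], [2, 3]], dtype=np.int64),
                              values=np.array([1.5, 3.0], dtype=np.float32),
                              dense_shape=np.array([3, 4], dtype=np.int64))
    print(sess.run(y2, feed_dict={x2: sp}))  # 4.5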

Related

How to wrap tensorflow graph with placeholder in keras

I have a TensorFlow graph (stored in a protobuf file) with placeholder operations as inputs. I want to wrap this graph as a Keras layer or model.
Here is an example:
with tf.Graph().as_default() as gf:
    x = tf.placeholder(tf.float32, shape=(None, 123), name='x')
    c = tf.constant(100, dtype=tf.float32, name='C')
    y = tf.multiply(x, c, name='y')
    with tf.gfile.GFile("test_graph/y.pb", "wb") as f:
        raw = gf.as_graph_def().SerializeToString()
        f.write(raw)
Load it back as a TensorFlow graph:
persisted_sess = tf.Session()
with persisted_sess.as_default():
    with tf.gfile.FastGFile("./test_graph/y.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    for i, op in enumerate(persisted_sess.graph.get_operations()):
        tensor = persisted_sess.graph.get_tensor_by_name(op.name + ':0')
        print(i, '\t', op.name, op.type, tensor)
    x_tensor = persisted_sess.graph.get_tensor_by_name('x:0')
    y_tensor = persisted_sess.graph.get_tensor_by_name('y:0')
We can see the x and y operations and tensors:
0 x Placeholder Tensor("x:0", shape=(?, 123), dtype=float32)
1 C Const Tensor("C:0", shape=(), dtype=float32)
2 y Mul Tensor("y:0", shape=(?, 123), dtype=float32)
Then I try to wrap it into a Keras model using different methods:
Method 1:
output_layer = Lambda(lambda x: y_tensor, name='output_y')(x_tensor)
model = Model(inputs=[x_tensor], outputs=[output_layer]) # ERROR!
This already produces the error: InvalidArgumentError: You must feed a value for placeholder tensor 'x' with dtype float and shape [?,123] [[{{node x}}]]
Method 2:
input_x = Input(name='x', shape=(123,), dtype='float32')
output_layer = Lambda(lambda x: y_tensor, name='output_y')(input_x)
model = Model(inputs=[input_x], outputs=[output_layer]) # OK
model.predict({'x': np.ones((3, 123), dtype=np.float32)}) # ERROR!
This causes the same error at the predict call.
The closest info I can find relating to my question is this, but it doesn't address the handling of placeholders. What would be the correct way to do this?
I figured out the way. We need to use InputLayer instead of Input.
First, the code that creates the demo TensorFlow graph PB:
def dump_model():  # just to hide all the vars during the demo creation
    import numpy as np
    import sys
    import tensorflow as tf
    with tf.Graph().as_default() as gf:
        x = tf.placeholder(tf.float32, shape=(None, 123), name='x')
        b = tf.placeholder(tf.float32, shape=(None, 123), name='b')
        c = tf.constant(100, dtype=tf.float32, name='C')
        y = tf.multiply(x, c, name='y')
        z = tf.add(y, x, name='z')
        print(x, b, c, y, z)
        with tf.gfile.GFile("test_graph/y.pb", "wb") as f:
            raw = gf.as_graph_def().SerializeToString()
            print(type(raw), len(raw))
            f.write(raw)

dump_model()
Then import the graph and find the input/output tensors:
import numpy as np
import sys
import tensorflow as tf

persisted_sess = tf.Session()
with persisted_sess.as_default() as session:
    with tf.gfile.FastGFile("./test_graph/y.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        persisted_sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    print(persisted_sess.graph.get_name_scope())
    for i, op in enumerate(persisted_sess.graph.get_operations()):
        tensor = persisted_sess.graph.get_tensor_by_name(op.name + ':0')
        print(i, '\t', op.name, op.type, tensor)
    x_tensor = persisted_sess.graph.get_tensor_by_name('x:0')
    b_tensor = persisted_sess.graph.get_tensor_by_name('b:0')
    y_tensor = persisted_sess.graph.get_tensor_by_name('y:0')
    z_tensor = persisted_sess.graph.get_tensor_by_name('z:0')
Then we can create the Keras model and run inference:
from tensorflow.keras.layers import Lambda, InputLayer
from tensorflow.keras import Model
from tensorflow.python.util import nest
from tensorflow.python.keras.utils import layer_utils

input_x = InputLayer(name='x', input_tensor=x_tensor)
input_x.is_placeholder = True  # this is the critical bit
input_b = InputLayer(name='b2', input_tensor=b_tensor)  # note: the Keras name can differ from the TF name
input_b.is_placeholder = True
output_y = Lambda(lambda x: y_tensor, name='output_y')(input_x.output)
output_z = Lambda(lambda x_b: z_tensor, name='output_z')([input_x.output, input_b.output])

base_model_inputs = nest.flatten([layer_utils.get_source_inputs(input_x.output),
                                  layer_utils.get_source_inputs(input_b.output)])
base_model = Model(base_model_inputs, [output_y, output_z])
y_out, z_out = base_model.predict({'x': np.ones((3, 123), dtype=np.float32),
                                   'b2': np.full((3, 123), 100.0, dtype=np.float32)})
y_out.shape, z_out.shape
And we can even create a new model from the base model:
from tensorflow.keras.layers import Add

derived_output = Add(name='derived')([output_y, output_z])
derived_model = Model(base_model.inputs, [derived_output])
derived_out = derived_model.predict({'x': np.ones((3, 123), dtype=np.float32),
                                     'b2': np.full((3, 123), 100.0, dtype=np.float32)})
derived_out.shape
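As a quick sanity check (a sketch beyond the original answer): in the demo graph y = 100 * x and z = y + x, so the derived output should equal 201 * x everywhere, while b is fed only because it is a declared model input and is never consumed by y or z:

x_in = np.ones((3, 123), dtype=np.float32)
b_in = np.full((3, 123), 100.0, dtype=np.float32)  # required as a model input, but unused by y and z
y_check, z_check = base_model.predict({'x': x_in, 'b2': b_in})
print(np.allclose(y_check, 100.0), np.allclose(z_check, 101.0))  # True True
print(np.allclose(derived_model.predict({'x': x_in, 'b2': b_in}), 201.0))  # True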

Tensorflow Embedding using Continuous and Categorical Variables

Based on this post, I tried to create another model where I'm combining both categorical and continuous variables.
Please find the code below:
from __future__ import print_function
import pandas as pd
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import LabelEncoder

if __name__ == '__main__':
    # 1 categorical input feature and a binary output
    df = pd.DataFrame({'cat2': np.array(['o', 'm', 'm', 'c', 'c', 'c', 'o', 'm', 'm', 'm']),
                       'num1': np.random.rand(10),
                       'label': np.array([0, 0, 1, 1, 0, 0, 1, 0, 1, 1])})

    encoder = LabelEncoder()
    encoder.fit(df.cat2.values)
    X1 = encoder.transform(df.cat2.values).reshape(-1, 1)
    X2 = np.array(df.num1.values).reshape(-1, 1)
    # X = np.concatenate((X1,X2), axis=1)
    Y = np.zeros((len(df), 2))
    Y[np.arange(len(df)), df.label.values] = 1

    # Neural net parameters
    training_epochs = 5
    learning_rate = 1e-3
    cardinality = len(np.unique(X1))  # unique categories in the categorical column
    embedding_size = 2
    input_X_size = 1
    n_labels = len(np.unique(Y))
    n_hidden = 10

    # Placeholders for input, output
    cat2 = tf.placeholder(tf.int32, [None], name='cat2')
    x = tf.placeholder(tf.float32, [None, 1], name="input_x")
    y = tf.placeholder(tf.float32, [None, 2], name="input_y")

    embed_matrix = tf.Variable(
        tf.random_uniform([cardinality, embedding_size], -1.0, 1.0),
        name="embed_matrix"
    )
    embed = tf.nn.embedding_lookup(embed_matrix, cat2)
    inputs_with_embed = tf.concat([x, embedding_aggregated], axis=2, name="inputs_with_embed")

    # Neural network weights
    h = tf.get_variable(name='h2', shape=[inputs_with_embed, n_hidden],
                        initializer=tf.contrib.layers.xavier_initializer())
    W_out = tf.get_variable(name='out_w', shape=[n_hidden, n_labels],
                            initializer=tf.contrib.layers.xavier_initializer())

    # Neural network operations
    # embedded_chars = tf.nn.embedding_lookup(embeddings, x)
    layer_1 = tf.matmul(inputs_with_embed, h)
    layer_1 = tf.nn.relu(layer_1)
    out_layer = tf.matmul(layer_1, W_out)

    # Define loss and optimizer
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out_layer, labels=y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(training_epochs):
            avg_cost = 0.
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost],
                            feed_dict={x: X2, cat2: X1, y: Y})
        print("Optimization Finished!")
But I'm getting the following error. It seems I'm not concatenating the continuous variable and the embedding properly, but I don't understand how to fix it. Could someone please guide me?
ValueError: Shape must be at least rank 3 but is rank 2 for 'inputs_with_embed_2' (op: 'ConcatV2') with input shapes: [?,1], [?,2], [] and with computed input tensors: input[2] = <2>.
Thanks!
If by embedding_aggregated you mean embed (probably a typo), the error is that there is no axis=2 in your case; it should be axis=1:

inputs_with_embed = tf.concat([x, embed], axis=1, name="inputs_with_embed")

embed has shape [None, embedding_size] and x has shape [None, 1]. Both are 2-D tensors, so the only valid axes are 0 and 1 (axis numbering starts at 0). To get inputs_with_embed with shape [None, embedding_size + 1], you therefore need to concatenate along axis=1.
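A minimal standalone sketch (TF 1.x, hypothetical sizes) confirming the shapes after the fix:

import tensorflow as tf

cat2 = tf.placeholder(tf.int32, [None], name='cat2')
x = tf.placeholder(tf.float32, [None, 1], name='input_x')
embed_matrix = tf.Variable(tf.random_uniform([3, 2], -1.0, 1.0))  # cardinality=3, embedding_size=2
embed = tf.nn.embedding_lookup(embed_matrix, cat2)                # static shape (?, 2)
inputs_with_embed = tf.concat([x, embed], axis=1)                 # static shape (?, 3)
print(embed.shape, inputs_with_embed.shape)  # (?, 2) (?, 3)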

Difference between SparseTensor and SparseTensorValue

What is the difference between SparseTensor and SparseTensorValue? Is there anything I should keep in mind if I want to build the sparse tensor based on fed indices and values? I could only find a few toy examples.
It depends on where you define your sparse tensor.
If you would like to define the tensor outside the graph, e.g. build a sparse value to feed in later, use SparseTensorValue. In contrast, if the sparse tensor is defined inside the graph, use SparseTensor.
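To make the distinction concrete (a sketch; the plain-data observation is mine, not from the original answer): a SparseTensorValue is ordinary Python data that exists without any graph or session, while constructing a SparseTensor adds tensors to the current graph:

import tensorflow as tf

# Plain Python data: no graph or session involved
stv = tf.SparseTensorValue(indices=[[0, 0], [1, 2]], values=[1.1, 1.2], dense_shape=[2, 6])
print(stv.indices, stv.values, stv.dense_shape)  # ordinary Python lists

# Graph object: its fields are tf.Tensors, evaluated only inside a session
st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1.1, 1.2], dense_shape=[2, 6])
print(st.indices)  # a tf.Tensor with shape (2, 2) and dtype int64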
Sample code for tf.SparseTensorValue:
x_sp = tf.sparse_placeholder(dtype=tf.float32)
W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=x_sp, b=W)

init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)

stv = tf.SparseTensorValue(indices=[[0, 0], [1, 2]], values=[1.1, 1.2],
                           dense_shape=[2, 6])
result = sess.run(y, feed_dict={x_sp: stv})
print(result)
Sample code for tf.SparseTensor:
indices_i = tf.placeholder(dtype=tf.int64, shape=[2, 2])
values_i = tf.placeholder(dtype=tf.float32, shape=[2])
dense_shape_i = tf.placeholder(dtype=tf.int64, shape=[2])
st = tf.SparseTensor(indices=indices_i, values=values_i, dense_shape=dense_shape_i)

W = tf.Variable(tf.random_normal([6, 6]))
y = tf.sparse_tensor_dense_matmul(sp_a=st, b=W)

init = tf.global_variables_initializer()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess.run(init)

result = sess.run(y, feed_dict={indices_i: [[0, 0], [1, 2]],
                                values_i: [1.1, 1.2],
                                dense_shape_i: [2, 6]})
print(result)
Hope this helps~

feature concatenation tf.concat(x, tf.square(x), axis=1) fails for a placeholder x

If I try
import tensorflow as tf

x_data = [1, 2, 3]
x = tf.placeholder(tf.float32)
z = tf.concat([x, tf.square(x)], axis=1)

with tf.Session() as sess:
    sess.run(z, feed_dict={x: x_data})
it fails. I basically want to make a vector [[x],[x^2]]. Could you help?
tf.concat can only be used to concatenate tensors along dimensions that already exist. If you want to concatenate tensors along a new dimension you can use tf.stack:
import tensorflow as tf

x_data = [1, 2, 3]
x = tf.placeholder(tf.float32)
z = tf.stack([x, tf.square(x)], axis=1)

with tf.Session() as sess:
    sess.run(z, feed_dict={x: x_data})
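Equivalently (a sketch beyond the original answer), you can create the missing dimension explicitly with tf.expand_dims and then use tf.concat, which shows why the original call failed: axis=1 must already exist on both operands:

import tensorflow as tf

x_data = [1, 2, 3]
x = tf.placeholder(tf.float32)
# Give each operand an explicit second dimension, then concatenate along it
z = tf.concat([tf.expand_dims(x, 1), tf.expand_dims(tf.square(x), 1)], axis=1)

with tf.Session() as sess:
    print(sess.run(z, feed_dict={x: x_data}))  # [[1. 1.] [2. 4.] [3. 9.]]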

Indexing using a tensor

I'm trying to use a Targmax (argmax indices) tensor to index another tensor.
In numpy you can do the following indexing:
mat = np.random.uniform(size=3*10*10).reshape((3, 10, 10))
indices = [np.array([0, 0, 1, 2]), np.array([1, 1, 2, 3]), np.array([1, 3, 0, 3])]
mat[indices]
Is there an equivalent operation in tensorflow?
x = tf.constant([[1, 2], [3, 4]])
sess = tf.Session()
sess.run(tf.gather_nd(x, [[0, 0], [1, 1]]))

Out:
array([1, 4], dtype=int32)
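Applied to the numpy example above (a sketch; the stacking step is the only addition): tf.gather_nd expects one row of coordinates per gathered element, which you get by stacking the per-dimension index vectors along axis 1:

import numpy as np
import tensorflow as tf

mat = np.random.uniform(size=3*10*10).reshape((3, 10, 10))
# One (i, j, k) coordinate triple per row: shape (4, 3)
idx = np.stack([np.array([0, 0, 1, 2]),
                np.array([1, 1, 2, 3]),
                np.array([1, 3, 0, 3])], axis=1)

sess = tf.Session()
result = sess.run(tf.gather_nd(mat, idx))
print(np.allclose(result, mat[tuple(idx.T)]))  # True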