AttributeError: 'Tensor' object has no attribute 'is_initialized' - tensorflow

I get this error when I try to fit the model.
I also tried a single-GPU version, but the error remains. Upgrading to TensorFlow 2 would fix it, but I need to stay on this version of TensorFlow.
This is the code for the model I am using; it is built from several layers.
def hybrid_LSTM(depth=2, conv_size=16, dense_size=512, input_dim=(100,5,9,), dropoutRate=0.2):
    """
    Autoencoder model builder composed of CNNs and an LSTM
    Args:
        depth (int): number of CNN blocks, each has 3 CNN layers with BN and a dropout
        conv_size (int): initial CNN filter size, doubled in each depth level
        dense_size (int): size of latent vector and the number of filters of ConvLSTM2D
        input_dim (tuple): input dimension, should be in (y_spatial, x_spatial, temporal)
        dropoutRate (float): dropout rate used in all nodes
    Return:
        keras model
    """
    """Setup"""
    temp_filter = conv_size
    X = Input(shape=input_dim, name='input')
    model_input = X
    # X = Permute((3,1,2))(X)  # move temporal axis to be first dim
    X = Reshape((100,5,9,1))(X)  # reshape (,1) to be the feature of each spatial position
    """Encoder"""
    for i in range(depth):
        for j in range(3):
            if j == 0:  # j==0 is the first layer (j) of CNN block (i): strided conv with doubled filter size
                X = TimeDistributed(Conv2D(2*temp_filter, (3,3), padding='same', strides=(2,2), data_format="channels_last"), name='encoder_'+str(i)+str(j)+'_timeConv2D')(X)
            else:
                X = TimeDistributed(Conv2D(temp_filter, (3,3), padding='same', data_format="channels_last"), name='encoder_'+str(i)+str(j)+'_timeConv2D')(X)
            X = BatchNormalization(name='encoder_'+str(i)+str(j)+'_BN')(X)
            X = LeakyReLU(alpha=0.1, name='encoder_'+str(i)+str(j)+'_relu')(X)
            X = Dropout(dropoutRate, name='encoder_'+str(i)+str(j)+'_drop')(X)
        temp_filter = int(temp_filter * 2)
    X = TimeDistributed(Flatten())(X)
    X = LSTM(dense_size, recurrent_dropout=dropoutRate, return_sequences=False, implementation=2)(X)
    """Latent"""
    latent = X
    """Setup for decoder"""
    X = RepeatVector(100)(X)
    temp_filter = int(temp_filter/2)
    """Decoder"""
    X = LSTM(temp_filter*2*3, recurrent_dropout=dropoutRate, return_sequences=True, implementation=2)(X)
    X = Reshape((100,2,3,temp_filter))(X)
    for i in range(depth):
        for j in range(3):
            if j == 0:
                X = TimeDistributed(UpSampling2D((2,2)), name='decoder_'+str(i)+str(j)+'_upsampling')(X)
                X = TimeDistributed(ZeroPadding2D(((1,0),(1,0))), name='decoder_'+str(i)+str(j)+'_padding')(X)
                X = TimeDistributed(Conv2D(temp_filter, (3,3), data_format="channels_last"), name='decoder_'+str(i)+str(j)+'_timeConv2D')(X)
            else:
                X = TimeDistributed(Conv2D(temp_filter, (3,3), padding='same', data_format="channels_last"), name='decoder_'+str(i)+str(j)+'_timeConv2D')(X)
            X = BatchNormalization(name='decoder_'+str(i)+str(j)+'_BN')(X)
            X = LeakyReLU(alpha=0.1, name='decoder_'+str(i)+str(j)+'_relu')(X)
            X = Dropout(dropoutRate, name='decoder_'+str(i)+str(j)+'_drop')(X)
        temp_filter = int(temp_filter / 2)
    X = TimeDistributed(Conv2D(1, (1,1), padding='same', data_format="channels_last"), name='decoder__timeConv2D')(X)
    X = Reshape((100,5,9))(X)
    # X = Permute((2,3,1))(X)
    decoded = X
    X = latent
    X = Dense(1, name='Dense10', activation='sigmoid')(X)
    return Model(inputs=model_input, outputs=[decoded, X])
The traceback is:
  File "/Midgard/home/projects/Pre-trained-EEG-for-Deep-Learning-master/trainSafe_version1.py", line 167, in train_subtasks_all_tasks_keras
    parallel_model = multi_gpu_model(model, gpus=2)
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/keras/utils/multi_gpu_utils.py", line 172, in multi_gpu_model
    available_devices = _get_available_devices()
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/keras/utils/multi_gpu_utils.py", line 28, in _get_available_devices
    return [x.name for x in K.get_session().list_devices()]
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/keras/backend.py", line 462, in get_session
    _initialize_variables(session)
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/keras/backend.py", line 879, in _initialize_variables
    [variables_module.is_variable_initialized(v) for v in candidate_vars])
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/keras/backend.py", line 879, in <listcomp>
    [variables_module.is_variable_initialized(v) for v in candidate_vars])
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/util/tf_should_use.py", line 193, in wrapped
    return _add_should_use_warning(fn(*args, **kwargs))
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 3083, in is_variable_initialized
    return state_ops.is_variable_initialized(variable)
  File "/Midgard/home/miniconda3/envs/erpenet5/lib/python3.7/site-packages/tensorflow/python/ops/state_ops.py", line 133, in is_variable_initialized
    return ref.is_initialized(name=name)
AttributeError: 'Tensor' object has no attribute 'is_initialized'
This is the code that compiles the model and wraps it with multi_gpu_model:
model = hybrid_LSTM(depth=2, conv_size=8, dense_size=512, input_dim=(100, 5, 9), dropoutRate=0.2)
model.compile(optimizer=SGD(learning_rate=lr, decay=1E-5),
              loss=[mean_squared_error_ignore_0, 'binary_crossentropy'],
              # metrics=['AUC','Recall', 'Precision','binary_accuracy','accuracy'],
              metrics={'Dense10': ['AUC', 'Recall',
                                   tf.keras.metrics.SensitivityAtSpecificity(specificity=0.02),
                                   tf.keras.metrics.SpecificityAtSensitivity(sensitivity=0.02),
                                   'accuracy']},
              loss_weights=[0.4, 0.6])
parallel_model = multi_gpu_model(model, gpus=2)
parallel_model.__setattr__('callback_model', model)
parallel_model.compile(optimizer=SGD(learning_rate=lr, decay=1E-5),
                       loss=[mean_squared_error_ignore_0, 'binary_crossentropy'],
                       # metrics=['AUC','Recall', 'Precision','binary_accuracy','accuracy'],
                       metrics={'Dense10': ['AUC', 'Recall',
                                            tf.keras.metrics.SensitivityAtSpecificity(specificity=0.02),
                                            tf.keras.metrics.SpecificityAtSensitivity(sensitivity=0.02),
                                            'accuracy']},
                       loss_weights=[0.4, 0.6])
tensorflow-gpu 1.14.0,
cudatoolkit 10.1.243,
cudnn 7.6.5,

This is likely an incompatibility between your versions of TensorFlow and Keras. Daniel Möller got you on the right path, but tf.keras is a TF2 thing, and you are using TF1, so your solution will be different.
What you need to do is install a version of Keras that is compatible with TF 1.14. According to PyPI, TF 1.14 was released on June 18, 2019.
https://pypi.org/project/tensorflow/#history
You should do a grid search of the Keras versions just before and after that date.
https://pypi.org/project/keras/#history
I'd go with these Keras versions.
2.2.4
2.2.5
2.3.1
2.4.1
Install these versions with, for example:
pip3 install --upgrade keras==2.2.4
I ran into a similar problem recently with the mismatch between TF 2.7/2.8 and Keras 2.7/2.8.
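Once a matching standalone Keras is installed, the multi-GPU utility comes from keras.utils rather than tensorflow.python.keras.utils. A minimal sketch with a toy model (assumes Keras 2.2.x on the TF 1.14 backend and at least two visible GPUs):
from keras.models import Model
from keras.layers import Input, Dense
from keras.optimizers import SGD
from keras.utils import multi_gpu_model

inp = Input(shape=(10,))
out = Dense(1, activation='sigmoid', name='Dense10')(inp)
model = Model(inputs=inp, outputs=out)

parallel_model = multi_gpu_model(model, gpus=2)  # raises if fewer than 2 GPUs are visible
parallel_model.compile(optimizer=SGD(lr=0.01, decay=1e-5), loss='binary_crossentropy')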

Related

FailedPreconditionError: FailedPr...onError()

I get a FailedPreconditionError when running the session.
My network has two parts: a pre-trained network and a newly added Recognition network.
The pre-trained model is used to extract features, and those features are in turn used to train the Recognition network.
In my code, the pre-trained model is loaded first.
graph = tf.Graph()
with graph.as_default():
    input_data, input_labels, input_boxes = input_train_data.input_fn()
    input_boxes = tf.reshape(input_boxes, [input_boxes.shape[0]*2, -1])  # convert from Nx8 to 2Nx4
    # build model and loss
    net = Net(input_data, is_training=False)
    f_saver = tf.train.Saver(max_to_keep=1000, write_version=tf.train.SaverDef.V2, save_relative_paths=True)
sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
if FLAGS.gpu_memory_fraction < 0:
    sess_config.gpu_options.allow_growth = True
elif FLAGS.gpu_memory_fraction > 0:
    sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction
session = tf.Session(graph=graph, config=sess_config)
tf.logging.info('Initialize from: ' + config.train.init_checkpoint)
f_saver.restore(session, config.train.init_checkpoint)
f_saver restores the pre-trained model.
Then the feature conv5_3 is extracted and fed into the Recognition network.
conv5_3 = net.end_points['conv5_3']
with tf.variable_scope("Recognition"):
    global_step_rec = tf.Variable(0, name='global_step_rec', trainable=False)
    # Pass through recognition net
    r_net = regnet.ConstructRecNet(conv5_3)
    conv7_7 = r_net.end_points['pool7']
    # implement ROI Pooling
    # input boxes must be in x1, y1, x2, y2
    h_fmap = tf.dtypes.cast(tf.shape(conv7_7)[1], tf.float32)
    w_fmap = tf.dtypes.cast(tf.shape(conv7_7)[2], tf.float32)
    # remap boxes at input images to feature maps
    #input_boxes = input_boxes / tf.constant([config.train.input_shape[0], config.train.input_shape[0],
    #                                         config.train.input_shape[0], config.train.input_shape[0]], dtype=tf.float32)  # normalize with image size first
    remap_boxes = tf.matmul(input_boxes, tf.diag([w_fmap, h_fmap, w_fmap, h_fmap]))
    # put first column with image indexes
    rows = tf.expand_dims(tf.range(remap_boxes.shape[0]), 1)/2
    add_index = tf.concat([tf.cast(rows, tf.float32), remap_boxes], -1)
    index = tf.not_equal(tf.reduce_sum(add_index[:, 4:], axis=1), 0)
    remap_boxes = tf.gather_nd(add_index, tf.where(index))
    remap_boxes = tf.dtypes.cast(remap_boxes, tf.int32)
    prob = roi_pooling(conv7_7, remap_boxes, pool_height=1, pool_width=28)
    # Get features for CTC training
    prob = tf.transpose(prob, (1, 0, 2))  # prepare for CTC
    data_length = tf.fill([tf.shape(prob)[1]], tf.shape(prob)[0])  # input seq length, batch size
    ctc = tf.py_func(CTCUtils.compute_ctc_from_labels, [input_labels], [tf.int64, tf.int64, tf.int64])
    ctc_labels = tf.to_int32(tf.SparseTensor(ctc[0], ctc[1], ctc[2]))
    predictions = tf.to_int32(tf.nn.ctc_beam_search_decoder(prob, data_length, merge_repeated=False, beam_width=10)[0][0])
    tf.sparse_tensor_to_dense(predictions, default_value=-1, name='d_predictions')
    tf.reduce_mean(tf.edit_distance(predictions, ctc_labels, normalize=False), name='error_rate')
    loss = tf.reduce_mean(tf.compat.v1.nn.ctc_loss(inputs=prob, labels=ctc_labels, sequence_length=data_length, ctc_merge_repeated=True), name='loss')
    learning_rate = tf.train.piecewise_constant(global_step_rec, [150000, 200000], [config.train.learning_rate, 0.1 * config.train.learning_rate, 0.01 * config.train.learning_rate])
    opt_loss = tf.contrib.layers.optimize_loss(loss, global_step_rec, learning_rate, config.train.opt_type, config.train.grad_noise_scale, name='train_step')
tf.global_variables_initializer()
I can run the session up to the feature extraction conv5_3, but running anything in the Recognition part fails with FailedPreconditionError: FailedPr...onError(). What could be the problem?
graph.finalize()
with tf.variable_scope("Recognition"):
    for i in range(config.train.steps):
        input_data_, input_labels_, input_boxes_ = session.run([input_data, input_labels, input_boxes])
        conv5_3_ = session.run([conv5_3])  # can run this line
        global_step_rec_ = session.run([global_step_rec])  # got FailedPreconditionError: FailedPr...onError() at this line
        conv7_7_ = session.run([conv7_7])
        h_fmap_ = session.run([h_fmap])
Now it works.
Since my graph has two parts, I need to initialize them separately.
(1) First, collect the variables of the pre-trained model and restore them from the checkpoint with tf.train.Saver.
(2) Then initialize the remaining, newly added layers with tf.global_variables_initializer().
My code is as follows.
#Initialization
#Initialize pre-trained model first
#Since we need to restore pre-trained model and initialize to respective variables in this current graph
#(1)make a variable list for checkpoint
#(2)initialize a saver for the variable list
#(3)then restore
#(1)
from tensorflow.python import pywrap_tensorflow  # needed for NewCheckpointReader

def print_tensors_in_checkpoint_file(file_name, tensor_name, all_tensors):
    varlist = []
    reader = pywrap_tensorflow.NewCheckpointReader(file_name)
    if all_tensors:
        var_to_shape_map = reader.get_variable_to_shape_map()
        for key in sorted(var_to_shape_map):
            print(key)
            varlist.append(key)
    return varlist

varlist = print_tensors_in_checkpoint_file(file_name=config.train.init_checkpoint, all_tensors=True, tensor_name=None)
#(2)prepare the list of variables by calling variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
#countcheckpt_vars=0
#for n in tf.get_default_graph().as_graph_def().node:
# print(n.name)
#for op in tf.get_default_graph().get_operations():
# print(str(op.name))
#for var in zip(variables):
# countcheckpt_vars=countcheckpt_vars+1
#(3)
loader = tf.train.Saver(variables[:46])  # since I need to initialize only 46 variables from global variables
tf.logging.info('Initialize from: ' + config.train.init_checkpoint)
sess_config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)
if FLAGS.gpu_memory_fraction < 0:
    sess_config.gpu_options.allow_growth = True
elif FLAGS.gpu_memory_fraction > 0:
    sess_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction
session = tf.Session(graph=graph, config=sess_config)
loader.restore(session, config.train.init_checkpoint)
Then initialize the rest of the variables:
init = tf.global_variables_initializer()
session.run(init)
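As a hedged variant of that last step, you can initialize only the add-in variables so the weights just restored from the checkpoint are not touched again (this assumes, as above, that the first 46 global variables belong to the pre-trained part):
init_rest = tf.variables_initializer(variables[46:])  # skip the 46 restored variables
session.run(init_rest)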

ctc_loss error "No valid path found."

Training a model with tf.nn.ctc_loss produces an error every time the train op is run:
tensorflow/core/util/ctc/ctc_loss_calculator.cc:144] No valid path found.
Unlike in previous questions about this function, this is not due to divergence. I have a low learning rate, and the error occurs even on the first train op.
The model is a CNN -> LSTM -> CTC. Here is the model creation code:
# Build Graph
self.videoInput = tf.placeholder(shape=(None, self.maxVidLen, 50, 100, 3), dtype=tf.float32)
self.videoLengths = tf.placeholder(shape=(None), dtype=tf.int32)
self.keep_prob = tf.placeholder(dtype=tf.float32)
self.targets = tf.sparse_placeholder(tf.int32)
self.targetLengths = tf.placeholder(shape=(None), dtype=tf.int32)
conv1 = tf.layers.conv3d(self.videoInput ...)
pool1 = tf.layers.max_pooling3d(conv1 ...)
conv2 = ...
pool2 = ...
conv3 = ...
pool3 = ...
cnn_out = tf.reshape(pool3, shape=(-1, self.maxVidLen, 4*7*96))
fw_cell = tf.nn.rnn_cell.MultiRNNCell([self.cell() for _ in range(3)])
bw_cell = tf.nn.rnn_cell.MultiRNNCell([self.cell() for _ in range(3)])
outputs, _ = tf.nn.bidirectional_dynamic_rnn(
    fw_cell, bw_cell, cnn_out, sequence_length=self.videoLengths, dtype=tf.float32)
outputs = tf.concat(outputs, 2)
outputs = tf.reshape(outputs, [-1, self.hidden_size * 2])
w = tf.Variable(tf.random_normal((self.hidden_size * 2, len(self.char2index) + 1), stddev=0.2))
b = tf.Variable(tf.zeros(len(self.char2index) + 1))
out = tf.matmul(outputs, w) + b
out = tf.reshape(out, [-1, self.maxVidLen, len(self.char2index) + 1])
out = tf.transpose(out, [1, 0, 2])
cost = tf.reduce_mean(tf.nn.ctc_loss(self.targets, out, self.targetLengths))
self.train_op = tf.train.AdamOptimizer(0.0001).minimize(cost)
And here is the feed dict creation code:
indices = []
values = []
shape = [len(vids) * 2, self.maxLabelLen]
vidInput = np.zeros((len(vids) * 2, self.maxVidLen, 50, 100, 3), dtype=np.float32)
# Actual video, then left-right flip
for j in range(len(vids) * 2):
    # k is the video index
    k = j if j < len(vids) else j - len(vids)
    # convert video and label to input format
    vidInput[j, 0:len(vids[k])] = vids[k] if k == j else vids[k][:, ::-1, :]
    indices.extend([j, i] for i in range(len(labelList[k])))
    values.extend(self.char2index[c] for c in labelList[k])
fd[self.targets] = (indices, values, shape)
fd[self.videoInput] = vidInput
# Collect video lengths and label lengths
vidLengths = [len(j) for j in vids] + [len(j) for j in vids]
labelLens = [len(l) for l in labelList] + [len(l) for l in labelList]
fd[self.videoLengths] = vidLengths
fd[self.targetLengths] = labelLens
It turns out that the ctc_loss requires that the label lengths be shorter than the input lengths. If the label lengths are too long, the loss calculator cannot unroll completely and therefore cannot compute the loss.
For example, the label BIFI would require input length of at least 4 while the label BIIF would require input length of at least 5 due to a blank being inserted between the repeated symbols.
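As a small sketch of that rule, the minimum input length a label needs is its own length plus one extra frame for every pair of adjacent repeated symbols:
def min_ctc_input_length(label):
    # label length plus one blank for each pair of adjacent repeated symbols
    repeats = sum(1 for a, b in zip(label, label[1:]) if a == b)
    return len(label) + repeats

print(min_ctc_input_length("BIFI"))  # 4
print(min_ctc_input_length("BIIF"))  # 5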
I had the same issue, but I soon realized it was because I was using glob and my label came from the filename, so the label length exceeded the input length.
You can fix this issue by using:
os.path.join(*(filename.split(os.path.sep)[noOfDir:]))
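To illustrate what that line does on a hypothetical path (noOfDir is the number of leading directories to drop before the label-bearing part):
import os

filename = os.path.join("data", "train", "BIFI.mp4")  # hypothetical glob result
noOfDir = 2  # drop the two leading directories
label_part = os.path.join(*(filename.split(os.path.sep)[noOfDir:]))
print(label_part)  # BIFI.mp4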
For me the problem was fixed by setting preprocess_collapse_repeated=True.
FWIW: My target sequence length was already shorter than inputs, and the RNN outputs are that of softmax.
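A minimal, self-contained sketch of where preprocess_collapse_repeated goes in the ctc_loss call (shapes are illustrative, not the question's actual model):
import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=(None, None, 28))  # [max_time, batch, num_classes]
labels = tf.sparse_placeholder(tf.int32)
seq_len = tf.placeholder(tf.int32, shape=(None,))
cost = tf.reduce_mean(
    tf.nn.ctc_loss(labels, logits, seq_len,
                   preprocess_collapse_repeated=True))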
Another possible reason, which I found in my case, is that the input data is not normalized to the 0~1 range; because of that, the LSTM activations saturate at the beginning of training, which somehow causes the "No valid path found" log.
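A minimal sketch of that normalization, assuming raw 8-bit frames like the vidInput array in the question:
import numpy as np

frames = np.random.randint(0, 256, size=(4, 75, 50, 100, 3)).astype(np.float32)  # stand-in for raw video frames
frames /= 255.0  # scale into [0, 1] so the LSTM does not saturate early in training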

How to use `sparse_softmax_cross_entropy_with_logits`: without getting Incompatible Shapes Error

I would like to use sparse_softmax_cross_entropy_with_logits
with the Julia TensorFlow wrapper.
The operation is defined in the code here.
Basically, as I understand it, the first argument should be the logits that would normally be fed to softmax to turn them into category probabilities (~one-hot output).
And the second should be the correct labels as label ids.
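For comparison, a minimal Python sketch of the same op and the shapes it expects: logits of shape [batch, num_classes] and integer label ids of shape [batch], giving one loss value per example:
import numpy as np
import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=(None, 10))  # unnormalized scores, [batch, num_classes]
labels = tf.placeholder(tf.int32, shape=(None,))       # class ids in 0..9, [batch]
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    out = sess.run(losses, {logits: np.random.randn(100, 10), labels: np.random.randint(0, 10, 100)})
    print(out.shape)  # (100,) -- one loss per example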
I have adjusted the example code from the TensorFlow.jl readme
See below:
using Distributions
using TensorFlow
# Generate some synthetic data
x = randn(100, 50)
w = randn(50, 10)
y_prob = exp(x*w)
y_prob ./= sum(y_prob,2)
function draw(probs)
    y = zeros(size(probs))
    for i in 1:size(probs, 1)
        idx = rand(Categorical(probs[i, :]))
        y[i, idx] = 1
    end
    return y
end
y = draw(y_prob)
# Build the model
sess = Session(Graph())
X = placeholder(Float64)
Y_obs = placeholder(Float64)
Y_obs_lbl = indmax(Y_obs, 2)
variable_scope("logisitic_model", initializer=Normal(0, .001)) do
    global W = get_variable("weights", [50, 10], Float64)
    global B = get_variable("bias", [10], Float64)
end
L = X*W + B
Y=nn.softmax(L)
#costs = log(Y).*Y_obs #Dense (Orginal) way
costs = nn.sparse_softmax_cross_entropy_with_logits(L, Y_obs_lbl+1) #sparse way
Loss = -reduce_sum(costs)
optimizer = train.AdamOptimizer()
minimize_op = train.minimize(optimizer, Loss)
saver = train.Saver()
# Run training
run(sess, initialize_all_variables())
cur_loss, _ = run(sess, [Loss, minimize_op], Dict(X=>x, Y_obs=>y))
When I run it however, I get an error:
Tensorflow error: Status: Incompatible shapes: [1,100] vs. [100,10]
[[Node: gradients/SparseSoftmaxCrossEntropyWithLogits_10_grad/mul = Mul[T=DT_DOUBLE, _class=[], _device="/job:localhost/replica:0/task:0/cpu:0"](gradients/SparseSoftmaxCrossEntropyWithLogits_10_grad/ExpandDims, SparseSoftmaxCrossEntropyWithLogits_10:1)]]
in check_status(::TensorFlow.Status) at /home/ubuntu/.julia/v0.5/TensorFlow/src/core.jl:101
in run(::TensorFlow.Session, ::Array{TensorFlow.Port,1}, ::Array{Any,1}, ::Array{TensorFlow.Port,1}, ::Array{Ptr{Void},1}) at /home/ubuntu/.julia/v0.5/TensorFlow/src/run.jl:96
in run(::TensorFlow.Session, ::Array{TensorFlow.Tensor,1}, ::Dict{TensorFlow.Tensor,Array{Float64,2}}) at /home/ubuntu/.julia/v0.5/TensorFlow/src/run.jl:143
This only happens when I try to train it.
If I don't include an optimise function/output then it works fine.
So I am doing something that screws up the gradient math.

KeyError in Tensorflow when calling predict on trained model

I have trained a LinearRegressor with two features: x,y and the label: l
def train_input_fn():
    x = [1,2,3,4]
    y = [2,3,4,5]
    feature_cols = tf.constant(x)
    labels = tf.constant(y)
    return feature_cols, labels
x = tf.contrib.layers.real_valued_column("x")
y = tf.contrib.layers.real_valued_column("y")
m = tf.contrib.learn.LinearRegressor(feature_columns=[x, y],
                                     model_dir=model_dir)
m.fit(input_fn=train_input_fn, steps=100)
After training I want to predict from two new values
new_sample = np.array([20,20])
m.predict(new_sample)
but I get this error message when calling predict
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/layers/python/layers/feature_column.py", line 870, in insert_transformed_feature
input_tensor = columns_to_tensors[self.name]
KeyError: 'x'
Does anyone know why I get KeyError?
Try this:
my_feature_columns = [tf.contrib.layers.real_valued_column("", dimension=2)]
m = tf.contrib.learn.LinearRegressor(feature_columns=my_feature_columns,
                                     model_dir=model_dir)
m.fit(input_fn=train_input_fn, steps=100)
I am not an expert in Tensorflow but this works for me:
new_sample = np.array([20,20],dtype='float32')
empty_y = np.zeros(len(new_sample),dtype='float32')
prediction_x = tf.contrib.learn.io.numpy_input_fn({"x":new_sample},empty_y, batch_size=45, num_epochs=100)
forecast = list(estimator.predict(input_fn=prediction_x,as_iterable=False))

Display python variable in tensorboard

I want to display some Python variables in TensorBoard, but I can't get it to work.
With my code so far, TensorBoard only shows a line when I use the static number; if I use the commented-out lines instead, it does not work. It then prints:
ValueError: Shapes () and (?,) are not compatible
Does anyone have an idea?
import tensorflow as tf
step = 0
session = tf.Session()
tensorboardVar = tf.Variable(0, "tensorboardVar")
pythonVar = tf.placeholder("int32", [None])
#update_tensorboardVar = tensorboardVar.assign(pythonVar)
update_tensorboardVar = tensorboardVar.assign(4)
tf.scalar_summary("myVar", update_tensorboardVar)
merged = tf.merge_all_summaries()
sum_writer = tf.train.SummaryWriter('/tmp/train/c/', session.graph)
session.run(tf.initialize_all_variables())
for i in range(100):
    _, result = session.run([update_tensorboardVar, merged])
    #_, result = session.run([update_tensorboardVar, merged], feed_dict={pythonVar: i})
    sum_writer.add_summary(result, step)
    step += 1
this is working:
import tensorflow as tf
import numpy as np
step = 0
session = tf.Session()
tensorboardVar = tf.Variable(0, "tensorboardVar")
pythonVar = tf.placeholder("int32", [])
update_tensorboardVar = tensorboardVar.assign(pythonVar)
tf.scalar_summary("myVar", update_tensorboardVar)
merged = tf.merge_all_summaries()
sum_writer = tf.train.SummaryWriter('/tmp/train/c/', session.graph)
session.run(tf.initialize_all_variables())
for i in range(100):
    #_, result = session.run([update_tensorboardVar, merged])
    j = np.array(i)
    _, result = session.run([update_tensorboardVar, merged], feed_dict={pythonVar: j})
    sum_writer.add_summary(result, step)
    step += 1
An alternative way can be found in the second answer to Computing exact moving average over multiple batches in tensorflow. There it is shown how you can create custom summaries.
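For reference, a minimal sketch of that "custom summary" idea: build the Summary protobuf directly from a plain Python number, so no placeholder or assign op is needed (this uses the TF 1.x tf.summary.FileWriter; the older SummaryWriter above accepts add_summary the same way):
import tensorflow as tf

writer = tf.summary.FileWriter('/tmp/train/c/')
for step in range(100):
    value = float(step)  # any plain Python number
    summary = tf.Summary(value=[tf.Summary.Value(tag='myVar', simple_value=value)])
    writer.add_summary(summary, step)
writer.close()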