Sharing of array list or variable between 2 distributed tensorflow processes - tensorflow

I am presently working on Distributed TensorFlow with 2 worker processes and am facing an issue sharing a variable between these two worker processes.
I found tf.get_collection/tf.add_collection but am still unable to get the variable value shared between the 2 processes.
Here are a few details around how I want to share the data among the worker processes in Distributed TensorFlow:
def create_variable(layer_shape):
    with tf.variable_scope("share_lay"):
        layers = tf.get_variable("layers", shape=layer_shape, trainable=True)
    with tf.variable_scope("share_lay", reuse=tf.AUTO_REUSE):
        layers = tf.get_variable("layers", shape=layer_shape, trainable=True)
    return layers

def set_layer(layers):
    tf.add_to_collection("layers", layers)

def get_layer(name):
    return tf.get_collection(name)[0]

if taskid == 0:
    layers = create_variable(layer_shape)
    layers = <some value>
    set_layer(layers)

if taskid == 1:
    layers = create_variable(layer_shape)
    layers = get_layer("layers")
I am getting an error when calling get_layer():
return tf.get_collection(name)[0]
IndexError: list index out of range
It appears that the data cannot be shared between the workers.
Any suggestions / pointers would be appreciated.
Thanks,
Kapil

I finally solved the same problem by using tf.train.replica_device_setter() to place the variables on the parameter server and adding them to a collection. Later, I can use tf.get_collection() in any worker to return that collection, which is actually a python list. Note that tf.get_collection only returns a copy of the original collection. If you want to change the variables in the original collection, you should use tf.get_collection_ref, which actually returns the collection list itself.
Here is an example:
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '',
                           """One of 'ps', 'worker' """)
tf.app.flags.DEFINE_integer('task_index', 0,
                            """Index of task within the job""")

cluster = tf.train.ClusterSpec(
    {'ps': ['localhost:22222'],
     'worker': ['localhost:22223', 'localhost:22227']})
config = tf.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1)

if FLAGS.job_name == 'ps':
    server = tf.train.Server(cluster, job_name='ps', task_index=FLAGS.task_index, config=config)
    server.join()
else:
    server = tf.train.Server(cluster, job_name='worker', task_index=FLAGS.task_index, config=config)

    with tf.device(tf.train.replica_device_setter(cluster=cluster)):
        # create a collection 'shared_list' and add two variables to the collection 'shared_list'
        # note that these two variables are placed on the parameter server
        a = tf.Variable(name='a', initial_value=tf.constant(1.0),
                        collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'shared_list'])
        b = tf.Variable(name='b', initial_value=tf.constant(2.0),
                        collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'shared_list'])

    # now let's print out the value of a+2.0 and b+2.0 using the collection 'shared_list' from different workers
    # note that tf.get_collection will return a copy of the existing collection, which is actually a python list
    with tf.device('/job:worker/task:%d' % FLAGS.task_index):
        c = tf.get_collection('shared_list')[0] + 2.0  # a + 2.0
        d = tf.get_collection('shared_list')[1] + 2.0  # b + 2.0

    with tf.train.MonitoredTrainingSession(master=server.target,
                                           is_chief=(FLAGS.task_index == 0),
                                           config=config) as sess:
        print('this is worker %d' % FLAGS.task_index)
        print(c.eval(session=sess))
        print(d.eval(session=sess))

    server.join()
worker 0 will print out:
this is worker 0
3.0
4.0
worker 1 will print out:
this is worker 1
3.0
4.0
Edit: worker 0 modifies the variable 'a' to 10, and then worker 1 prints out the new value of 'a', which becomes 10 immediately. Variable 'a' is visible to both worker 0 and worker 1 because they are in a distributed setting. Below is an example. Also refer to this blog post on Amid Fish by Matthew Rahtz for how to share variables in distributed tensorflow. Actually, we don't need any parameter server to share variables: any two workers can share the same variable with each other as long as they create two variables having exactly the same name.
Here is the example:
import tensorflow as tf
from time import sleep

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '',
                           """One of 'ps', 'worker' """)
tf.app.flags.DEFINE_integer('task_index', 0,
                            """Index of task within the job""")

cluster = tf.train.ClusterSpec(
    {'ps': ['localhost:22222'],
     'worker': ['localhost:22223', 'localhost:22227']})

if FLAGS.job_name == 'ps':
    server = tf.train.Server(cluster, job_name='ps', task_index=FLAGS.task_index)
    server.join()
else:
    server = tf.train.Server(cluster, job_name='worker', task_index=FLAGS.task_index)

    with tf.device(tf.train.replica_device_setter(cluster=cluster)):
        # create a collection 'shared_list' and add two variables to the collection 'shared_list'
        # note that these two variables are placed on the parameter server
        a = tf.Variable(name='a', initial_value=tf.constant(1.0),
                        collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'shared_list'])
        b = tf.Variable(name='b', initial_value=tf.constant(2.0),
                        collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'shared_list'])

    # change the value of 'a' in worker 0
    if FLAGS.task_index == 0:
        change_a = a.assign(10)

    # print out the new value of 'a' in worker 1 using get_collection. Note that we may need to
    # use the read_value() method to force the op to read the current value of a
    if FLAGS.task_index == 1:
        with tf.device('/job:worker/task:1'):  # place read_a on worker 1
            read_a = tf.get_collection('shared_list')[0].read_value()  # a = 10

    with tf.train.MonitoredTrainingSession(master=server.target,
                                           is_chief=(FLAGS.task_index == 0)) as sess:
        if FLAGS.task_index == 0:
            sess.run(change_a)
        if FLAGS.task_index == 1:
            sleep(1)  # sleep a little bit to wait until change_a has been executed
            print(read_a.eval(session=sess))

    server.join()
worker 1 prints out
10
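As a side note, here is a minimal single-process sketch of the difference between tf.get_collection and tf.get_collection_ref mentioned at the top of this answer (the collection name is just an example):

import tensorflow as tf

tf.add_to_collection('shared_list', tf.constant(1.0))

copied_list = tf.get_collection('shared_list')      # a copy of the collection's list
actual_list = tf.get_collection_ref('shared_list')  # the collection's own list

copied_list.append(tf.constant(2.0))                # not visible in the graph collection
actual_list.append(tf.constant(3.0))                # visible in the graph collection

print(len(tf.get_collection('shared_list')))        # prints 2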

Related

Tensorflow - running total

How can I add the number 5 after every iteration of the loop?
I want to do something like this:
weight = 0.225
for i in range(10):
    weight += 5
    print(weight)
Here is how I am trying it in TensorFlow, but it never updates the weight:
import tensorflow as tf

weights = {
    'h0': tf.Variable(tf.random_normal([1]))
}

def dummy(x):
    weights['h0'] = tf.add(weights['h0'], 5)
    res = tf.add(weights['h0'], x)
    return res

# build computational graph
a = tf.placeholder('float', None)
d = dummy(a)

# initialize variables
init = tf.global_variables_initializer()

# create session and run the graph
with tf.Session() as sess:
    sess.run(init)
    for i in range(10):
        print(sess.run(d, feed_dict={a: [2]}))

# close session
sess.close()
There's an operation explicitly created for adding a value and assigning the result back to the input node: tf.assign_add.
You should use it instead of tf.assign + tf.add.
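A minimal single-process sketch of that approach (reusing the names from the question) could look like this:

import tensorflow as tf

weights = {'h0': tf.Variable(tf.random_normal([1]))}
x = tf.placeholder('float', None)

# assign_add updates the variable in the graph and returns its new value
update_h0 = tf.assign_add(weights['h0'], 5.0)
res = tf.add(update_h0, x)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(10):
        print(sess.run(res, feed_dict={x: [2]}))  # grows by 5 on every iteration

Because res depends on the assign_add node, every evaluation of res also increments the variable.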
Also, it's important that you understand why your previous code won't work.
weights['h0'] = tf.add(weights['h0'], 5)
res = tf.add(weights['h0'], x)
In the first line, you're defining an add node whose inputs are weights['h0'] and 5, and you're assigning this node to the python variable weights['h0'].
Thus, weights['h0'] is now a python variable holding a tensorflow node.
In the next line, you're defining another add node, between the previous node and x, and you return this node.
When the graph is evaluated, you evaluate the node pointed to by res, which forces the evaluation of the previous node (because res is a function of the node held by weights['h0']).
The problem is that your assignment in the first line is a python assignment and not a tensorflow assignment.
It is executed only in the python environment; it does not define an assign node in the tensorflow graph.
P.S.: when you use with, you're defining a context manager that handles the closing operations for you. You can thus remove sess.close() because it is executed automatically when you exit that context.
Apparently there is an assign operator
https://www.tensorflow.org/api_docs/python/tf/assign
weights['h0'] = tf.assign(weights['h0'], tf.add(weights['h0'], 5))

Dequeueing from RandomShuffleQueue does not reduce size

In order to train a model I have encapsulated my model in a class.
I use a tf.RandomShuffleQueue to enqueue a list of filenames.
However, when I dequeue elements they do get dequeued, but the size of the queue does not decrease.
Following are more specific questions, followed by the code snippet:
If I have only 5 images, for example, but steps range up to 100, would this result in addfilenames being called repeatedly and automatically? It does not give me any error on dequeuing, so I am thinking that it is getting called automatically.
Why is the size of the tf.RandomShuffleQueue not changing? It remains constant.
import os
import time
import functools
import tensorflow as tf
from Read_labelclsloc import readlabel

def ReadTrain(traindir):
    # Returns a list of training images, their labels and a dictionary.
    # The dictionary maps label names to integer numbers.
    return trainimgs, trainlbls, classdict

def ReadVal(valdir, classdict):
    # Reads the validation image labels.
    # Returns a dictionary with filenames as keys and
    # corresponding labels as values.
    return valdict

def lazy_property(function):
    # Just a decorator to make sure that on repeated calls to
    # member functions, ops don't get created repeatedly.
    # Acknowledgements : https://danijar.com/structuring-your-tensorflow-models/
    attribute = '_cache_' + function.__name__

    @property
    @functools.wraps(function)
    def decorator(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)

    return decorator

class ModelInitial:

    def __init__(self, traindir, valdir):
        self.graph
        self.traindir = traindir
        self.valdir = valdir
        self.traininginfo()
        self.epoch = 0

    def traininginfo(self):
        self.trainimgs, self.trainlbls, self.classdict = ReadTrain(self.traindir)
        self.valdict = ReadVal(self.valdir, self.classdict)
        with self.graph.as_default():
            self.trainimgs_tensor = tf.constant(self.trainimgs)
            self.trainlbls_tensor = tf.constant(self.trainlbls, dtype=tf.uint16)
            self.trainimgs_dict = {}
            self.trainimgs_dict["ImageFile"] = self.trainimgs_tensor
        return None

    @lazy_property
    def graph(self):
        g = tf.Graph()
        with g.as_default():
            # Layer definitions go here
            pass
        return g

    @lazy_property
    def addfilenames(self):
        # This is the function where filenames are pushed to a RandomShuffleQueue
        filename_queue = tf.RandomShuffleQueue(capacity=len(self.trainimgs), min_after_dequeue=0,
                                               dtypes=[tf.string], names=["ImageFile"],
                                               seed=0, name="filename_queue")
        sz_op = filename_queue.size()
        dq_op = filename_queue.dequeue()
        enq_op = filename_queue.enqueue_many(self.trainimgs_dict)
        return filename_queue, enq_op, sz_op, dq_op

    def Train(self):
        # The function for training.
        # I have not written the training part yet.
        # Still struggling with preprocessing
        with self.graph.as_default():
            filename_q, filename_enqueue_op, sz_op, dq_op = self.addfilenames
            qr = tf.train.QueueRunner(filename_q, [filename_enqueue_op])
            filename_dequeue_op = filename_q.dequeue()
            init_op = tf.global_variables_initializer()
            sess = tf.Session(graph=self.graph)
            sess.run(init_op)
            coord = tf.train.Coordinator()
            enq_threads = qr.create_threads(sess, coord=coord, start=True)
            counter = 0
            for step in range(100):
                print(sess.run(dq_op["ImageFile"]))
                print("Epoch = %d " % (self.epoch))
                print("size = %d" % (sess.run(sz_op)))
                counter += 1
            names = [n.name for n in self.graph.as_graph_def().node]
            coord.request_stop()
            coord.join(enq_threads)
            print("Counter = %d" % (counter))
        return None

if __name__ == "__main__":
    modeltrain = ModelInitial(<Path to training images>,
                              <Path to validation images>)
    a = modeltrain.graph
    print(a)
    modeltrain.Train()
    print("Success")
The mystery is caused by the tf.train.QueueRunner that you created for the queue, which causes it to be filled in the background.
The following lines cause a background "queue runner" thread to be created:
qr = tf.train.QueueRunner(filename_q, [filename_enqueue_op])
# ...
enq_threads = qr.create_threads(sess, coord=coord, start=True)
This thread calls filename_enqueue_op in a loop, which causes the queue to be filled up as you remove elements from it.
The background queue-runner thread will almost always have a pending enqueue operation (filename_enqueue_op) on the queue. This means that after you dequeue a filename, the pending enqueue will run and fill the queue back up to capacity. (Technically there is a race condition here, and you could see a size of capacity - 1, but this is quite unlikely.)
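If you build the same queue without a QueueRunner, the reported size does go down on every dequeue. Here is a minimal sketch (with hypothetical filenames) that demonstrates this:

import tensorflow as tf

filenames = [b"img0.jpg", b"img1.jpg", b"img2.jpg", b"img3.jpg", b"img4.jpg"]

# Enqueue once and never refill, so there is no pending enqueue in the background.
q = tf.RandomShuffleQueue(capacity=len(filenames), min_after_dequeue=0,
                          dtypes=[tf.string], seed=0)
enq_op = q.enqueue_many([filenames])
dq_op = q.dequeue()
sz_op = q.size()

with tf.Session() as sess:
    sess.run(enq_op)
    for _ in range(len(filenames)):
        print(sess.run(dq_op), sess.run(sz_op))  # size decreases: 4, 3, 2, 1, 0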

How to finish this very easy distributed training example?

I'm using tensorflow version 0.12.1, and following this doc.
What I want to do is to add 1 to count in every worker.
My goal is to print results that are >1, but I'm only getting 1.
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('job_name', '', '')
tf.app.flags.DEFINE_string('ps_hosts', '', '')
tf.app.flags.DEFINE_string('worker_hosts', '', '')
tf.app.flags.DEFINE_integer('task_index', 0, '')

ps_hosts = FLAGS.ps_hosts.split(',')
worker_hosts = FLAGS.worker_hosts.split(',')
cluster_spec = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})

server = tf.train.Server(
    {'ps': ps_hosts, 'worker': worker_hosts},
    job_name=FLAGS.job_name,
    task_index=FLAGS.task_index)

if FLAGS.job_name == 'ps':
    server.join()

with tf.device(tf.train.replica_device_setter(
        worker_device="/job:worker/task:%d" % FLAGS.task_index,
        cluster=cluster_spec)):
    count = tf.Variable(0)
    count = tf.add(count, tf.constant(1))
    init = tf.global_variables_initializer()

sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                         logdir="./checkpoint/",
                         init_op=init,
                         summary_op=None,
                         saver=None,
                         global_step=None,
                         save_model_secs=60)

with sv.managed_session(server.target) as sess:
    sess.run(init)
    step = 1
    while step <= 999999999:
        result = sess.run(count)
        if step % 10000 == 0:
            print(result)
        if result >= 2:
            print("!!!!!!!!")
        step += 1
    print("Finished!")
sv.stop()
The problem is actually independent of distributed execution, and stems from these two lines:
count = tf.Variable(0)
count = tf.add(count,tf.constant(1))
The tf.add() op is a pure functional op, which creates a new tensor with its output each time it runs, rather than modifying its input. If you want the value to increase, and that increase to be visible across workers, you must use the tf.Variable.assign_add() method instead, as follows:
count = tf.Variable(0)
increment_count = count.assign_add(1)
Then call sess.run(increment_count) inside your training loop to increment the value of the count variable.
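For illustration, here is a minimal single-process sketch of that pattern; in your distributed script, only the op you run inside the loop changes:

import tensorflow as tf

count = tf.Variable(0)
increment_count = count.assign_add(1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(5):
        result = sess.run(increment_count)  # returns the updated value
        print(result)                       # prints 1, 2, 3, 4, 5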

Redis not returning result after upgrading Celery from 3.1 to 4.0

I recently upgraded my Celery installation to 4.0. After a few days of wrestling with the upgrade process, I finally got it to work... sort of. Some tasks will return, but the final task will not.
I have a class, SFF, that takes in and parses a file:
# Constructor with I/O file
def __init__(self, file):
    # File data that's gonna get used a lot
    sffDescriptor = file.fileno()
    fileName = abspath(file.name)

    # Get the pointer to the file
    filePtr = mmap.mmap(sffDescriptor, 0, flags=mmap.MAP_SHARED, prot=mmap.PROT_READ)

    # Get the header info
    hdr = filePtr.read(HEADER_SIZE)
    self.header = SFFHeader._make(unpack(HEADER_FMT, hdr))

    # Read in the palette maps
    print self.header.onDemandDataSize
    print self.header.onLoadDataSize
    palMapsResult = getPalettes.delay(fileName, self.header.palBankOff - HEADER_SIZE, self.header.onDemandDataSize, self.header.numPals)

    # Read the sprite list nodes
    nodesStart = self.header.sprListOff
    nodesEnd = self.header.palBankOff
    print nodesEnd - nodesStart
    sprNodesResult = getSprNodes.delay(fileName, nodesStart, nodesEnd, self.header.numSprites)

    # Get palette data
    self.palettes = palMapsResult.get()

    # Get sprite data
    spriteNodes = sprNodesResult.get()

    # TESTING
    spritesResultSet = ResultSet([])
    numSpriteNodes = len(spriteNodes)

    # Split the nodes into chunks of size 32 elements
    for x in xrange(0, numSpriteNodes, 32):
        spritesResult = getSprites.delay(spriteNodes, x, x+32, fileName, self.palettes, self.header.palBankOff, self.header.onDemandDataSizeTotal)
        spritesResultSet.add(spritesResult)
        break  # REMEMBER TO REMOVE FOR ENTIRE SFF
    self.sprites = spritesResultSet.join_native()
It doesn't matter if it's a single task that returns the entire spritesResult, or if I split it using a ResultSet, the outcome is always the same: the Python console I'm using just hangs at either spritesResultSet.join_native() or spritesResult.get() (depending on how I format it).
Here is the task in question:
@task
def getSprites(nodes, start, end, fileName, palettes, palBankOff, onDemandDataSizeTotal):
    sprites = []
    with open(fileName, "rb") as file:
        sffDescriptor = file.fileno()
        sffData = mmap.mmap(sffDescriptor, 0, flags=mmap.MAP_SHARED, prot=mmap.PROT_READ)
        for node in nodes[start:end]:
            sprListNode = dict(SprListNode._make(node)._asdict())  # Need to convert it to a dict since values may change.
            #print node
            #print sprListNode
            # If it's a linked sprite, the data length is 0, so get the linked index.
            if sprListNode['dataLen'] == 0:
                sprListNodeTemp = SprListNode._make(nodes[sprListNode['index']])
                sprListNode['dataLen'] = sprListNodeTemp.dataLen
                sprListNode['dataOffset'] = sprListNodeTemp.dataOffset
                sprListNode['compression'] = sprListNodeTemp.compression
            # What does the offset need to be?
            dataOffset = sprListNode['dataOffset']
            if sprListNode['loadMode'] == 0:
                dataOffset += palBankOff  #- HEADER_SIZE
            elif sprListNode['loadMode'] == 1:
                dataOffset += onDemandDataSizeTotal  #- HEADER_SIZE
            #print sprListNode
            # Seek to the data location and "read" it in. First 4 bytes are just the image length
            start = dataOffset + 4
            end = dataOffset + sprListNode['dataLen']
            #sffData.seek(start)
            compressedSprite = sffData[start:end]
            # Create the sprite
            sprite = Sprite(sprListNode, palettes[sprListNode['palNo']], np.fromstring(compressedSprite, dtype=np.uint8))
            sprites.append(sprite)
    return json.dumps(sprites, cls=SpriteJSONEncoder)
I know it reaches the return statement, because if I put a print right above it, it will print in the Celery window. I also know that the task is running to completion because I get the following message from the worker:
[2016-11-16 00:03:33,639: INFO/PoolWorker-4] Task framedatabase.tasks.getSprites[285ac9b1-09b4-4cf1-a251-da6212863832] succeeded in 0.137236133218s: '[{"width": 120, "palNo": 30, "group": 9000, "xAxis": 0, "yAxis": 0, "data":...'
Here are my celery settings in settings.py:
# Celery settings
BROKER_URL='redis://localhost:1717/1'
CELERY_RESULT_BACKEND='redis://localhost:1717/0'
CELERY_IGNORE_RESULT=False
CELERY_IMPORTS = ("framedatabase.tasks", )
... and my celery.py:
from __future__ import absolute_import
import os
from celery import Celery

# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'framedatabase.settings')

from django.conf import settings  # noqa

app = Celery('framedatabase', backend='redis://localhost:1717/1', broker="redis://localhost:1717/0",
             include=['framedatabase.tasks'])

# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()

@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
Found the problem. Apparently it was leading to deadlock as mentioned in the section "Avoid launching synchronous subtasks" in the Celery documentation here: http://docs.celeryproject.org/en/latest/userguide/tasks.html#tips-and-best-practices
So I got rid of the line:
sprNodesResult.get()
And changed the final result to a chain:
self.sprites = chain(getSprNodes.s(fileName, nodesStart, nodesEnd, self.header.numSprites),
                     getSprites.s(0, 32, fileName, self.palettes, self.header.palBankOff,
                                  self.header.onDemandDataSizeTotal))().get()
And it works! Now I just have to find a way to split this the way I want!

Using Tensorflow for workload distribution

My code looks like:
import tensorflow as tf
import numpy as np

N = 16
num_ckfs = 5

init_variances = tf.placeholder(tf.float64, shape=[num_ckfs, N], name='inital_variances')
init_states = tf.placeholder(tf.float64, shape=[num_ckfs, N], name='init_states')

# some more code

predicted_state = prior_state_expanded + kalman_gain * diff_expanded
error_covariance = sum_cov_cholesky + tf.batch_matmul(kg, kalman_gain, adj_x=True)
projected_output = tf.batch_matmul(predicted_state, input_vectors_extra, adj_y=True)

session = tf.Session()

# read data from input file
init_var = [10 for i in range(N)]
init_var_ckfs = [init_var for i in range(num_ckfs)]
init_state = [0 for i in range(N)]
init_state_ckfs = [init_state for i in range(num_ckfs)]

for timestep in range(10):
    out = session.run([projected_output, predicted_state, error_covariance],
                      {init_variances: init_var_ckfs, init_states: init_state_ckfs})
    init_state_ckfs = np.array([i.tolist()[0] for i in out[1]])
    init_var_ckfs = np.array([i.diagonal().tolist() for i in out[2]])
This code is for running a Cubature Kalman Filter(CKF) in a batched mode. For example:
num_ckfs = 5
means that this code will run 5 CKFs in parallel. Now, what I would like to do is to distribute the workload to multiple nodes depending upon the value of num_ckfs. For example, if I pass num_ckfs as an argument to the code, and it is set to 20,000, then I would distribute the workload to 4 nodes running 5000 each.
I would like to do this using the distributed version of TensorFlow. Can someone please give me some hints on how this could be achieved? Ideally, I should only have to execute the code on a single node, and it should then get distributed to as many nodes as are defined in tf.train.ClusterSpec.
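As a rough sketch of the kind of setup I have in mind (made-up host names, not working code), the cluster would be described once and each worker would then build its own shard of the num_ckfs filters:

import tensorflow as tf

num_ckfs = 20000
num_workers = 4
shard_size = num_ckfs // num_workers  # 5000 CKFs per worker

# Hypothetical cluster with one parameter server and four workers.
cluster = tf.train.ClusterSpec({
    'ps': ['node0.example.com:2222'],
    'worker': ['node1.example.com:2222', 'node2.example.com:2222',
               'node3.example.com:2222', 'node4.example.com:2222']})

# On the worker with index task_index, the ops for that shard would be pinned locally, e.g.:
# with tf.device('/job:worker/task:%d' % task_index):
#     init_variances = tf.placeholder(tf.float64, shape=[shard_size, N])
#     ...build the CKF ops for this worker's 5000 filters...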