RoBERTa example from tfhub produces error "During Variant Host->Device Copy: non-DMA-copy attempted of tensor type: string" - tensorflow

I would like to use the roberta-base model from tfhub. I am trying to run the example below, but I get an error when I feed sentences to the model as input: Invalid argument: During Variant Host->Device Copy: non-DMA-copy attempted of tensor type: string. I am using Python 3.7, TensorFlow 2.5, and tensorflow_hub 0.12.
If I replace the preprocessor and encoder with the corresponding BERT versions (the first two lines below), the code works. However, I would like it to work for RoBERTa as well, as in the full example below.
preprocess = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
encoder = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4", trainable=True)
# define a text embedding model
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string)
preprocessor = hub.KerasLayer("https://tfhub.dev/jeongukjae/roberta_en_cased_preprocess/1")
encoder_inputs = preprocessor(text_input)
encoder = hub.KerasLayer("https://tfhub.dev/jeongukjae/roberta_en_cased_L-12_H-768_A-12/1", trainable=True)
encoder_outputs = encoder(encoder_inputs)
pooled_output = encoder_outputs["pooled_output"] # [batch_size, 768].
sequence_output = encoder_outputs["sequence_output"] # [batch_size, seq_length, 768].
model = tf.keras.Model(text_input, pooled_output)
# You can embed your sentences as follows
sentences = tf.constant(["(your text here)"])
print(model(sentences))
Additionally, the code above with the RoBERTa preprocessor/encoder seems to work if I use CPU instead of GPU (adding with tf.device('/cpu:0')), but this is not feasible because I need to fine-tune a model on lots of data.
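For reference, the CPU workaround mentioned above looks roughly like this (a minimal sketch: it keeps the string-handling ops off the GPU, but the whole forward pass then runs on the CPU, which is too slow for fine-tuning):
# Minimal sketch of the CPU workaround: pin the call to the CPU so the
# string tensors never require a host->device copy.
with tf.device('/cpu:0'):
    sentences = tf.constant(["(your text here)"])
    print(model(sentences))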

Related

Problem with inputs when building a model with TFBertModel and AutoTokenizer from HuggingFace's transformers

I'm trying to build the model illustrated in this picture:
I obtained a pre-trained BERT and respective tokenizer from HuggingFace's transformers in the following way:
from transformers import AutoTokenizer, TFBertModel
model_name = "dbmdz/bert-base-italian-xxl-cased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
bert = TFBertModel.from_pretrained(model_name)
The model will be fed a sequence of Italian tweets and will need to determine whether they are ironic or not.
I'm having problems building the initial part of the model, which takes the inputs and feeds them to the tokenizer in order to get a representation I can feed to BERT.
I can do it outside of the model-building context:
my_phrase = "Ciao, come va?"
# an equivalent version is tokenizer(my_phrase, other parameters)
bert_input = tokenizer.encode(my_phrase, add_special_tokens=True, return_tensors='tf', max_length=110, padding='max_length', truncation=True)
attention_mask = bert_input > 0
outputs = bert(bert_input, attention_mask)['pooler_output']
but I'm having trouble building a model that does this. Here is the code for building such a model (the problem is in the first four lines):
def build_classifier_model():
    text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    encoder_inputs = tokenizer(text_input, return_tensors='tf', add_special_tokens=True, max_length=110, padding='max_length', truncation=True)
    outputs = bert(encoder_inputs)
    net = outputs['pooler_output']
    X = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(net)
    X = tf.keras.layers.Concatenate(axis=-1)([X, input_layer])
    X = tf.keras.layers.MaxPooling1D(20)(X)
    X = tf.keras.layers.SpatialDropout1D(0.4)(X)
    X = tf.keras.layers.Flatten()(X)
    X = tf.keras.layers.Dense(128, activation="relu")(X)
    X = tf.keras.layers.Dropout(0.25)(X)
    X = tf.keras.layers.Dense(2, activation='softmax')(X)
    model = tf.keras.Model(inputs=text_input, outputs=X)
    return model
And when I call the function for creating this model I get this error:
text input must of type str (single example), List[str] (batch or single pretokenized example) or List[List[str]] (batch of pretokenized examples).
One thing I thought was that maybe I had to use the tokenizer.batch_encode_plus function which works with lists of strings:
class BertPreprocessingLayer(tf.keras.layers.Layer):
    def __init__(self, tokenizer, maxlength):
        super().__init__()
        self._tokenizer = tokenizer
        self._maxlength = maxlength

    def call(self, inputs):
        print(type(inputs))
        print(inputs)
        tokenized = tokenizer.batch_encode_plus(inputs, add_special_tokens=True, return_tensors='tf', max_length=self._maxlength, padding='max_length', truncation=True)
        return tokenized
def build_classifier_model():
    text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
    encoder_inputs = BertPreprocessingLayer(tokenizer, 100)(text_input)
    outputs = bert(encoder_inputs)
    net = outputs['pooler_output']
    # ... same as above
but I get this error:
batch_text_or_text_pairs has to be a list (got <class 'keras.engine.keras_tensor.KerasTensor'>)
and besides the fact that a quick Google search didn't turn up a way to convert that tensor to a list, it seems weird that I have to go in and out of TensorFlow in this way.
I've also looked at HuggingFace's documentation, but there is only a single usage example, with a single phrase, and what they do is analogous to my "out of model-building context" example.
EDIT:
I also tried with Lambdas in this way:
tf.executing_eagerly()

def tokenize_tensor(tensor):
    t = tensor.numpy()
    t = np.array([str(s, 'utf-8') for s in t])
    return tokenizer(t.tolist(), return_tensors='tf', add_special_tokens=True, max_length=110, padding='max_length', truncation=True)

def build_classifier_model():
    text_input = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='text')
    encoder_inputs = tf.keras.layers.Lambda(tokenize_tensor, name='tokenize')(text_input)
    ...
    outputs = bert(encoder_inputs)
but I get the following error:
'Tensor' object has no attribute 'numpy'
EDIT 2:
I also tried the approach suggested by @mdaoust of wrapping everything in a tf.py_function (its final form is shown below) and ran into a series of errors.
def py_func_tokenize_tensor(tensor):
    return tf.py_function(tokenize_tensor, [tensor], Tout=[tf.int32, tf.int32, tf.int32])
Calling tf.py_function without the Tout argument fails with:
eager_py_func() missing 1 required positional argument: 'Tout'
Then I defined Tout as the type of the value returned by the tokenizer:
transformers.tokenization_utils_base.BatchEncoding
and got the following error:
Expected DataType for argument 'Tout' not <class 'transformers.tokenization_utils_base.BatchEncoding'>
Finally I unpacked the value in the BatchEncoding in the following way:
def tokenize_tensor(tensor):
    t = tensor.numpy()
    t = np.array([str(s, 'utf-8') for s in t])
    dictionary = tokenizer(t.tolist(), return_tensors='tf', add_special_tokens=True, max_length=110, padding='max_length', truncation=True)
    # unpacking
    input_ids = dictionary['input_ids']
    tok_type = dictionary['token_type_ids']
    attention_mask = dictionary['attention_mask']
    return input_ids, tok_type, attention_mask
And I get an error in the line below:
...
outputs = bert(encoder_inputs)
ValueError: Cannot take the length of shape with unknown rank.
For now I solved it by taking the tokenization step out of the model:
def tokenize(sentences, tokenizer):
    input_ids, input_masks, input_segments = [], [], []
    for sentence in sentences:
        inputs = tokenizer.encode_plus(sentence, add_special_tokens=True, max_length=128, pad_to_max_length=True, return_attention_mask=True, return_token_type_ids=True)
        input_ids.append(inputs['input_ids'])
        input_masks.append(inputs['attention_mask'])
        input_segments.append(inputs['token_type_ids'])
    return np.asarray(input_ids, dtype='int32'), np.asarray(input_masks, dtype='int32'), np.asarray(input_segments, dtype='int32')
The model takes two inputs, which are the first two values returned by the tokenize function.
def build_classifier_model():
    input_ids_in = tf.keras.layers.Input(shape=(128,), name='input_token', dtype='int32')
    input_masks_in = tf.keras.layers.Input(shape=(128,), name='masked_token', dtype='int32')
    embedding_layer = bert(input_ids_in, attention_mask=input_masks_in)[0]
    ...
    model = tf.keras.Model(inputs=[input_ids_in, input_masks_in], outputs=X)
    for layer in model.layers[:3]:
        layer.trainable = False
    return model
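A hypothetical usage of this workaround could look like the following (sentences and labels are placeholder data, and the omitted layers are assumed to end in the Dense(2, softmax) head from the earlier snippet):
# Hypothetical usage of the two-input workaround; `sentences` and `labels`
# are placeholders, not from the original question.
input_ids, input_masks, _ = tokenize(sentences, tokenizer)
model = build_classifier_model()
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit([input_ids, input_masks], np.asarray(labels), batch_size=16, epochs=3)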
I'd still like to know if someone has a solution which integrates the tokenization step inside the model-building context, so that a user of the model can simply feed phrases to it to get a prediction or to train the model.
text input must of type str (single example), List[str] (batch or single pretokenized example) or List[List[str]] (batch of pretokenized examples).
Solution to the above error:
Just use text_input = 'text'
instead of
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
It looks like this is not TensorFlow compatible.
https://huggingface.co/dbmdz/bert-base-italian-xxl-cased#model-weights
Currently only PyTorch-Transformers compatible weights are available. If you need access to TensorFlow checkpoints, please raise an issue!
But remember that some things are easier if you don't use Keras's functional model API. That's what the got <class 'keras.engine.keras_tensor.KerasTensor'> error is complaining about.
Try passing a tf.Tensor to see if that works.
What happens when you try:
text_input = tf.constant('text')
Try writing your model as a subclass of tf.keras.Model.
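A minimal sketch of that subclassing idea, assuming tokenizer and bert are loaded as in the question; note it only works when the model is called eagerly on real string tensors, not under tf.function/graph tracing:
class IronyClassifier(tf.keras.Model):
    # Minimal sketch: tokenize inside call(); only valid for eager calls,
    # because .numpy() is unavailable on symbolic/graph tensors.
    def __init__(self, tokenizer, bert):
        super().__init__()
        self.tokenizer = tokenizer
        self.bert = bert
        self.classifier = tf.keras.layers.Dense(2, activation='softmax')

    def call(self, texts):
        sentences = [t.decode('utf-8') for t in texts.numpy()]
        enc = self.tokenizer(sentences, return_tensors='tf', padding='max_length',
                             truncation=True, max_length=110)
        pooled = self.bert(enc['input_ids'],
                           attention_mask=enc['attention_mask'])['pooler_output']
        return self.classifier(pooled)

# model = IronyClassifier(tokenizer, bert)
# model(tf.constant(["Ciao, come va?"]))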
Yeah, my first answer was wrong.
The problem is that tensorflow has two types of tensors. Eager tensors (these have a value). And "symbolic tensors" or "graph tensors" that don't have a value, and are just used to build up a calculation.
Your tokenize_tensor function expects an eager tensor. Only eager tensors have a .numpy() method.
def tokenize_tensor(tensor):
    t = tensor.numpy()
    t = np.array([str(s, 'utf-8') for s in t])
    return tokenizer(t.tolist(), return_tensors='tf', add_special_tokens=True, max_length=110, padding='max_length', truncation=True)
But keras Input is a symbolic tensor.
text_input = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name='text')
encoder_inputs = tf.keras.layers.Lambda(tokenize_tensor, name='tokenize')(text_input)
To fix this, you can use tf.py_function. It works in graph mode, and will call the wrapped function with eager tensors when the graph is executed, instead of passing it the graph-tensors while the graph is being constructed.
def py_func_tokenize_tensor(tensor):
    return tf.py_function(tokenize_tensor, [tensor])

...
encoder_inputs = tf.keras.layers.Lambda(py_func_tokenize_tensor, name='tokenize')(text_input)
I found this question, Use `sentence-transformers` inside of a keras model, and this helpful article, https://www.philschmid.de/tensorflow-sentence-transformers, which explain how to do what you're trying to achieve.
The first one uses the py_function approach; the second wraps everything into tf.keras.Model classes.
Hope this helps anyone arriving here in the future.
This is how to use tf.py_function correctly to create a model that takes string as an input:
model_name = "dbmdz/bert-base-italian-xxl-cased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
bert = TFBertModel.from_pretrained(model_name)
def build_model():
    text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')

    def encode_text(text):
        inputs = [tf.compat.as_str(x) for x in text.numpy().tolist()]
        tokenized = tokenizer(
            inputs,
            return_tensors='tf',
            add_special_tokens=True,
            max_length=110,
            padding='max_length',
            truncation=True)
        return tokenized['input_ids'], tokenized['attention_mask']

    input_ids, attention_mask = tf.py_function(encode_text, inp=[text_input], Tout=[tf.int32, tf.int32])
    input_ids = tf.ensure_shape(input_ids, [None, 110])
    attention_mask = tf.ensure_shape(attention_mask, [None, 110])

    outputs = bert(input_ids, attention_mask)
    net = outputs['last_hidden_state']

    # Some other layers, this part is not important
    x = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True))(net)
    x = tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1, name='classifier'))(x)

    return tf.keras.Model(inputs=text_input, outputs=x)
I use last_hidden_state instead of pooler_output; that's where the outputs for each token in the sequence are located. (See the discussion here on the difference between last_hidden_state and pooler_output.) We usually use last_hidden_state for token-level classification (e.g. named entity recognition).
Using pooler_output would be even simpler, e.g.:
net = outputs['pooler_output']
x = tf.keras.layers.Dense(1, name='classifier')(net)
return tf.keras.Model(inputs=text_input, outputs=x)
pooler_output can be used in simpler classification problems (like irony detection), but of course it's still possible to use last_hidden_state to create more powerful models. (When you use bert(input_ids_in, attention_mask=input_masks_in)[0] in your solution, it actually returns last_hidden_state.)
Making sure the model works:
model = build_model()
my_phrase = "Ciao, come va?"
model(tf.constant([my_phrase]))
>>> <tf.Tensor: shape=(1, 110, 1), dtype=float32, numpy=...>,
Making sure HuggingFace part of the model is trainable:
model.summary(show_trainable=True)
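As a hypothetical next step (not part of the original answer), the pooler_output variant can be compiled and fit directly on string tensors; the optimizer, labels, and batch size below are illustrative placeholders:
# Hypothetical training sketch for the pooler_output variant above
# (Dense(1) head with no activation, hence from_logits=True);
# texts/labels are placeholder data.
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])

texts = tf.constant(["Ciao, come va?", "Ma certo, come no..."])
labels = tf.constant([[0.0], [1.0]])  # 1 = ironic (placeholder labels)
model.fit(texts, labels, batch_size=2, epochs=1)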

Tensorflow Model to TFLITE

I have this code for building a semantic search engine using the pre-trained Universal Sentence Encoder from TensorFlow Hub. I am not able to convert it to TFLite. I have saved the model to my directory.
Importing the model:
module_path ="/content/drive/My Drive/4"
%time model = hub.load(module_path)
#print ("module %s loaded" % module_url)
#Create function for using modeltraining
def embed(input):
return model(input)
Training the model on data:
## training the model
Model_USE = embed(data)
Saving the model:
exported = tf.train.Checkpoint(v=tf.Variable(Model_USE))
exported.f = tf.function(
    lambda x: exported.v * x,
    input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
export_dir = "/content/drive/My Drive/"
tf.saved_model.save(exported, export_dir)
Saving works fine, but when I convert to TFLite it gives an error.
Conversion code:
converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
                                       tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()
open("converted_model.tflite", "wb").write(tflite_model)
Error:
as_list() is not defined on an unknown TensorShape.
First, you need to add a data generator to provide representative inputs for the converter, like this:
def representative_data_gen():
    for input_value in dataset.take(100):
        yield [input_value]
The input value must have shape (1, your_input_shape), as if it had a batch size of 1, and it must be yielded as a list; that is mandatory.
You should also declare which type of optimization you want, for example:
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
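Putting those pieces together with the saved-model converter from the question might look like this (a hedged sketch; dataset is assumed to yield float32 tensors matching the model's input):
# Hedged sketch combining the representative dataset, optimization flag,
# and the converter from the question; `dataset` is an assumption.
def representative_data_gen():
    for input_value in dataset.take(100):
        # batch dimension of 1, yielded as a list
        yield [tf.reshape(tf.cast(input_value, tf.float32), (1, -1))]

converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS,
                                       tf.lite.OpsSet.SELECT_TF_OPS]
tflite_model = converter.convert()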
Nevertheless, I have also encountered problems with the different options of the converter depending on the network structure, which in this case I do not know. So, to make a clean run of the converter I would just do:
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.experimental_new_converter = True
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tfmodel = converter.convert()
The converter.experimental_new_converter = True is for problems when converting RNNs as in https://github.com/tensorflow/tensorflow/issues/34813
EDIT:
As explained in ValueError: None is only supported in the 1st dimension. Tensor 'flatbuffer_data' has invalid shape '[None, None, 1, 512]', TFLite only allows the first dimension of your data (the batch) to be None; all other dimensions must be fixed. Try padding them with, for example, tf.keras.preprocessing.sequence.pad_sequences.
Then mask your sequences in the network as described in: tensorflow.org/guide/keras/masking_and_padding with Embedding or Masking layers.
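A minimal sketch of that padding-and-masking idea (the maxlen of 512, vocab_size, and the layer sizes are illustrative assumptions):
# Minimal padding + masking sketch; maxlen, vocab_size, and dimensions are assumptions.
padded_ids = tf.keras.preprocessing.sequence.pad_sequences(
    token_id_sequences, maxlen=512, padding='post')

inputs = tf.keras.Input(shape=(512,))
x = tf.keras.layers.Embedding(input_dim=vocab_size, output_dim=128, mask_zero=True)(inputs)

# or, for already-embedded float features:
# padded = tf.keras.preprocessing.sequence.pad_sequences(
#     float_sequences, maxlen=512, padding='post', dtype='float32')
# inputs = tf.keras.Input(shape=(512, 128))
# x = tf.keras.layers.Masking(mask_value=0.0)(inputs)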

Tensorflow Serving, online predictions: How to build a signature_def that accepts 'image_bytes' as input tensor name?

I have successfully trained a Keras model and used it for predictions on my local machine; now I want to deploy it using TensorFlow Serving. My model takes images as input and returns a mask prediction.
According to the documentation here my instances need to be formatted like this:
{'image_bytes': {'b64': base64.b64encode(jpeg_data).decode()}}
Now, the saved_model.pb file automatically saved by my Keras model has the following tensor names:
input_tensor = graph.get_tensor_by_name('input_image:0')
output_tensor = graph.get_tensor_by_name('conv2d_23/Sigmoid:0')
Therefore I need to save a new saved_model.pb file with a different signature_def.
I tried the following (see here for reference), which works:
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, ['serve'], 'path/to/saved/model/')
    graph = tf.get_default_graph()
    input_tensor = graph.get_tensor_by_name('input_image:0')
    output_tensor = graph.get_tensor_by_name('conv2d_23/Sigmoid:0')
    tensor_info_input = tf.saved_model.utils.build_tensor_info(input_tensor)
    tensor_info_output = tf.saved_model.utils.build_tensor_info(output_tensor)
    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'image_bytes': tensor_info_input},
            outputs={'output_bytes': tensor_info_output},
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
    builder = tf.saved_model.builder.SavedModelBuilder('path/to/saved/new_model/')
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={'predict_images': prediction_signature, })
    builder.save()
but when I deploy the model and request predictions from AI Platform, I get the following error:
RuntimeError: Prediction failed: Error processing input: Expected float32, got {'b64': 'Prm4OD7JyEg+paQkPrGwMD7BwEA'} of type 'dict' instead.
Readapting the answer here, I also tried to rewrite
input_tensor = graph.get_tensor_by_name('input_image:0')
as
image_placeholder = tf.placeholder(tf.string, name='b64')
graph_input_def = graph.as_graph_def()
input_tensor, = tf.import_graph_def(
    graph_input_def,
    input_map={'b64:0': image_placeholder},
    return_elements=['input_image:0'])
with the (wrong) understanding that this would add a layer on top of my input tensor with a matching 'b64' name (as per the documentation) that accepts a string and connects it to the original input tensor,
but the error from the AI platform is the same.
(The relevant code I use for requesting a prediction is:
instances = [{'image_bytes': {'b64': base64.b64encode(image).decode()}}]
response = service.projects().predict(
    name=name,
    body={'instances': instances}
).execute()
where image is a numpy.ndarray of dtype('float32'))
I feel I'm close, but I'm definitely missing something. Can you please help?
After the base64 encode -> decode round trip, the image buffer arrives at the model as a string, which does not match your model's float32 input type.
You may try adding a preprocessing step inside your model that turns those bytes back into a float tensor, and then send the b64 request again.
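A hedged sketch of that preprocessing idea, in TF1 style, assuming the client sends base64-encoded JPEG bytes as in the documented image_bytes format (the 256x256 target size and the names are illustrative):
# Accept serialized JPEG bytes as a string tensor and turn them into the
# float32 image batch the original model expects.
image_bytes = tf.placeholder(tf.string, shape=[None], name='image_bytes')

def decode_and_resize(jpeg_bytes):
    img = tf.image.decode_jpeg(jpeg_bytes, channels=3)
    img = tf.image.convert_image_dtype(img, tf.float32)  # scales to [0, 1]
    return tf.image.resize_images(img, [256, 256])

float_images = tf.map_fn(decode_and_resize, image_bytes, dtype=tf.float32)
# float_images would then stand behind the 'image_bytes' entry of the
# signature_def instead of the raw float input tensor.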

How to use tensorflow seq2seq without embeddings?

I have been working on LSTMs for time-series forecasting using TensorFlow. Now I want to try sequence-to-sequence (seq2seq). The official site has a tutorial which shows NMT with embeddings. So, how can I use this new seq2seq module without embeddings (directly using time-series sequences)?
# 1. Encoder
encoder_cell = tf.contrib.rnn.BasicLSTMCell(LSTM_SIZE)
encoder_outputs, encoder_state = tf.nn.static_rnn(
    encoder_cell,
    x,
    dtype=tf.float32)

# Decoder
decoder_cell = tf.nn.rnn_cell.BasicLSTMCell(LSTM_SIZE)
helper = tf.contrib.seq2seq.TrainingHelper(
    decoder_emb_inp, decoder_lengths, time_major=True)
decoder = tf.contrib.seq2seq.BasicDecoder(
    decoder_cell, helper, encoder_state)

# Dynamic decoding
outputs, _ = tf.contrib.seq2seq.dynamic_decode(decoder)
outputs = outputs[-1]

# output is result of linear activation of last layer of RNN
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
predictions = tf.matmul(outputs, weight) + bias
What should be the args for TrainingHelper() if I use input_seq=x and output_seq=label?
decoder_emb_inp ???
decoder_lengths ???
Where input_seq is the first 8 points of the sequence, and output_seq is the last 2 points of the sequence.
Thanks in advance!
I got it to work without embeddings using a very rudimentary InferenceHelper:
inference_helper = tf.contrib.seq2seq.InferenceHelper(
    sample_fn=lambda outputs: outputs,
    sample_shape=[dim],
    sample_dtype=dtypes.float32,
    start_inputs=start_tokens,
    end_fn=lambda sample_ids: False)
My inputs are floats with the shape [batch_size, time, dim]. For the example below dim would be 1, but this can easily be extended to more dimensions. Here's the relevant part of the code:
projection_layer = tf.layers.Dense(
    units=1,  # = dim
    kernel_initializer=tf.truncated_normal_initializer(
        mean=0.0, stddev=0.1))

# Training Decoder
training_decoder_output = None
with tf.variable_scope("decode"):
    # output_data doesn't exist during prediction phase.
    if output_data is not None:
        # Prepend the "go" token
        go_tokens = tf.constant(go_token, shape=[batch_size, 1, 1])
        dec_input = tf.concat([go_tokens, target_data], axis=1)

        # Helper for the training process.
        training_helper = tf.contrib.seq2seq.TrainingHelper(
            inputs=dec_input,
            sequence_length=[output_size] * batch_size)

        # Basic decoder
        training_decoder = tf.contrib.seq2seq.BasicDecoder(
            dec_cell, training_helper, enc_state, projection_layer)

        # Perform dynamic decoding using the decoder
        training_decoder_output = tf.contrib.seq2seq.dynamic_decode(
            training_decoder, impute_finished=True,
            maximum_iterations=output_size)[0]

# Inference Decoder
# Reuses the same parameters trained by the training process.
with tf.variable_scope("decode", reuse=tf.AUTO_REUSE):
    start_tokens = tf.constant(
        go_token, shape=[batch_size, 1])

    # The sample_ids are the actual output in this case (not dealing with any logits here).
    # My end_fn is always False because I'm working with a generator that will stop giving
    # more data. You may extend the end_fn as you wish. E.g. you can append end_tokens
    # and make end_fn be true when the sample_id is the end token.
    inference_helper = tf.contrib.seq2seq.InferenceHelper(
        sample_fn=lambda outputs: outputs,
        sample_shape=[1],  # again because dim=1
        sample_dtype=dtypes.float32,
        start_inputs=start_tokens,
        end_fn=lambda sample_ids: False)

    # Basic decoder
    inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
                                                        inference_helper,
                                                        enc_state,
                                                        projection_layer)

    # Perform dynamic decoding using the decoder
    inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(
        inference_decoder, impute_finished=True,
        maximum_iterations=output_size)[0]
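For completeness, a hedged sketch of the encoder side that the snippet above assumes (dec_cell, enc_state, go_token, output_size, target_data and the shapes come from the answer; the concrete values are illustrative):
# Hedged sketch of the encoder/placeholder setup assumed above;
# concrete sizes are illustrative. `dtypes` in the answer comes from
# tensorflow.python.framework.dtypes.
LSTM_SIZE = 32
batch_size = 16
go_token = 0.0
output_size = 2   # predict the last 2 points

# input_data: [batch_size, 8, 1], target_data: [batch_size, 2, 1]
input_data = tf.placeholder(tf.float32, [batch_size, 8, 1])
target_data = tf.placeholder(tf.float32, [batch_size, output_size, 1])
output_data = target_data  # present during training, None at prediction time

enc_cell = tf.nn.rnn_cell.BasicLSTMCell(LSTM_SIZE)
_, enc_state = tf.nn.dynamic_rnn(enc_cell, input_data, dtype=tf.float32)

dec_cell = tf.nn.rnn_cell.BasicLSTMCell(LSTM_SIZE)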
Have a look at this question. Also, I found this tutorial very useful for understanding seq2seq models, although it does use embeddings. So replace their GreedyEmbeddingHelper with an InferenceHelper like the one I posted above.
P.s. I posted the full code at https://github.com/Andreea-G/tensorflow_examples

Load pretrained word embedding into Tensorflow model

I'm trying to modify this TensorFlow LSTM model to load this pre-trained GoogleNews word embedding, GoogleNews-vectors-negative300.bin (a TensorFlow Word2Vec embedding would be just as good).
I've been reading examples on how to load a pre-trained word embedding into TensorFlow (e.g. 1: here, 2: here, 3: here and 4: here).
In the first linked example they can easily assign the embedding to the graph:
sess.run(cnn.W.assign(initW))
In the second linked example they create an embedding-wrapper variable:
with tf.variable_scope("embedding_rnn_seq2seq/rnn/embedding_wrapper", reuse=True):
em_in = tf.get_variable("embedding")
then they initialize the embedding wrapper:
sess.run(em_in.assign(initW))
Both those examples make sense, but it's not obvious to me how I can assign the unpacked embedding initW to the TF graph in my case. (I'm a TF beginner).
I can prepare initW like the first two examples:
def loadEmbedding(self, word_to_id):
    # New model, we load the pre-trained word2vec data and initialize embeddings
    with open(os.path.join('GoogleNews-vectors-negative300.bin'), "rb", 0) as f:
        header = f.readline()
        vocab_size, vector_size = map(int, header.split())
        binary_len = np.dtype('float32').itemsize * vector_size
        initW = np.random.uniform(-0.25, 0.25, (len(word_to_id), vector_size))
        for line in range(vocab_size):
            word = []
            while True:
                ch = f.read(1)
                if ch == b' ':
                    word = b''.join(word).decode('utf-8')
                    break
                if ch != b'\n':
                    word.append(ch)
            if word in word_to_id:
                initW[word_to_id[word]] = np.fromstring(f.read(binary_len), dtype='float32')
            else:
                f.read(binary_len)
    return initW
From the solution in example 4, I thought I should be able to do something like
session.run(tf.assign(embedding, initW)).
If I try to add the line like this when the session is initialized:
with sv.managed_session() as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    session.run(tf.assign(m.embedding, initW))
I get the following error:
ValueError: Fetch argument <tf.Tensor 'Assign:0' shape=(10000, 300) dtype=float32_ref> cannot be interpreted as a Tensor. (Tensor Tensor("Assign:0", shape=(10000, 300), dtype=float32_ref, device=/device:CPU:0) is not an element of this graph.)
Update: I updated the code following Nilesh Birari's suggestion: Full code. It results in no improvement in validation or test set perplexity; it only improves training set perplexity.
Correct me if I am wrong; I'm trying to answer with my limited understanding of TensorFlow.
ValueError: Fetch argument <tf.Tensor 'Assign:0' shape=(10000, 300) dtype=float32_ref> cannot be interpreted as a Tensor. (Tensor Tensor("Assign:0", shape=(10000, 300), dtype=float32_ref, device=/device:CPU:0) is not an element of this graph.)
This simply states that you are trying to initialize an element of a different graph, so I guess you need to be in the same scope in which your graph is defined. Just moving your embedding initialization code into the same scope can solve the problem.
with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
        train_input = PTBInput(config=config, data=train_data, name="TrainInput")
        with tf.variable_scope("Model", reuse=None, initializer=initializer):
            m = PTBModel(is_training=True, config=config, input_=train_input)
        tf.summary.scalar("Training Loss", m.cost)
        tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
        valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
        tf.summary.scalar("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
        test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
        with tf.variable_scope("Model", reuse=True, initializer=initializer):
            mtest = PTBModel(is_training=False, config=eval_config,
                             input_=test_input)

    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
        word2vec = loadEmbedding(word_to_id)
        session.run(tf.assign(m.embedding, word2vec))
        print("WORKED!!!")
I guess this should be the only problem; as you can see in your first example, the initialization is under the same scope.
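As a hedged alternative sketch (not from the original answer): feeding the pre-trained matrix through a placeholder keeps the large constant out of the graph definition; the variable/scope names and the 300-dimensional size below are assumptions based on the snippets above.
# Hedged alternative: assign via a placeholder so the embedding matrix is not
# baked into the graph as a constant. Names and sizes are assumptions.
with tf.variable_scope("Model", reuse=True):
    embedding_var = tf.get_variable("embedding", [vocab_size, 300])

embedding_ph = tf.placeholder(tf.float32, [vocab_size, 300])
assign_embedding = tf.assign(embedding_var, embedding_ph)

with sv.managed_session() as session:
    word2vec = loadEmbedding(word_to_id)
    session.run(assign_embedding, feed_dict={embedding_ph: word2vec})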