NLP: How to train a spaCy NER Model using GoldParse objects

I am trying to train a spaCy NER model using GoldParse objects. This is what I have done:
Adding extra labels to NER model
add_ents = ['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1']  # sample labels

# Create a pipe if it does not exist
if "ner" not in nlp.pipe_names:
    ner = nlp.create_pipe("ner")
    nlp.add_pipe(ner)
else:
    ner = nlp.get_pipe("ner")

for e in add_ents:
    ner.add_label(e)
Training the NER Model
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != "ner"]
model = None  # since we are training a fresh model, not a saved model

with nlp.disable_pipes(*other_pipes):  # only train ner
    if model is None:
        optimizer = nlp.begin_training()
    else:
        optimizer = nlp.resume_training()
    for i in range(20):
        loss = {}
        nlp.update(X, y, sgd=optimizer, drop=0.0, losses=loss)
        print("Loss: ", loss)
Here X is a list of Doc objects and y is a list of corresponding GoldParse objects. While executing I am running into the following error:
nn_parser.pyx in spacy.syntax.nn_parser.Parser.update()
nn_parser.pyx in spacy.syntax.nn_parser.Parser._init_gold_batch()
ner.pyx in spacy.syntax.ner.BiluoPushDown.preprocess_gold()
ner.pyx in spacy.syntax.ner.BiluoPushDown.lookup_transition()
ValueError: 'A1' is not in list
I tried searching for the solution but couldn't find anything relevant. Is there a way to fix this issue?
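For reference, this is roughly how the (Doc, GoldParse) pairs in X and y are built in spaCy 2.x; the text and character offsets below are purely illustrative:
from spacy.gold import GoldParse

# Illustrative training example: (text, list of (start_char, end_char, label) offsets)
train_data = [
    ("show me item A1 please", [(13, 15, "A1")]),
]

X, y = [], []
for text, entity_offsets in train_data:
    doc = nlp.make_doc(text)
    X.append(doc)
    y.append(GoldParse(doc, entities=entity_offsets))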

Related

`nlp.add_pipe` now takes the string name of the registered component factory, not a callable component

With spaCy version 3.0 I am facing a problem adding the pipeline. Below is the code I am using; I have also attached a screenshot of the error.
nlp = spacy.blank('en')

def train_model(train_data):
    # Remove all pipelines and add NER pipeline from the model
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
        nlp.add_pipe(ner, last=True)

    # Add labels in the NLP pipeline
    for _, annotation in train_data:
        for ent in annotation.get('entities'):
            ner.add_label(ent[2])

    # Remove other pipelines if they are there
    other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
    print(other_pipes)

    with nlp.disable_pipes(*other_pipes):  # only train NER
        optimizer = nlp.begin_training()
        for itn in range(10):  # train for 10 iterations
            print("Starting iteration " + str(itn))
            random.shuffle(train_data)
            losses = {}
            index = 0
            for text, annotations in train_data:
                try:
                    nlp.update(
                        [text],          # batch of texts
                        [annotations],   # batch of annotations
                        drop=0.2,        # dropout - make it harder to memorise data
                        sgd=optimizer,   # callable to update weights
                        losses=losses)
                except Exception as e:
                    pass
            print(losses)
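As the title of this question states, in spaCy 3.x nlp.add_pipe takes the string name of a registered component factory and returns the created component, so passing the object returned by create_pipe into add_pipe is exactly what raises this error. A minimal sketch of the v3-style setup, reusing the same train_data structure as above:
import spacy

nlp = spacy.blank('en')

# spaCy 3.x: pass the factory name; the created component is returned
if 'ner' not in nlp.pipe_names:
    ner = nlp.add_pipe('ner', last=True)
else:
    ner = nlp.get_pipe('ner')

for _, annotation in train_data:
    for ent in annotation.get('entities'):
        ner.add_label(ent[2])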

How do you fit a tf.Dataset to a Keras Autoencoder Model when the Dataset has been generated using TFX?

Problem
As the title suggests I have been trying to create a pipeline for training an Autoencoder model using TFX. The problem I'm having is fitting the tf.Dataset returned by the DataAccessor.tf_dataset_factory object to the Autoencoder.
Below I summarise the steps I've taken through this project, and have some Questions at the bottom if you wish to skip the background information.
Intro
TFX Pipeline
The TFX components I have used so far have been:
CsvExampleGenerator (the dataset has 82 columns, all numeric, and the sample csv has 739 rows)
StatisticsGenerator / SchemaGenerator; the schema has been edited and is now loaded in using an Importer
Transform
Trainer (this is the component I am currently having problems with)
Model
The model I am attempting to train is based on the example laid out here https://www.tensorflow.org/tutorials/generative/autoencoder. However, my model is being trained on tabular data, searching for anomalous results, as opposed to image data.
In trying out a couple of solutions I have used both the Keras.Model subclassing format and the Keras.layers (functional) format for defining the model, and I outline both below:
Subclassing Keras.Model
class Autoencoder(keras.models.Model):
    def __init__(self, features):
        super(Autoencoder, self).__init__()
        self.encoder = tf.keras.Sequential([
            keras.layers.Dense(82, activation = 'relu'),
            keras.layers.Dense(32, activation = 'relu'),
            keras.layers.Dense(16, activation = 'relu'),
            keras.layers.Dense(8, activation = 'relu')
        ])
        self.decoder = tf.keras.Sequential([
            keras.layers.Dense(16, activation = 'relu'),
            keras.layers.Dense(32, activation = 'relu'),
            keras.layers.Dense(len(features), activation = 'sigmoid')
        ])

    def call(self, x):
        inputs = [keras.layers.Input(shape = (1,), name = f) for f in features]
        dense = keras.layers.concatenate(inputs)
        encoded = self.encoder(dense)
        decoded = self.decoder(encoded)
        return decoded
Subclassing Keras.Layers
def _build_keras_model(features: List[str]) -> tf.keras.Model:
    inputs = [keras.layers.Input(shape = (1,), name = f) for f in features]
    dense = keras.layers.concatenate(inputs)
    dense = keras.layers.Dense(32, activation = 'relu')(dense)
    dense = keras.layers.Dense(16, activation = 'relu')(dense)
    dense = keras.layers.Dense(8, activation = 'relu')(dense)
    dense = keras.layers.Dense(16, activation = 'relu')(dense)
    dense = keras.layers.Dense(32, activation = 'relu')(dense)
    outputs = keras.layers.Dense(len(features), activation = 'sigmoid')(dense)

    model = keras.Model(inputs = inputs, outputs = outputs)
    model.compile(
        optimizer = 'adam',
        loss = 'mae'
    )
    return model
TFX Trainer Component
For creating the Trainer Component I have been mainly following the implementation details laid out here: https://www.tensorflow.org/tfx/guide/trainer
As well as following the default penguins example: https://www.tensorflow.org/tfx/tutorials/tfx/penguin_simple#write_model_training_code
run_fn definition
def run_fn(fn_args: tfx.components.FnArgs) -> None:
    tft_output = tft.TFTransformOutput(fn_args.transform_output)

    train_dataset = _input_fn(
        file_pattern = fn_args.train_files,
        data_accessor = fn_args.data_accessor,
        tf_transform_output = tft_output,
        batch_size = fn_args.train_steps
    )
    eval_dataset = _input_fn(
        file_pattern = fn_args.eval_files,
        data_accessor = fn_args.data_accessor,
        tf_transform_output = tft_output,
        batch_size = fn_args.custom_config['eval_batch_size']
    )

    # model = Autoencoder(
    #     features = fn_args.custom_config['features']
    # )
    model = _build_keras_model(features = fn_args.custom_config['features'])
    model.compile(optimizer = 'adam', loss = 'mse')

    model.fit(
        train_dataset,
        steps_per_epoch = fn_args.train_steps,
        validation_data = eval_dataset,
        validation_steps = fn_args.eval_steps
    )
    ...
_input_fn definition
def _apply_preprocessing(raw_features, tft_layer):
    transformed_features = tft_layer(raw_features)
    return transformed_features


def _input_fn(
    file_pattern,
    data_accessor: tfx.components.DataAccessor,
    tf_transform_output: tft.TFTransformOutput,
    batch_size: int) -> tf.data.Dataset:
    """
    Generates features and label for tuning/training.
    Args:
        file_pattern: List of paths or patterns of input tfrecord files.
        data_accessor: DataAccessor for converting input to RecordBatch.
        tf_transform_output: A TFTransformOutput.
        batch_size: representing the number of consecutive elements of returned
            dataset to combine in a single batch
    Returns:
        A dataset that contains features where features is a
        dictionary of Tensors.
    """
    dataset = data_accessor.tf_dataset_factory(
        file_pattern,
        tfxio.TensorFlowDatasetOptions(batch_size = batch_size),
        tf_transform_output.transformed_metadata.schema
    )

    transform_layer = tf_transform_output.transform_features_layer()
    def apply_transform(raw_features):
        return _apply_preprocessing(raw_features, transform_layer)

    return dataset.map(apply_transform).repeat()
This differs from the _input_fn example given above as I was following the example in the next tfx tutorial found here: https://www.tensorflow.org/tfx/tutorials/tfx/penguin_tft#run_fn
Also for reference, there is no Target within the example data so there is no label_key to be passed to the tfxio.TensorFlowDatasetOptions object.
Error
When trying to run the Trainer component using a TFX InteractiveContext object I receive the following error.
ValueError: No gradients provided for any variable: ['dense_460/kernel:0', 'dense_460/bias:0', 'dense_461/kernel:0', 'dense_461/bias:0', 'dense_462/kernel:0', 'dense_462/bias:0', 'dense_463/kernel:0', 'dense_463/bias:0', 'dense_464/kernel:0', 'dense_464/bias:0', 'dense_465/kernel:0', 'dense_465/bias:0'].
From my own attempts to solve this I believe the problem lies in the way that an Autoencoder is trained. From the Autoencoder example linked here https://www.tensorflow.org/tutorials/generative/autoencoder the data is fitted like so:
autoencoder.fit(x_train, x_train,
                epochs=10,
                shuffle=True,
                validation_data=(x_test, x_test))
It therefore stands to reason that the tf.Dataset should mimic this behaviour. When testing with plain Tensor objects I was able to recreate the error above, and then resolve it by setting the target to be the same as the training data in the .fit() call.
Things I've Tried So Far
Duplicating Train Dataset
model.fit(
    train_dataset,
    train_dataset,
    steps_per_epoch = fn_args.train_steps,
    validation_data = eval_dataset,
    validation_steps = fn_args.eval_steps
)
This raises an error because Keras does not accept a y value when a dataset is passed:
ValueError: `y` argument is not supported when using dataset as input.
Returning a dataset that is a tuple with itself
def _input_fn(...):
    dataset = data_accessor.tf_dataset_factory(
        file_pattern,
        tfxio.TensorFlowDatasetOptions(batch_size = batch_size),
        tf_transform_output.transformed_metadata.schema
    )

    transform_layer = tf_transform_output.transform_features_layer()
    def apply_transform(raw_features):
        return _apply_preprocessing(raw_features, transform_layer)

    dataset = dataset.map(apply_transform)
    return dataset.map(lambda x: (x, x))
This raises an error where the keys from the features dictionary don't match the output of the model.
ValueError: Found unexpected keys that do not correspond to any Model output: dict_keys(['feature_string', ...]). Expected: ['dense_477']
At this point I switched to using the keras.Model Autoencoder subclass and tried to add output keys to the model, creating the outputs dynamically in the same way as the inputs.
def call(self, x):
    inputs = [keras.layers.Input(shape = (1,), name = f) for f in x]
    dense = keras.layers.concatenate(inputs)
    encoded = self.encoder(dense)
    decoded = self.decoder(encoded)
    outputs = {}
    for feature_name in x:
        outputs[feature_name] = keras.layers.Dense(1, activation = 'sigmoid')(decoded)
    return outputs
This raises the following error:
TypeError: Cannot convert a symbolic Keras input/output to a numpy array. This error may indicate that you're trying to pass a symbolic value to a NumPy call, which is not supported. Or, you may be trying to pass Keras symbolic inputs/outputs to a TF API that does not register dispatching, preventing Keras from automatically converting the API call to a lambda layer in the Functional Model.
I've been looking into solving this issue but am no longer sure if the data is being passed correctly and am beginning to think I'm getting side-tracked from the actual problem.
Questions
Has anyone managed to get an Autoencoder working with data coming from TFX Examples?
Did you alter the tf.Dataset, or handle the examples in a different way from the _input_fn demonstrated above?
So I managed to find an answer to this and wanted to leave what I found here in case anyone else stumbles onto a similar problem.
It turns out my hunch about the error was correct: the solution did indeed lie in how the tf.Dataset object was presented.
This can be demonstrated with some code that simulates the incoming data using randomly generated tensors.
tensors = [tf.random.uniform(shape = (1, 82)) for i in range(739)]
# This gives us a list of 739 tensors, each holding 1 value for the 82 'features',
# simulating the dataset I had
dataset = tf.data.Dataset.from_tensor_slices(tensors)
dataset = dataset.map(lambda x: (x, x))
# This returns a dataset in which the training input and target are the same,
# which is what the Autoencoder model is looking for
model.fit(dataset, ...)
Following this I proceeded to do the same thing with the dataset returned by the _input_fn. However, since the TFX DataAccessor object returns a features dict, I needed to concatenate the tensors in that dict to create a single target tensor.
This is how my _input_fn looks now:
def create_target_values(features_dict: Dict[str, tf.Tensor]) -> tuple:
    value_tensor = tf.concat(list(features_dict.values()), axis = 1)
    return (features_dict, value_tensor)


def _input_fn(
    file_pattern,
    data_accessor: tfx.components.DataAccessor,
    tf_transform_output: tft.TFTransformOutput,
    batch_size: int) -> tf.data.Dataset:
    """
    Generates features and label for tuning/training.
    Args:
        file_pattern: List of paths or patterns of input tfrecord files.
        data_accessor: DataAccessor for converting input to RecordBatch.
        tf_transform_output: A TFTransformOutput.
        batch_size: representing the number of consecutive elements of returned
            dataset to combine in a single batch
    Returns:
        A dataset that contains (features, target_tensor) tuples, where features is a
        dictionary of Tensors and target_tensor is a single Tensor formed by
        concatenating all the feature values.
    """
    dataset = data_accessor.tf_dataset_factory(
        file_pattern,
        tfxio.TensorFlowDatasetOptions(batch_size = batch_size),
        tf_transform_output.transformed_metadata.schema
    )

    dataset = dataset.map(lambda x: create_target_values(features_dict = x))
    return dataset.repeat()
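To sanity-check the idea end to end, here is a small self-contained sketch with dummy data; the feature names and sizes are made up to mirror the 82-column case, and _build_keras_model is the functional model defined earlier:
import tensorflow as tf

features = [f'feature_{i}' for i in range(82)]  # hypothetical feature names

def create_target_values(features_dict):
    # Concatenate every (batch, 1) feature tensor into a single (batch, 82) target
    value_tensor = tf.concat(list(features_dict.values()), axis = 1)
    return (features_dict, value_tensor)

# Simulate the dict-of-tensors batches that the DataAccessor yields
dummy_data = {name: tf.random.uniform(shape = (739, 1)) for name in features}
dataset = tf.data.Dataset.from_tensor_slices(dummy_data).batch(32)
dataset = dataset.map(create_target_values)

model = _build_keras_model(features)
model.fit(dataset, epochs = 1)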

Using Gensim Fasttext model with LSTM nn in keras

I have trained a fastText model with Gensim over a corpus of very short sentences (up to 10 words). I know that my test set includes words that are not in my train corpus, e.g. some of the words in my corpus are like "Oxytocin", "Lexitocin", "Ematrophin", "Betaxitocin".
Given a new word in the test set, fastText does a good job of generating a vector with high cosine similarity to the similar words in the train set by using character-level n-grams.
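For context, this behaviour is easy to check directly on the Gensim model; a quick illustration, assuming a trained FastText model ft and a made-up word:
# Even a word never seen during training gets a vector built from its character n-grams
oov_vector = ft.wv['betaxitocinol']                   # hypothetical OOV word
similar = ft.wv.most_similar(positive=[oov_vector])   # nearest in-vocabulary words by cosine similarity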
How do I incorporate the fastText model inside an LSTM Keras network without reducing it to just a list of vectors for the in-vocabulary words? Otherwise I won't handle any OOV words, even though fastText does this well.
Any idea?
Here is the procedure to incorporate the fastText model inside an LSTM Keras network:
# define dummy data and preprocess them
docs = ['Well done',
        'Good work',
        'Great effort',
        'nice work',
        'Excellent',
        'Weak',
        'Poor effort',
        'not good',
        'poor work',
        'Could have done better']
docs = [d.lower().split() for d in docs]

# train fasttext from gensim api
ft = FastText(size=10, window=2, min_count=1, seed=33)
ft.build_vocab(docs)
ft.train(docs, total_examples=ft.corpus_count, epochs=10)

# prepare text for keras neural network
max_len = 8
tokenizer = tf.keras.preprocessing.text.Tokenizer(lower=True)
tokenizer.fit_on_texts(docs)
sequence_docs = tokenizer.texts_to_sequences(docs)
sequence_docs = tf.keras.preprocessing.sequence.pad_sequences(sequence_docs, maxlen=max_len)

# extract fasttext learned embedding and put them in a numpy array
embedding_matrix_ft = np.random.random((len(tokenizer.word_index) + 1, ft.vector_size))

pas = 0
for word, i in tokenizer.word_index.items():
    try:
        embedding_matrix_ft[i] = ft.wv[word]
    except:
        pas += 1

# define a keras model and load the pretrained fasttext weights matrix
inp = Input(shape=(max_len,))
emb = Embedding(len(tokenizer.word_index) + 1, ft.vector_size,
                weights=[embedding_matrix_ft], trainable=False)(inp)
x = LSTM(32)(emb)
out = Dense(1)(x)
model = Model(inp, out)

model.predict(sequence_docs)
How to deal with unseen text:
unseen_docs = ['asdcs work','good nxsqa zajxa']
unseen_docs = [d.lower().split() for d in unseen_docs]
sequence_unseen_docs = tokenizer.texts_to_sequences(unseen_docs)
sequence_unseen_docs = tf.keras.preprocessing.sequence.pad_sequences(sequence_unseen_docs, maxlen=max_len)
model.predict(sequence_unseen_docs)

How to save Tensorflow encoder decoder model?

I followed this tutorial about building an encoder-decoder language translation model and built one for my native language.
Now I want to save it, deploy on cloud ML engine and make predictions with HTTP request.
I couldn't find a clear example of how to save this model.
I am new to ML and found the TF save guide very confusing.
Is there a way to save this model using something like
tf.keras.models.save_model
Create the train saver after opening the session, and once training is done save the model:
with tf.Session() as sess:
    saver = tf.train.Saver()
    # Training of the model
    save_path = saver.save(sess, "logs/encoder_decoder")
    print(f"Model saved in path {save_path}")
You can save a Keras model in Keras's HDF5 format, see:
https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model
You will want to do something like:
import tensorflow as tf

model = tf.keras.Model(...)  # build your model here
model.save('my_model.h5')
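The saved file can later be loaded back in the same way, for example:
reloaded_model = tf.keras.models.load_model('my_model.h5')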
If you migrate to TF 2.0, it's more straightforward to build a model in tf.keras and deploy using the TF SavedModel format. This 2.0 tutorial shows using a pretrained tf.keras model, saving the model in SavedModel format, deploying to the cloud and then doing an HTTP request for a prediction:
https://www.tensorflow.org/beta/guide/saved_model
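For reference, a minimal sketch of exporting a built tf.keras model in the SavedModel format with TF 2.x (the export directory name is illustrative):
import tensorflow as tf

# `model` is assumed to be an already built/compiled tf.keras model
tf.saved_model.save(model, 'export_dir/encoder_decoder/1')
# or equivalently:
# model.save('export_dir/encoder_decoder/1', save_format='tf')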
I know I am a little late but was having the same problem (see How do I save an encoder-decoder model with TensorFlow? for more details) and figured out a solution. It's a little hacky, but it works!
Step 1 - Saving your model
Save your tokenizer (if applicable). Then individually save the weights of the model you used to train your data (naming your layers helps here).
# Save the tokenizer
with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)

# save the weights individually
for layer in model.layers:
    weights = layer.get_weights()
    if weights != []:
        np.savez(f'{layer.name}.npz', weights)
Step 2 - reloading the weights
You will want to reload the tokenizer (if applicable), then load the weights you just saved. The loaded weights are in npz format, so they can't be used directly, but the very short documentation tells you everything you need to know about this file type: https://numpy.org/doc/stable/reference/generated/numpy.savez.html
# load the tokenizer
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
# load the weights
w_encoder_embeddings = np.load('encoder_embeddings.npz', allow_pickle=True)
w_decoder_embeddings = np.load('decoder_embeddings.npz', allow_pickle=True)
w_encoder_lstm = np.load('encoder_lstm.npz', allow_pickle=True)
w_decoder_lstm = np.load('decoder_lstm.npz', allow_pickle=True)
w_dense = np.load('dense.npz', allow_pickle=True)
Step 3 - Recreate your training model and apply the weights
You'll want to re-run the code you used to create your model. In my case this was:
encoder_inputs = Input(shape=(None,), name="encoder_inputs")
encoder_embeddings = Embedding(vocab_size, embedding_size, mask_zero=True, name="encoder_embeddings")(encoder_inputs)
# Encoder lstm
encoder_lstm = LSTM(512, return_state=True, name="encoder_lstm")
encoder_outputs, state_h, state_c = encoder_lstm(encoder_embeddings)
# discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,), name="decoder_inputs")
# target word embeddings
decoder_embeddings = Embedding(vocab_size, embedding_size, mask_zero=True, name="decoder_embeddings")
training_decoder_embeddings = decoder_embeddings(decoder_inputs)
# decoder lstm
decoder_lstm = LSTM(512, return_sequences=True, return_state=True, name="decoder_lstm")
decoder_outputs, _, _ = decoder_lstm(training_decoder_embeddings,
                                     initial_state=encoder_states)
decoder_dense = TimeDistributed(Dense(vocab_size, activation='softmax'), name="dense")
decoder_outputs = decoder_dense(decoder_outputs)
# While training, the model takes input and target words and outputs target strings
loaded_model = Model([encoder_inputs, decoder_inputs], decoder_outputs, name="training_model")
Now you can apply your saved weights to these layers! It takes a little bit of investigation which weight goes to which layer, but this is made a lot easier by naming your layers and inspecting your model layers with model.layers.
# set the weights of the model
loaded_model.layers[2].set_weights(w_encoder_embeddings['arr_0'])
loaded_model.layers[3].set_weights(w_decoder_embeddings['arr_0'])
loaded_model.layers[4].set_weights(w_encoder_lstm['arr_0'])
loaded_model.layers[5].set_weights(w_decoder_lstm['arr_0'])
loaded_model.layers[6].set_weights(w_dense['arr_0'])
Step 4 - Create the inference model
Finally, you can now create your inference model based on this training model! Again in my case this was:
encoder_model = Model(encoder_inputs, encoder_states)

# Redefine the decoder model; at prediction time the decoder receives the inputs below from the encoder
decoder_state_input_h = Input(shape=(512,))
decoder_state_input_c = Input(shape=(512,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]

inference_decoder_embeddings = decoder_embeddings(decoder_inputs)
decoder_outputs2, state_h2, state_c2 = decoder_lstm(inference_decoder_embeddings, initial_state=decoder_states_inputs)
decoder_states2 = [state_h2, state_c2]
decoder_outputs2 = decoder_dense(decoder_outputs2)

# The sampling model takes the encoder states and decoder_input (the seed, initially) and outputs
# the predictions (French word indices). We don't care about decoder_states2.
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs2] + decoder_states2)
And voilà! You can now make inferences using the previously trained model!
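If it helps, here is a rough sketch of how the two inference models are typically used together for greedy decoding; the start/end token ids, max_output_len, and the way you map ids back to words are assumptions about your tokenizer setup:
import numpy as np

def decode_sequence(input_seq, start_token_id, end_token_id, max_output_len=50):
    # Encode the input sequence into the initial decoder states
    states = encoder_model.predict(input_seq)

    # Start from the seed token and generate one word at a time
    target_seq = np.array([[start_token_id]])
    decoded_ids = []
    for _ in range(max_output_len):
        output_tokens, h, c = decoder_model.predict([target_seq] + states)
        sampled_id = int(np.argmax(output_tokens[0, -1, :]))
        if sampled_id == end_token_id:
            break
        decoded_ids.append(sampled_id)
        # Feed the sampled word back in and carry the LSTM states forward
        target_seq = np.array([[sampled_id]])
        states = [h, c]
    return decoded_ids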

In TensorFlow, how to use a restored meta-graph if the meta-graph was fed with TFRecord input (without placeholders)

I trained a network with a TFRecord input pipeline. In other words, there were no placeholders. A simple example would be:
input, truth = _get_next_batch() # TFRecord. `input` is not a tf.placeholder
net = Model(input)
net.set_loss(truth)
optimizer = tf...(net.loss)
Let's say I acquired three files: ckpt-20000.meta, ckpt-20000.data-0000-of-0001, and ckpt-20000.index. I understand that one can later import the meta-graph using the .meta file and access tensors such as:
new_saver = tf.train.import_meta_graph('ckpt-20000.meta')
new_saver.restore(sess, 'ckpt-20000')
logits = tf.get_collection("logits")[0]
However, the meta-graph does not have a placeholder anywhere in the pipeline. Is there a way I can use the meta-graph to run inference on an input?
For information, in a query application (or script) I used to define a model with a placeholder and restore the model weights (see below). I am wondering if I can just utilize the meta-graph without re-defining the model, since that would be much simpler.
input = tf.placeholder(...)
net = Model(input)
tf.restore(sess, 'ckpt-2000')
lgt = sess.run(net.logits, feed_dict = {input:img})
You can build a graph that uses placeholder_with_default() for the inputs, so you can use both the TFRecord input pipeline and feed_dict{}.
An example:
input, truth = _get_next_batch()
_x = tf.placeholder_with_default(input, shape=[...], name='input')
_y = tf.placeholder_with_default(truth, shape=[...], name='label')
net = Model(_x)
net.set_loss(_y)
optimizer = tf...(net.loss)
Then during inference,
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    new_saver = tf.train.import_meta_graph('ckpt-20000.meta')
    new_saver.restore(sess, 'ckpt-20000')

    # Get the tensors by their variable name
    input = loaded_graph.get_tensor_by_name('input:0')
    logits = loaded_graph.get_tensor_by_name(...)

    # Now you can feed the inputs to your tensors
    lgt = sess.run(logits, feed_dict={input: img})
In the above example, if you don't feed input, then the input will be read from the TFRecord input pipeline.
Is there a way to do it without placeholders at test though? It should be possible to re-use the graph with a new input pipeline without resorting to slow placeholders (i.e. the test dataset may be very large). placeholder_with_default is a suboptimal solution in that case.
The recommended way is saving two meta graphs. One is for Training/Validation/Testing, and the other one is for inference.
see Building a SavedModel
export_dir = ...
...
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)

with tf.Session(graph=tf.Graph()) as sess:
    ...
    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.TRAINING],
                                         signature_def_map=foo_signatures,
                                         assets_collection=foo_assets)
...
# Add a second MetaGraphDef for inference.
with tf.Session(graph=tf.Graph()) as sess:
    ...
    builder.add_meta_graph([tag_constants.SERVING])
...
builder.save()
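At serving time you then load only the inference meta-graph from the same export_dir, along these lines (TF 1.x API):
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tag_constants.SERVING], export_dir)
    # Look up the input/output tensors by name (or via the saved signature) and run them with sess.run(...)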
The NMT tutorial also provides a detailed example about creating multiple graphs with shared variables: Neural Machine Translation (seq2seq) Tutorial-Building Training, Eval, and Inference Graphs
train_graph = tf.Graph()
eval_graph = tf.Graph()
infer_graph = tf.Graph()

with train_graph.as_default():
    train_iterator = ...
    train_model = BuildTrainModel(train_iterator)
    initializer = tf.global_variables_initializer()

with eval_graph.as_default():
    eval_iterator = ...
    eval_model = BuildEvalModel(eval_iterator)

with infer_graph.as_default():
    infer_iterator, infer_inputs = ...
    infer_model = BuildInferenceModel(infer_iterator)

checkpoints_path = "/tmp/model/checkpoints"

train_sess = tf.Session(graph=train_graph)
eval_sess = tf.Session(graph=eval_graph)
infer_sess = tf.Session(graph=infer_graph)

train_sess.run(initializer)
train_sess.run(train_iterator.initializer)

for i in itertools.count():
    train_model.train(train_sess)

    if i % EVAL_STEPS == 0:
        checkpoint_path = train_model.saver.save(train_sess, checkpoints_path, global_step=i)
        eval_model.saver.restore(eval_sess, checkpoint_path)
        eval_sess.run(eval_iterator.initializer)
        while data_to_eval:
            eval_model.eval(eval_sess)

    if i % INFER_STEPS == 0:
        checkpoint_path = train_model.saver.save(train_sess, checkpoints_path, global_step=i)
        infer_model.saver.restore(infer_sess, checkpoint_path)
        infer_sess.run(infer_iterator.initializer, feed_dict={infer_inputs: infer_input_data})
        while data_to_infer:
            infer_model.infer(infer_sess)