I'm trying to work through the Google BERT tutorial in Google Colab, and am having a hard time following some of its steps.
Specifically, there is a function called create_model which reads like this:
def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,
                 num_labels):
  """Creates a classification model."""

  bert_module = hub.Module(
      BERT_MODEL_HUB,
      trainable=True)
  bert_inputs = dict(
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids)
  bert_outputs = bert_module(
      inputs=bert_inputs,
      signature="tokens",
      as_dict=True)

  # Use "pooled_output" for classification tasks on an entire sentence.
  # Use "sequence_output" for token-level output.
  output_layer = bert_outputs["pooled_output"]

  hidden_size = output_layer.shape[-1].value

  # Create our own layer to tune for politeness data.
  output_weights = tf.get_variable(
      "output_weights", [num_labels, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "output_bias", [num_labels], initializer=tf.zeros_initializer())

  with tf.variable_scope("loss"):
    # Dropout helps prevent overfitting
    output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)

    logits = tf.matmul(output_layer, output_weights, transpose_b=True)
    logits = tf.nn.bias_add(logits, output_bias)
    log_probs = tf.nn.log_softmax(logits, axis=-1)

    # Convert labels into one-hot encoding
    one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)

    predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))
    # If we're predicting, we want the predicted labels and the probabilities.
    if is_predicting:
      return (predicted_labels, log_probs)

    # If we're training/evaluating, compute loss between predicted and actual labels.
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    return (loss, predicted_labels, log_probs)
This would be fine, but when I look for where the create_model function is called in the notebook, the origin of the input_ids, input_mask and segment_ids arguments is not clear to me.
This is where the function is referenced later on in the notebook:
def model_fn_builder(num_labels, learning_rate, num_train_steps,
                     num_warmup_steps):
  """Returns `model_fn` closure for TPUEstimator."""
  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""

    input_ids = features["input_ids"]
    input_mask = features["input_mask"]
    segment_ids = features["segment_ids"]
    label_ids = features["label_ids"]

    is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)

    # TRAIN and EVAL
    if not is_predicting:
      (loss, predicted_labels, log_probs) = create_model(
          is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)
The problem here is that features is not listed as an argument of the parent model_fn_builder function, and it's not defined anywhere else in the notebook, so I'm not sure why this works. Even more importantly, I'm not sure what things like features['input_ids'] are supposed to represent. Without this being clear to me, it will be difficult to make sense of what BERT actually does.
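My rough understanding so far is that the Estimator machinery itself calls model_fn: you hand model_fn to the Estimator constructor, and estimator.train(input_fn=...) builds the features dict from whatever input_fn returns. A minimal sketch of that contract as I understand it (toy hypothetical names, assuming standard tf.estimator behavior):

import tensorflow as tf

def toy_input_fn():
    # An input_fn returns (features, labels); the Estimator forwards these
    # to model_fn as its `features` and `labels` arguments.
    features = {"input_ids": tf.constant([[101, 2023, 102]]),
                "input_mask": tf.constant([[1, 1, 1]]),
                "segment_ids": tf.constant([[0, 0, 0]]),
                "label_ids": tf.constant([0])}
    return features, None

def toy_model_fn(features, labels, mode, params):
    # `features` here is exactly the dict built by toy_input_fn above.
    loss = tf.reduce_sum(tf.cast(features["input_ids"], tf.float32))
    global_step = tf.train.get_or_create_global_step()
    train_op = tf.assign_add(global_step, 1)  # dummy "training" step
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

estimator = tf.estimator.Estimator(model_fn=toy_model_fn)
estimator.train(input_fn=toy_input_fn, steps=1)  # the Estimator calls model_fn internally

Is that the right mental model?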
Thank you for your help.
I know how to train a network on a single GPU -> save a checkpoint -> later on load this checkpoint -> run benchmarks.
I can't figure out how to do this when I train using multiple GPUs and the new Dataset API.
Here is the 'normal' training code:
import tensorflow as tf
images_placeholder = tf.placeholder(tf.float32, shape=(None, image_size,
                                                       image_size, 1), name='input')
labels_placeholder = tf.placeholder(tf.int32, shape=(None))
embeddings = build_graph(images_placeholder)
loss = add_loss(embeddings, labels_placeholder)
embeddings = tf.identity(embeddings, 'embeddings')
Later on, when I want to benchmark:
with tf.Graph().as_default():
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        load_graph_def(model_path)  # for example: d:\model.ckpt-0
        images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
        embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
        images = benchmark_utils.load_data(paths_batch, image_size)
        feed_dict = {images_placeholder: images}
        predictions = sess.run(embeddings, feed_dict=feed_dict)
So now I want to train with multiple GPUs like so:
with tf.Graph().as_default(), tf.device('/cpu:0'):
    dataset = tf.data.Dataset.from_tensor_slices((images_list, labels_list))
    dataset = dataset.map(load_images)
    dataset = dataset.shuffle(buffer_size=100)
    dataset = dataset.batch(128)
    dataset = dataset.repeat()
    iterator = dataset.make_one_shot_iterator()

    opt = tf.train.MomentumOptimizer(0.01, momentum=0.9, use_nesterov=True)
    tower_grads = []
    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
                    image_batch, label_batch = iterator.get_next()
                    loss = tower_loss(scope, image_batch, label_batch)
What I can't figure out is how to get the 'input' and 'embeddings' tensors when I want to benchmark the checkpoint.
How do I define, for example, the tensor called 'input' that should receive the images to be evaluated?
I'm guessing that somewhere in the multi-GPU code I should define this images_placeholder the way I did in the single-GPU training.
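One idea I had (a minimal sketch, untested, assuming the iterator pattern above) is to keep a named, feedable entry point alongside the dataset pipeline using tf.placeholder_with_default, and to give the output a stable name with tf.identity, as in my single-GPU code:

# Hypothetical sketch: at training time the default value (the dataset batch)
# flows through; at benchmark time I could feed "input:0" directly instead.
image_batch, label_batch = iterator.get_next()
images_input = tf.placeholder_with_default(
    image_batch, shape=(None, image_size, image_size, 1), name='input')
embeddings = build_graph(images_input)
embeddings = tf.identity(embeddings, 'embeddings')  # stable name for get_tensor_by_name

But I'm not sure whether this is the right approach with multiple towers.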
Thanks for any advice!
I'm a newbie with TensorFlow. I have created CNNs in TensorFlow following this topic: A Guide to TF Layers: Building a Convolutional Neural Network
I want to create CNNs and use them for training on a traffic sign dataset. The dataset I use is BelgiumTS. It includes two parts: one part stores images for training, the second part stores images for testing. All of the images are in .ppm format.
I define a method to load the dataset:
def load_data(data_dir):
    """Load data and return two numpy arrays"""
    directories = [d for d in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, d))]
    list_labels = []
    list_images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            list_images.append(skimage.data.imread(f))
            list_labels.append(int(d))
    # Resize images to 32x32 pixels
    list_images32 = [skimage.transform.resize(image, (32, 32)) for image in list_images]
    # Got error "Value passed to parameter 'input' has DataType float64 not in
    # list of allowed values: float16, float32" if I don't add this line
    list_images32 = tf.cast(list_images32, tf.float32)
    images = np.array(list_images32)
    labels = np.asarray(list_labels, dtype=int32)
    return images, labels
And this is the CNN definition:
def cnn_model_fn(features, labels, mode):
    # Input layer
    input_layer = tf.reshape(features["x"], [-1, 32, 32, 1])
    # Convolutional layer 1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional layer 2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 2
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Dropout
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss value
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
I run my app in main:
def main(unused_argv):
    # Load training and eval data
    train_data_dir = "W:/Projects/AutoDrive/Training"
    test_data_dir = "W:/Projects/AutoDrive/Testing"
    images, labels = load_data(train_data_dir)
    test_images, test_labels = load_data(test_data_dir)

    # Create the Estimator
    autoDrive_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/autoDrive_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": images},
        y=labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    autoDrive_classifier.train(
        input_fn=train_input_fn,
        steps=10000,
        hooks=[logging_hook])

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_images},
        y=test_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = autoDrive_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)
But when I run it, I get this error: ValueError: Argument must be a dense tensor ... got shape [4575, 32, 32, 3], but wanted [4575]. Did I miss something?
Finally, this is the full code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
import os
import skimage.data
import skimage.transform
import matplotlib
import matplotlib.pyplot as plt

tf.logging.set_verbosity(tf.logging.INFO)

def load_data(data_dir):
    """Load data and return two lists"""
    directories = [d for d in os.listdir(data_dir) if
                   os.path.isdir(os.path.join(data_dir, d))]
    list_labels = []
    list_images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f) for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            list_images.append(skimage.data.imread(f))
            list_labels.append(int(d))
    list_images32 = [skimage.transform.resize(image, (32, 32)) for image in list_images]
    list_images32 = tf.cast(list_images32, tf.float32)
    images = np.array(list_images32)
    labels = np.asarray(list_labels, dtype=int32)
    return images, labels

def cnn_model_fn(features, labels, mode):
    # Input layer
    input_layer = tf.reshape(features["x"], [-1, 32, 32, 1])
    # Convolutional layer 1
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 1
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # Convolutional layer 2
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # Pooling layer 2
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # Dense layer
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
    # Dropout
    dropout = tf.layers.dropout(inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
    # Logits layer
    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss value
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels, predictions=predictions["classes"])}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

def main(unused_argv):
    # Load training and eval data
    train_data_dir = "W:/Projects/TSRecognition/Training"
    test_data_dir = "W:/Projects/TSRecognition/Testing"
    images, labels = load_data(train_data_dir)
    test_images, test_labels = load_data(test_data_dir)

    # Create the Estimator
    TSRecognition_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="/tmp/TSRecognition_convnet_model")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": images},
        y=labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True)
    TSRecognition_classifier.train(
        input_fn=train_input_fn,
        steps=10000,
        hooks=[logging_hook])

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": test_images},
        y=test_labels,
        num_epochs=1,
        shuffle=False)
    eval_results = TSRecognition_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)

if __name__ == "__main__":
    tf.app.run()
Short answer for your code:
Get rid of the np.array and np.asarray calls in your load_data function. In particular, change:
list_images32 = [skimage.transform.resize(image,(32,32)) for image in list_images]
...to...
list_images32 = [skimage.transform.resize(image,(32,32)).astype(np.float32).tolist() for image in list_images]
...and return list_images32 AS IS from your load_data function. Don't "wrap it" with the np.asarray() call. The tolist() part of my suggestion is what is important. With the astype() call I'm just suggesting doing in numpy something you're doing in TensorFlow.
Simply getting rid of the np.asarray you have on list_labels should suffice for your labels.
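Putting both suggestions together, here is a minimal sketch of the revised load_data (untested against your exact data, but it shows the shape of the fix):

import os
import numpy as np
import skimage.data
import skimage.transform

def load_data(data_dir):
    """Load data and return two plain Python lists (no numpy wrappers)."""
    directories = [d for d in os.listdir(data_dir)
                   if os.path.isdir(os.path.join(data_dir, d))]
    list_labels = []
    list_images = []
    for d in directories:
        label_dir = os.path.join(data_dir, d)
        file_names = [os.path.join(label_dir, f)
                      for f in os.listdir(label_dir) if f.endswith(".ppm")]
        for f in file_names:
            list_images.append(skimage.data.imread(f))
            list_labels.append(int(d))
    # Resize and cast in numpy (instead of tf.cast), then convert each image to
    # nested lists so TensorFlow's dense-dimension inference sees plain lists.
    list_images32 = [skimage.transform.resize(image, (32, 32)).astype(np.float32).tolist()
                     for image in list_images]
    return list_images32, list_labels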
The full answer, for those who want to understand what's going on...
The "got shape...but wanted" exception is thrown from exactly one place in TensorFlow (tensor_util.py) and the reason is this function:
def _GetDenseDimensions(list_of_lists):
    """Returns the inferred dense dimensions of a list of lists."""
    if not isinstance(list_of_lists, (list, tuple)):
        return []
    elif not list_of_lists:
        return [0]
    else:
        return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
It is trying to traverse what it assumes are nested plain Python lists or plain Python tuples; it doesn't know what to do with the Numpy array type it finds in your data structure because of the np.array/np.asarray calls.
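To make the failure mode concrete, here is a small illustration with hypothetical shapes that mirrors the logic above:

import numpy as np

def get_dense_dimensions(list_of_lists):
    # Same logic as TensorFlow's _GetDenseDimensions shown above.
    if not isinstance(list_of_lists, (list, tuple)):
        return []
    elif not list_of_lists:
        return [0]
    else:
        return [len(list_of_lists)] + get_dense_dimensions(list_of_lists[0])

nested_lists = [[[0.0] * 3] * 32] * 4575         # plain Python lists all the way down
list_of_arrays = [np.zeros((32, 32, 3))] * 4575  # numpy arrays inside a list

print(get_dense_dimensions(nested_lists))    # [4575, 32, 3] - recurses into the lists
print(get_dense_dimensions(list_of_arrays))  # [4575] - stops at the ndarray

The second case is exactly the "got shape [4575, 32, 32, 3], but wanted [4575]" mismatch: the actual data is 4-dimensional, but the dimension inference only sees the outer list.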
I am new to TensorFlow. I created a 204x4 matrix where the first 3 columns are features and the last column is the target. How do I need to convert the array so that TensorFlow can train on the data?
TRAINING_SET = np.asarray(seq[:llength])
VALIDATION_SET = np.asarray(seq[llength:llength+tlength])
TEST_SET = np.asarray(seq[llength+tlength:])

num_epochs = 100
batch_size = 32
featureColumns = np.shape(TRAINING_SET)[1]

# Define a function to get data as a batch; you can use this function for test
# and validation as well by simply passing toShuffle=False, which replaces
# tf.train.shuffle_batch with tf.train.batch.
def data_input_fn(trainset, batch_size, num_epochs, toShuffle):
    data_f = trainset[:, :(featureColumns-1)]
    data_l = trainset[:, (featureColumns-1)]
    data_f_single, data_l_single = tf.train.slice_input_producer([data_f, data_l], num_epochs=num_epochs, shuffle=toShuffle)
    if toShuffle is True:
        data_f_batch, data_l_batch = tf.train.shuffle_batch([data_f_single, data_l_single], batch_size=batch_size, capacity=400, min_after_dequeue=2*batch_size)
    else:
        data_f_batch, data_l_batch = tf.train.batch([data_f_single, data_l_single], batch_size=batch_size, capacity=400)
    return data_f_batch, data_l_batch

def main():
    # Specify that all features have real-value data
    feature_columns = [tf.contrib.layers.real_valued_column("", dimension=3)]

    # Build a 3-layer DNN with 10, 20, 10 units respectively.
    classifier = tf.contrib.learn.DNNClassifier(feature_columns=feature_columns,
                                                hidden_units=[10, 20, 10],
                                                n_classes=10,
                                                model_dir="/tmp/iris_model")
    # Fit model.
    classifier.fit(input_fn=lambda: data_input_fn(TRAINING_SET, batch_size, num_epochs, True), steps=4000)

    # Evaluate accuracy.
    accuracy_test_score = classifier.evaluate(input_fn=lambda: data_input_fn(VALIDATION_SET, batch_size, num_epochs, False),
                                              steps=1)["accuracy"]
    accuracy_validation_score = classifier.evaluate(input_fn=lambda: data_input_fn(TEST_SET, batch_size, num_epochs, False),
                                                    steps=1)["accuracy"]
    print("\nValidation Accuracy: {0:0.2f}\nTest Accuracy: {1:0.2f}\n".format(accuracy_validation_score, accuracy_test_score))

    # Classify two new flower samples.
    def new_samples():
        return np.array(
            [[327, 8, 3],
             [47, 8, 0]], dtype=np.float32)
    predictions = list(classifier.predict_classes(input_fn=new_samples))
This gives:
TypeError: 'Tensor' object is not callable
You need to use a function for input_fn, not just a tensor:
TRAINING_SET = np.asarray(seq[:llength])
VALIDATION_SET = np.asarray(seq[llength:llength+tlength])
TEST_SET = np.asarray(seq[llength+tlength:])

num_epochs = 100
batch_size = 32

# Define a function to get data as a batch; you can use this function for test
# and validation as well by simply changing shuffle=False and replacing
# tf.train.shuffle_batch with tf.train.batch.
def data_input_fn(trainset, batch_size, num_epochs):
    data_f = trainset[:, :3]
    data_l = trainset[:, 3]
    data_f_single, data_l_single = tf.train.slice_input_producer([data_f, data_l], num_epochs=num_epochs, shuffle=True)
    data_f_batch, data_l_batch = tf.train.shuffle_batch([data_f_single, data_l_single], batch_size=batch_size, capacity=400, min_after_dequeue=2*batch_size)
    return data_f_batch, data_l_batch

# Use this function as input_fn to fit
classifier.fit(input_fn=lambda: data_input_fn(TRAINING_SET, batch_size, num_epochs), steps=4000)
I have built a system that leverages Google ML Engine to train various text classifiers using a simple flat CNN architecture (borrowed from the excellent WildML post). I've also leaned heavily on the ML Engine trainer template which exists here - specifically using the Tensorflow core functions.
My issue is that while the model trains and learns parameters correctly, I cannot get the serialized export in the binary SavedModel format (i.e. - the .pb files) to retain the learned weights. I can tell because when I use the gcloud local predict API on the exported model folder, it makes randomized predictions each time - leading me to believe that while the graph structure is being saved in the proto-buf format, the associated weights from the checkpoint file are not being carried over.
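For what it's worth, I've been sanity-checking that the checkpoint itself holds trained values with tf.train.NewCheckpointReader (a small sketch; the path and variable name below are just examples from my setup):

import tensorflow as tf

# Inspect the variables stored in the training checkpoint, to confirm the
# weights were actually written before the export step (path is an example).
reader = tf.train.NewCheckpointReader("/tmp/job/model/model.ckpt-1000")
for name, shape in reader.get_variable_to_shape_map().items():
    print(name, shape)
# Spot-check one tensor's values, e.g.:
# print(reader.get_tensor("embedding/W"))

The checkpoint looks fine on my end, which is why I suspect the export step.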
Here's the code for my run function:
def run(...):
    # ... code to load and transform train/test data
    with train_graph.as_default():
        with tf.Session(graph=train_graph).as_default() as session:
            # Features and label tensors as read using filename queue
            features, labels = model.input_fn(
                x_train,
                y_train,
                num_epochs=num_epochs,
                batch_size=train_batch_size
            )

            # Returns the training graph and global step tensor
            tf.logging.info("Train vocab size: {:d}".format(vocab_size))
            train_op, global_step_tensor, cnn, train_summaries = model.model_fn(
                model.TRAIN,
                sequence_length,
                num_classes,
                label_values,
                vocab_size,
                embedding_size,
                filter_sizes,
                num_filters
            )
            tf.logging.info("Created simple training CNN with ({}) filter types".format(filter_sizes))

            # Set up writers
            train_summary_op = tf.summary.merge(train_summaries)
            train_summary_dir = os.path.join(job_dir, "summaries", "train")
            # Generate writer
            train_summary_writer = tf.summary.FileWriter(train_summary_dir, session.graph)

            # Initialize all variables
            session.run(tf.global_variables_initializer())
            session.run(tf.local_variables_initializer())

            model_dir = os.path.abspath(os.path.join(job_dir, "model"))
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)

            saver = tf.train.Saver()

            def train_step(x_batch, y_batch):
                """A single training step."""
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: 0.5
                }
                step, _, loss, accuracy = session.run([global_step_tensor, train_op, cnn.loss, cnn.accuracy],
                                                      feed_dict=feed_dict)
                time_str = datetime.datetime.now().isoformat()
                if step % 10 == 0:
                    tf.logging.info("{}: step {}, loss {:g}, acc {:g}".format(time_str, step, loss, accuracy))
                # Return current step
                return step

            def eval_step(x_batch, y_batch, train_step, total_steps):
                """Evaluates the model on a dev set."""
                feed_dict = {
                    cnn.input_x: x_batch,
                    cnn.input_y: y_batch,
                    cnn.dropout_keep_prob: 1.0
                }
                step, loss, accuracy, scores, predictions = session.run([global_step_tensor, cnn.loss, cnn.accuracy, cnn.scores, cnn.predictions],
                                                                        feed_dict=feed_dict)
                # Get metrics
                y_actual = np.argmax(y_batch, 1)
                model_metrics = precision_recall_fscore_support(y_actual, predictions)
                #print(scores)

                time_str = datetime.datetime.now().isoformat()
                print("\n---- EVALUATION ----")
                avg_precision = np.mean(model_metrics[0], axis=0)
                avg_recall = np.mean(model_metrics[1], axis=0)
                avg_f1 = np.mean(model_metrics[2], axis=0)
                print("{}: step {}, loss {:g}, acc {:g}, prec {:g}, rec {:g}, f1 {:g}".format(time_str, step, loss, accuracy, avg_precision, avg_recall, avg_f1))
                print("Model metrics: ", model_metrics)
                print("---- EVALUATION ----\n")

            # Generate batches
            batches = data_helpers.batch_iter(
                list(zip(features, labels)), train_batch_size, num_epochs)

            # Training loop. For each batch...
            for batch in batches:
                x_batch, y_batch = zip(*batch)
                current_step = train_step(x_batch, y_batch)
                if current_step % 20 == 0 or current_step == 1:
                    eval_step(x_eval, y_eval, current_step, total_steps)

            # Checkpoint directory. TensorFlow assumes this directory already exists, so we need to create it.
            print(model_dir)
            trained_model = saver.save(session, os.path.join(job_dir, 'model') + "/model.ckpt", global_step=current_step)
            print(trained_model)
            print("Saved final model checkpoint to {}".format(trained_model))

            # Only perform this if chief
            if is_chief:
                build_and_run_exports(trained_model, job_dir,
                                      model.SERVING_INPUT_FUNCTIONS[model.TEXT],
                                      sequence_length, num_classes, label_values,
                                      vocab_size, embedding_size, filter_sizes,
                                      num_filters, vocab_processor)
And my build_and_run_exports function:
def build_and_run_exports(...):
    # Check if an export already exists - if so, delete it
    export_dir = os.path.join(job_dir, 'export')
    if os.path.exists(export_dir):
        print("Export currently exists - going to delete:", export_dir)
        shutil.rmtree(export_dir)

    # Create exporter
    exporter = tf.saved_model.builder.SavedModelBuilder(export_dir)

    # Restore prediction graph
    prediction_graph = tf.Graph()
    with prediction_graph.as_default():
        with tf.Session(graph=prediction_graph) as session:
            # Get serving inputs
            features, inputs_dict = serving_input_fn()

            # Set up inputs
            inputs_info = {
                name: tf.saved_model.utils.build_tensor_info(tensor)
                for name, tensor in inputs_dict.iteritems()
            }

            # Load model
            cnn = TextCNN(
                sequence_length=sequence_length,
                num_classes=num_classes,
                vocab_size=vocab_size,
                embedding_size=embedding_size,
                filter_sizes=list(map(int, filter_sizes.split(","))),
                num_filters=num_filters,
                input_tensor=features)

            # Restore model
            saver = tf.train.Saver()
            saver.restore(session, latest_checkpoint)

            # Set up outputs
            outputs = {
                'logits': cnn.scores,
                'probabilities': cnn.probabilities,
                'predicted_indices': cnn.predictions
            }

            # Create output info
            output_info = {
                name: tf.saved_model.utils.build_tensor_info(tensor)
                for name, tensor in outputs.iteritems()
            }

            # Set up signature definition
            signature_def = tf.saved_model.signature_def_utils.build_signature_def(
                inputs=inputs_info,
                outputs=output_info,
                method_name=sig_constants.PREDICT_METHOD_NAME
            )

            # Create graph export
            exporter.add_meta_graph_and_variables(
                session,
                tags=[tf.saved_model.tag_constants.SERVING],
                signature_def_map={
                    sig_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def
                },
                legacy_init_op=tf.saved_model.main_op.main_op()
            )

    # Export model
    exporter.save()
And last, but not least, the TextCNN model:
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by convolutional, max-pooling and softmax layers.
    """
    def __init__(
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0,
            dropout_keep_prob=0.5, input_tensor=None):

        # Set up input
        if input_tensor is not None:
            self.input_x = input_tensor
            self.dropout_keep_prob = tf.constant(1.0)
        else:
            self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
            self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        # Placeholder for labels
        self.input_y = tf.placeholder(tf.int32, [None, num_classes], name="input_y")

        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)

        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)

        # Create a convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution layer
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])

        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)

        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")

        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss

        with tf.name_scope("probabilities"):
            self.probabilities = tf.nn.softmax(logits=self.scores)

        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
I'm hoping I'm just missing something simple in how I'm creating the TF graph / session and restoring state.
Thank you in advance for your help!
This is caused by the behavior of tf.saved_model.main_op.main_op() (code), which randomly initializes all of the variables in the graph. However, legacy_init_op runs after the variables are restored from the checkpoint (the restore happens here, followed by the legacy_init_op here).
The solution is simply not to re-initialize all of the variables; for example, in your code:
from tensorflow.python.ops import variables
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import control_flow_ops

def my_main_op():
    init_local = variables.local_variables_initializer()
    init_tables = lookup_ops.tables_initializer()
    return control_flow_ops.group(init_local, init_tables)

def build_and_run_exports(...):
    ...
    # Create graph export
    exporter.add_meta_graph_and_variables(
        session,
        tags=[tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            sig_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature_def
        },
        legacy_init_op=my_main_op()
    )
    # Export model
    exporter.save()
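As a sanity check after switching to this, you can load the SavedModel back in and confirm the weights are the trained ones (a minimal sketch; the export path and tensor name are examples):

import tensorflow as tf

# Load the exported SavedModel into a fresh session and inspect a weight
# tensor; after the fix its values should no longer change between runs.
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               "/tmp/job/export")
    embedding_w = sess.graph.get_tensor_by_name("embedding/W:0")
    print(sess.run(embedding_w)[:2, :5])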