What's the difference between what evaluate prints and what it returns? - tensorflow

I wrote custom metrics for specificity and sensitivity and passed them as metrics to model.compile(). This is the code I wrote (I've copied most of it from TensorFlow's website):
import tensorflow as tf
from tensorflow.keras import backend as K

class MulticlassSensitivity(tf.keras.metrics.Metric):
    def __init__(self, name='Sensitivity', **kwargs):
        super(MulticlassSensitivity, self).__init__(name=name, **kwargs)
        self.true_positive = self.add_weight(name='tp', shape=(2,), initializer='zeros')
        self.true_negative = self.add_weight(name='tn', shape=(2,), initializer='zeros')
        self.false_positive = self.add_weight(name='fp', shape=(2,), initializer='zeros')
        self.false_negative = self.add_weight(name='fn', shape=(2,), initializer='zeros')
        self.sensitivity = self.add_weight(name='sensitivity', shape=(2,), initializer='zeros')
        self.true_class = tf.Variable([False, True])
        self.false_class = tf.Variable([True, False])

    def update_state(self, y_true, y_pred, sample_weight=None):
        threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
        y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
        y_true = tf.cast(y_true, tf.bool)
        y_pred = tf.cast(y_pred, tf.bool)

        values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
        values = tf.cast(values, self.dtype)
        self.true_positive.assign_add(tf.reduce_sum(values, axis=0))

        values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, True))
        values = tf.cast(values, self.dtype)
        self.false_positive.assign_add(tf.reduce_sum(values, axis=0))

        values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, False))
        values = tf.cast(values, self.dtype)
        self.true_negative.assign_add(tf.reduce_sum(values, axis=0))

        values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, False))
        values = tf.cast(values, self.dtype)
        self.false_negative.assign_add(tf.reduce_sum(values, axis=0))

    def result(self):
        self.sensitivity.assign(tf.math.divide_no_nan(self.true_positive, tf.math.add(self.true_positive, self.false_negative)))
        return self.sensitivity[1]

    def get_config(self):
        """Returns the config"""
        config = {'num_classes': 2}
        base_config = super().get_config()
        return {**base_config, **config}

    def reset_states(self):
        reset_value = tf.zeros(2, dtype=self.dtype)
        K.batch_set_value([(v, reset_value) for v in self.variables])
class MulticlassSpecificity(tf.keras.metrics.Metric):
    def __init__(self, name='Specificity', **kwargs):
        super(MulticlassSpecificity, self).__init__(name=name, **kwargs)
        self.true_positive = self.add_weight(name='tp', shape=(2,), initializer='zeros')
        self.true_negative = self.add_weight(name='tn', shape=(2,), initializer='zeros')
        self.false_positive = self.add_weight(name='fp', shape=(2,), initializer='zeros')
        self.false_negative = self.add_weight(name='fn', shape=(2,), initializer='zeros')
        self.specificity = self.add_weight(name='specificity', shape=(2,), initializer='zeros')
        self.true_class = tf.Variable([False, True])
        self.false_class = tf.Variable([True, False])

    def update_state(self, y_true, y_pred, sample_weight=None):
        threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
        y_pred = tf.logical_and(y_pred >= threshold, tf.abs(y_pred) > 1e-12)
        y_true = tf.cast(y_true, tf.bool)
        y_pred = tf.cast(y_pred, tf.bool)

        values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
        values = tf.cast(values, self.dtype)
        self.true_positive.assign_add(tf.reduce_sum(values, axis=0))

        values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, True))
        values = tf.cast(values, self.dtype)
        self.false_positive.assign_add(tf.reduce_sum(values, axis=0))

        values = tf.logical_and(tf.equal(y_true, False), tf.equal(y_pred, False))
        values = tf.cast(values, self.dtype)
        self.true_negative.assign_add(tf.reduce_sum(values, axis=0))

        values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, False))
        values = tf.cast(values, self.dtype)
        self.false_negative.assign_add(tf.reduce_sum(values, axis=0))

    def result(self):
        self.specificity.assign(tf.math.divide_no_nan(self.true_negative, tf.math.add(self.true_negative, self.false_positive)))
        return self.specificity[1]

    def get_config(self):
        """Returns the config"""
        config = {'num_classes': 2}
        base_config = super().get_config()
        return {**base_config, **config}

    def reset_states(self):
        reset_value = tf.zeros(2, dtype=self.dtype)
        K.batch_set_value([(v, reset_value) for v in self.variables])
When I evaluate the model using model.evaluate(), this is what I get:
Test on test set:
86/86 [==============================] - 6s 59ms/step - loss: 0.2944 - categorical_accuracy: 0.4465 - f1_score: 0.4415 - Specificity: 0.2740 - Sensitivity: 0.8057
[0.29457294940948486,
0.4528070390224457,
array([0.4514866 , 0.45412117], dtype=float32),
0.33382710814476013,
0.6994695663452148]

When invoking evaluate, pass return_dict=True.
evaluate will then return a label for each value, so you will know what each one is.
Something like:
print(m.evaluate(x, y, return_dict=True))
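With return_dict=True the same values come back keyed by metric name. As a rough sketch of what that would look like here, assuming the returned list above follows the same metric order as the progress bar (loss, categorical_accuracy, f1_score, Specificity, Sensitivity):
results = m.evaluate(x, y, return_dict=True)
# e.g. {'loss': 0.2946, 'categorical_accuracy': 0.4528,
#       'f1_score': array([0.4514866, 0.45412117], dtype=float32),
#       'Specificity': 0.3338, 'Sensitivity': 0.6995}
print(results['Sensitivity'])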

Related

Unhashable Type TypeError: Tensors are unhashable. Instead, use tensor.ref() as the key

I am trying to implement a custom variational autoencoder. Following is the code to reproduce.
epsilon_std = 1.0
vx = tf.keras.layers.Input(batch_shape=(None, max_length_output), name='vae_enc_in')
vx_emb = tf.keras.layers.Embedding(
    vocab_tar_size,
    embedding_dim,
    input_length=max_length_output,
    name='vae_enc_emb'
)(vx)
vxbi = tf.keras.layers.Bidirectional(
    tf.keras.layers.LSTM(units, return_sequences=False, recurrent_dropout=0.2, name='vae_enc_lstm'),
    merge_mode='concat'
)(vx_emb)
vx_drop = tf.keras.layers.Dropout(0.2, name='vae_enc_drop')(vxbi)
vx_dense = tf.keras.layers.Dense(units, activation='linear', name='vae_enc_dense')(vx_drop)
vx_elu = tf.keras.layers.ELU(name='vae_enc_elu')(vx_dense)
vx_drop1 = tf.keras.layers.Dropout(0.2, name='vae_enc_drop2')(vx_elu)
z_mean = tf.keras.layers.Dense(20, name='vae_enc_dense2')(vx_drop1)
z_log_var = tf.keras.layers.Dense(20, name='vae_enc_dense3')(vx_drop1)

def sampling(args):
    z_mean, z_log_var = args
    epsilon = tf.random.normal(shape=(BATCH_SIZE, 20), mean=0.,
                               stddev=epsilon_std)
    return z_mean + tf.math.exp(z_log_var / 2) * epsilon

z = tf.keras.layers.Lambda(sampling, output_shape=(20,), name='vae_lambda')([z_mean, z_log_var])
repeated_context = tf.keras.layers.RepeatVector(max_length_output, name='vae_repeat')
decoder_h = tf.keras.layers.LSTM(units, return_sequences=True, recurrent_dropout=0.2, name='vae_dec_lstm')
decoder_mean = tf.keras.layers.TimeDistributed(
    tf.keras.layers.Dense(vocab_tar_size, activation='linear', name='vae_dec_lstm'),
    name='vae_dec_time_dist'
)
h_decoded = decoder_h(repeated_context(z))
x_decoded_mean = decoder_mean(h_decoded)

def zero_loss(y_true, y_pred):
    print("ZERO LOSS")
    return tf.zeros_like(y_pred)
And then I create a custom VAE layer:
class VAELayer(tf.keras.layers.Layer):
    def __init__(self, batch_size, max_len, **kwargs):
        self.is_placeholder = True
        super(VAELayer, self).__init__(**kwargs)
        self.target_weights = tf.constant(np.ones((batch_size, max_len)), tf.float32)

    def vae_loss(self, x, x_decoded_mean):
        #xent_loss = K.sum(metrics.categorical_crossentropy(x, x_decoded_mean), axis=-1)
        labels = tf.cast(x, tf.int32)
        xent_loss = tf.math.reduce_sum(
            tfa.seq2seq.sequence_loss(
                x_decoded_mean,
                labels,
                weights=self.target_weights,
                average_across_timesteps=False,
                average_across_batch=False
            ),
            axis=-1
        )
        #softmax_loss_function=softmax_loss_f), axis=-1)#, for sampled softmax
        kl_loss = -0.5 * tf.math.reduce_sum(1 + z_log_var - tf.math.square(z_mean) - tf.math.exp(z_log_var), axis=-1)
        return tf.math.reduce_mean(xent_loss + kl_loss)

    def call(self, inputs):
        x = inputs[0]
        x_decoded_mean = inputs[1]
        print(x.shape, x_decoded_mean.shape)
        loss = self.vae_loss(x, x_decoded_mean)
        print("Adding loss")
        self.add_loss(loss, inputs=inputs)
        print("Returning ones like")
        return tf.ones_like(x)
I compiled it successfully and also produced a test output by calling the model. But when I try to train it, it produces the error:
TypeError: Tensors are unhashable. (KerasTensor(type_spec=TensorSpec(shape=(), dtype=tf.float32, name=None), name='tf.math.reduce_sum_25/Sum:0', description="created by layer 'tf.math.reduce_sum_25'"))Instead, use tensor.ref() as the key.
The following is the code for compiling and fitting the model:
loss_layer = VAELayer(BATCH_SIZE, max_length_output)([vx, x_decoded_mean])
vae = tf.keras.Model(vx, [loss_layer], name='VariationalAutoEncoderLayer')
opt = tf.keras.optimizers.Adam(lr=0.01)  #SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
vae.compile(optimizer=opt, loss=[zero_loss])

def vae_sentence_generator():
    for ip, tg in train_dataset:
        yield tg.numpy()
vae.fit(vae_sentence_generator(), steps_per_epoch=steps_per_epoch, epochs=10)

Loss value doesn't change (neural network)

I implemented a neural network model with TensorFlow (version 2.0) on Python 3.
I don't know whether the code works properly, because the loss value almost doesn't change.
Is the code wrong, or does the model simply have too many parameters (which would mean the code is right)?
Please tell me whether the code works properly.
The following is the code.
import tensorflow as tf
import numpy as np

fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

class Model(object):
    def __init__(self):
        self.var_list = []
        self.w_layer1 = tf.Variable(tf.random.normal(shape=[28*28, 1000], stddev=0.3, dtype=tf.float64))
        self.b_layer1 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.w_layer2 = tf.Variable(tf.random.normal(shape=[1000, 100], stddev=0.3, dtype=tf.float64))
        self.b_layer2 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.w_layer3 = tf.Variable(tf.random.normal(shape=[100, 100], stddev=0.3, dtype=tf.float64))
        self.b_layer3 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.w_layer4 = tf.Variable(tf.random.normal(shape=[100, 10], stddev=0.3, dtype=tf.float64))
        self.b_layer4 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.var_list.append(self.w_layer1)
        self.var_list.append(self.b_layer1)
        self.var_list.append(self.w_layer2)
        self.var_list.append(self.b_layer2)
        self.var_list.append(self.w_layer3)
        self.var_list.append(self.b_layer3)
        self.var_list.append(self.w_layer4)
        self.var_list.append(self.b_layer4)

    def __call__(self, x):
        return self.w*x + self.b

    def dense_layer(self, inputs, w, b):
        z = tf.matmul(inputs, w) + b
        return tf.nn.relu(z)

    def output_layer(self, inputs, w, b):
        return tf.matmul(inputs, w) + b

    def flattend(self, inputs):
        inputs = tf.cast(inputs, tf.float64)
        return tf.reshape(inputs, [-1, 28*28])

    def loss(self, outputs, targets):
        predicted_y = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=targets))
        return predicted_y

    def grad(self, x, target_y):
        with tf.GradientTape() as tape:
            tape.watch(self.var_list)
            loss_value = self.loss(self.run(x), target_y)
        return tape.gradient(loss_value, self.var_list)

    def run(self, inputs):
        inputs = self.flattend(inputs)
        layer1 = self.dense_layer(inputs, self.w_layer1, self.b_layer1)
        layer2 = self.dense_layer(layer1, self.w_layer2, self.b_layer2)
        layer3 = self.dense_layer(layer2, self.w_layer3, self.b_layer3)
        layer4 = self.output_layer(layer3, self.w_layer4, self.b_layer4)
        return layer4

    def optimizer(self):
        opt = tf.keras.optimizers.SGD(learning_rate=0.01)
        return opt

def make_onehot_labels(labels):
    depth = 10
    one_hot_labels = tf.one_hot(labels, depth)
    return one_hot_labels

fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images/255.0
test_images = test_images/255.0
train_labels = make_onehot_labels(train_labels)
test_labels = make_onehot_labels(test_labels)
ds_train_x = tf.data.Dataset.from_tensor_slices(train_images)
ds_train_y = tf.data.Dataset.from_tensor_slices(train_labels)
train_dataset = tf.data.Dataset.zip((ds_train_x, ds_train_y)).shuffle(1000).repeat().batch(300)
train_images = tf.convert_to_tensor(train_images)
train_labels = tf.convert_to_tensor(train_labels)
test_images = tf.convert_to_tensor(test_images)
test_labels = tf.convert_to_tensor(test_labels)
count = 1
model = Model()
opt = model.optimizer()
print(model.loss(model.run(train_images), train_labels))

for epoch in range(10):
    for data in train_dataset:
        if count%200==0:
            print(model.loss(model.run(train_images), train_labels))
            #print(grads)
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count+1
The following is the result of executing the above code:
tf.Tensor(184.81706096058622, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
The issue is in the following part
for epoch in range(10):
    for data in train_dataset:
        if count%200==0:
            print(model.loss(model.run(train_images), train_labels))
            #print(grads)
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count+1
You have a break inside the if condition, meaning you break out of your training loop (and restart a new epoch) as soon as you hit count%200==0. Remove the break and you'll see the loss go down.
To elaborate on the issue: as soon as you reach count==200 you break the loop, and since the counter never increases again, you never get past that if condition after 200 iterations (and "anything beyond" includes applying your gradients).
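A minimal sketch of the fixed loop: the break is removed so gradients keep being applied, and because the dataset pipeline above uses .repeat(), an explicit steps_per_epoch bound (an assumed value, not from the original code) is used to end each epoch instead:
steps_per_epoch = 200  # assumption: .repeat() makes the dataset infinite, so bound each epoch explicitly
for epoch in range(10):
    for step, data in enumerate(train_dataset):
        if step >= steps_per_epoch:
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count + 1
        if count % 200 == 0:
            # monitoring only; training continues afterwards
            print(model.loss(model.run(train_images), train_labels))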

Why has this model not been built yet?

My code is similar to this:
class sub_sub_Block(tf.keras.Model):
    def __init__(self, kernel_size, filters):
        super(sub_sub_Block, self).__init__(name='')
        if req == True:
            self.layer1 = tf.keras.layers.Conv2D(filters1, (1, 1))
        if inf == True:
            self.layer2 = tf.keras.layers.Conv2D(filters, (1, 1))

    def call(self, input_tensor, training=False):
        x = self.layer1(input_tensor)
        x = self.layer2(x)
        return tf.nn.relu(x)

class sub_Block(tf.keras.Model):
    def __init__(self, kernel_size, filters):
        super(sub_Block, self).__init__(name='')
        if req == True:
            self.layer1 = sub_sub_Block(filters1, (1, 1))
        if inf == True:
            self.layer2 = tf.keras.layers.Conv2D(filters, (1, 1))

    def call(self, input_tensor, training=False):
        x = self.layer1(input_tensor)
        x = self.layer2(x)
        return tf.nn.relu(x)

class Block(tf.keras.Model):
    def __init__(self, kernel_size, filters):
        super(Block, self).__init__(name='')
        if req == True:
            self.layer1 = sub_Block(filters1, (1, 1))
        if inf == True:
            self.layer2 = tf.keras.layers.Conv2D(filters, (1, 1))

    def call(self, input_tensor, training=False):
        x = self.layer1(input_tensor)
        x = self.layer2(x)
        return tf.nn.relu(x)

block = Block(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print(block.summary())
The error message is:
ValueError: This model has not yet been built. Build the model first by calling build() or calling fit() with some data, or specify an input_shape argument in the first layer(s) for automatic build.
How can I solve this problem? Can't a tf.keras.Model be nested inside another tf.keras.Model block?

Attention layer output shape issue

I have been using BiLSTMs to classify each word in sentences and my input is n_sentences, max_sequence_length, classes. Recently, I have been trying to use this attention layer: https://www.kaggle.com/takuok/bidirectional-lstm-and-attention-lb-0-043
class Attention(Layer):
    def __init__(self, step_dim,
                 W_regularizer=None, b_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        self.step_dim = step_dim
        self.features_dim = 0
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        assert len(input_shape) == 3
        self.W = self.add_weight((input_shape[-1],),
                                 initializer=self.init,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.W_regularizer,
                                 constraint=self.W_constraint)
        self.features_dim = input_shape[-1]
        if self.bias:
            self.b = self.add_weight((input_shape[1],),
                                     initializer='zero',
                                     name='{}_b'.format(self.name),
                                     regularizer=self.b_regularizer,
                                     constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True

    def compute_mask(self, input, input_mask=None):
        return None

    def call(self, x, mask=None):
        features_dim = self.features_dim
        step_dim = self.step_dim
        eij = K.reshape(K.dot(K.reshape(x, (-1, features_dim)),
                              K.reshape(self.W, (features_dim, 1))), (-1, step_dim))
        if self.bias:
            eij += self.b
        eij = K.tanh(eij)
        a = K.exp(eij)
        if mask is not None:
            a *= K.cast(mask, K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        a = K.expand_dims(a)
        weighted_input = x * a
        return K.sum(weighted_input, axis=1)

    def compute_output_shape(self, input_shape):
        return input_shape[0], self.features_dim
My output needs to be (samples, steps, features), or I get this:
ValueError: Error when checking target: expected dense_2 to have 2 dimensions, but got array with shape (656, 109, 2)
So I switched:
return input_shape[0], self.features_dim
to
return input_shape[0], self.step_dim, self.features_dim
Doing so I get another error:
InvalidArgumentError: Incompatible shapes: [32,109] vs. [32]
[[{{node metrics/acc/Equal}}]]
What do I need to modify to actually use the attention layer on my sentences?
Are you using SeqSelfAttention?
I faced the same issue, and instead of SeqSelfAttention I used SeqWeightedAttention - it solved my problem.
model.add(SeqWeightedAttention())
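For reference, a minimal sketch of how that layer is typically wired into a BiLSTM model, assuming the answer refers to the keras-self-attention package (pip install keras-self-attention) and using placeholder dimensions; note that SeqWeightedAttention collapses the timestep axis into one vector per sequence:
from keras.models import Sequential
from keras.layers import Bidirectional, LSTM, Dense
from keras_self_attention import SeqWeightedAttention  # assumed package

model = Sequential()
model.add(Bidirectional(LSTM(64, return_sequences=True), input_shape=(109, 300)))  # placeholder dims
model.add(SeqWeightedAttention())  # (batch, steps, features) -> (batch, features)
model.add(Dense(2, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])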

How to create tf.RunMetadata and add it to writer when using tf.contrib.learn Module

I am using tf.contrib.learn.Experiment, Estimator and learn_runner to help train the model. When learn_runner runs, it implicitly creates a tf.train.MonitoredSession and calls its run() function, so I cannot pass the options and run_metadata arguments to run().
So how can I pass options and run_metadata to the run function and call summary_writer.add_run_metadata()?
I have been searching the net for a long time, but to no avail. Please help, or give some ideas on how to achieve this.
This is the code:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import numpy as np
from tensorflow.contrib import slim, training, learn

tf.logging.set_verbosity(tf.logging.DEBUG)

def variable_summaries(var):
    with tf.name_scope(var.name.split(':')[0]):
        mean = tf.reduce_mean(var)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.add_to_collection('variable_summaries', tf.summary.scalar('mean', mean))
        tf.add_to_collection('variable_summaries', tf.summary.scalar('stddev', stddev))
        tf.add_to_collection('variable_summaries', tf.summary.scalar('max', tf.reduce_max(var)))
        tf.add_to_collection('variable_summaries', tf.summary.scalar('min', tf.reduce_min(var)))
        tf.add_to_collection('variable_summaries', tf.summary.histogram('histogram', var))

def model_fn(features, labels, mode, params):
    id_ts = tf.get_collection('id_ts')[0]
    fc1 = slim.fully_connected(features, 10, tf.nn.relu, scope='fc1')
    variable_summaries(fc1)
    fc2 = slim.fully_connected(fc1, 2, None, scope='fc2')
    variable_summaries(fc2)
    for i in tf.trainable_variables():
        variable_summaries(i)
    logits = fc2
    prob = tf.nn.softmax(logits)
    predictions = tf.argmax(logits, axis=1)
    summay_op = tf.summary.merge_all('variable_summaries')
    scaffold = tf.train.Scaffold(summary_op=summay_op)
    if mode == learn.ModeKeys.TRAIN or mode == learn.ModeKeys.EVAL:
        onehot_labels = slim.one_hot_encoding(labels, 2)
        loss = tf.losses.softmax_cross_entropy(logits=logits, onehot_labels=onehot_labels)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, slim.get_global_step())
        eval_metric_ops = {
            'accuracy': tf.metrics.accuracy(labels, predictions),
            'auc': tf.metrics.auc(labels, predictions),
            'precision': tf.metrics.precision(labels, predictions),
            'recall': tf.metrics.recall(labels, predictions),
        }
        return learn.ModelFnOps(mode=mode,
                                predictions=predictions,
                                loss=loss,
                                train_op=train_op,
                                eval_metric_ops=eval_metric_ops,
                                scaffold=scaffold)
    elif mode == learn.ModeKeys.INFER:
        return learn.ModelFnOps(mode=mode, predictions={'prob': prob,
                                                        'fc1': fc1,
                                                        'fc2': fc2,
                                                        'id': id_ts})

def train_input_fn():
    fn = tf.train.string_input_producer(['data.csv'])
    reader = tf.TextLineReader()
    key, value = reader.read(fn)
    data_ts = tf.decode_csv(value, [[0.], [0.], [0.], [0.]], field_delim=',')
    batch_ts = tf.train.shuffle_batch(data_ts, 10, 1000, 10)
    id_ts = batch_ts[0]
    tf.add_to_collection('id_ts', id_ts)
    features_ts = tf.concat([tf.reshape(batch_ts[1], [-1, 1]), tf.reshape(batch_ts[2], [-1, 1])], axis=1)
    labels_ts = tf.cast(batch_ts[3], tf.int32)
    return features_ts, labels_ts

def eval_input_fn():
    fn = tf.train.string_input_producer(['data.csv'])
    reader = tf.TextLineReader()
    key, value = reader.read(fn)
    data_ts = tf.decode_csv(value, [[0.], [0.], [0.], [0.]], field_delim=',')
    batch_ts = tf.train.batch(data_ts, 10, 1000)
    id_ts = batch_ts[0]
    tf.add_to_collection('id_ts', id_ts)
    features_ts = tf.concat([tf.reshape(batch_ts[1], [-1, 1]), tf.reshape(batch_ts[2], [-1, 1])], axis=1)
    labels_ts = tf.cast(batch_ts[3], tf.int32)
    return features_ts, labels_ts

def run_experiment(_):
    session_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
                                    log_device_placement=False)
    run_config = learn.RunConfig(save_checkpoints_steps=100,
                                 model_dir='model_dir',
                                 session_config=session_config,
                                 keep_checkpoint_max=2)
    hparams = training.HParams(train_steps=1000)
    learn.learn_runner.run(experiment_fn=create_experiment_fn,
                           schedule='train_and_evaluate',
                           run_config=run_config,
                           hparams=hparams)

def create_experiment_fn(run_config, hparams):
    estimator = get_estimator_fn(config=run_config, params=hparams)
    return learn.Experiment(estimator=estimator,
                            train_input_fn=train_input_fn,
                            eval_input_fn=eval_input_fn,
                            train_steps=hparams.train_steps)

def get_estimator_fn(config, params):
    return learn.Estimator(model_fn=model_fn,
                           model_dir=config.model_dir,
                           config=config,
                           params=params)

if __name__ == '__main__':
    tf.app.run(main=run_experiment)