How to input csv data in an autoencoder - tensorflow

I am using the code below that implements an autoencoder. How can I feed the autoencoder with data for training and testing?
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

class Autoencoder(object):
    def __init__(self, n_input, n_hidden, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer()):
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function

        network_weights = self._initialize_weights()
        self.weights = network_weights

        # model
        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(tf.add(tf.matmul(self.x, self.weights['w1']), self.weights['b1']))
        self.reconstruction = tf.add(tf.matmul(self.hidden, self.weights['w2']), self.weights['b2'])

        # cost
        self.cost = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        all_weights = dict()
        all_weights['w1'] = tf.get_variable("w1", shape=[self.n_input, self.n_hidden],
                                            initializer=tf.contrib.layers.xavier_initializer())
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        cost, opt = self.sess.run((self.cost, self.optimizer), feed_dict={self.x: X})
        return cost

    def calc_total_cost(self, X):
        return self.sess.run(self.cost, feed_dict={self.x: X})

    def transform(self, X):
        return self.sess.run(self.hidden, feed_dict={self.x: X})

    def generate(self, hidden=None):
        if hidden is None:
            hidden = self.sess.run(tf.random_normal([1, self.n_hidden]))
        return self.sess.run(self.reconstruction, feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        return self.sess.run(self.reconstruction, feed_dict={self.x: X})

    def getWeights(self):
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        return self.sess.run(self.weights['b1'])
# I instantiate the Autoencoder class; 5 is the dimension of a raw input,
# 2 is the dimension of the hidden layer
autoencoder = Autoencoder(5, 2, transfer_function=tf.nn.softplus, optimizer=tf.train.AdamOptimizer())

# I prepare my data
IRIS_TRAINING = "C:\\Users\\Desktop\\iris_training.csv"

# Feeding data to the autoencoder ???
# Train and test ??
How can I train this model with data from a CSV file? I think I need to run something like _, c = sess.run([optimizer, cost], feed_dict={self.x: batch_of_data}) inside a loop over epochs, but I am struggling with it.

Check out Stanford CS20SI's tutorial.
https://github.com/chiphuyen/tf-stanford-tutorials/blob/master/examples/05_csv_reader.py
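
For a quick start, here is a minimal sketch (not taken from that tutorial) that loads the CSV with numpy and feeds mini-batches through partial_fit. It assumes the file holds one sample per row with 5 numeric feature columns and a header line, so adjust usecols and skiprows to your file:

import numpy as np

# Load the whole CSV into memory (fine for a small file like the iris data).
# usecols/skiprows are assumptions about the layout -- adjust them to your file.
data = np.loadtxt(IRIS_TRAINING, delimiter=",", skiprows=1, usecols=(0, 1, 2, 3, 4), dtype=np.float32)

# Simple train/test split.
np.random.shuffle(data)
split = int(0.8 * len(data))
train_data, test_data = data[:split], data[split:]

n_epochs = 50
batch_size = 16

for epoch in range(n_epochs):
    np.random.shuffle(train_data)
    for i in range(0, len(train_data), batch_size):
        batch = train_data[i:i + batch_size]
        cost = autoencoder.partial_fit(batch)  # one optimizer step, returns the batch cost
    print("epoch %d  train cost %.4f  test cost %.4f"
          % (epoch, autoencoder.calc_total_cost(train_data), autoencoder.calc_total_cost(test_data)))

partial_fit already wraps the sess.run([cost, optimizer], feed_dict=...) call you mention, so the loop only has to hand it numpy batches.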

Related

BERT WITH BiDirectional LSTM loss is not decreasing

I am trying to port the Keras Semantic Similarity example to PyTorch Lightning. I followed all the necessary steps from the Keras example, but somehow the loss is not decreasing at all. I am just curious where I made a mistake.
Here is the colab link
PyTorch Model
MAX_LENGTH = 128

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers import BertModel, AdamW
import pytorch_lightning as pl

# Note: CustomDataset is defined elsewhere (see the colab link above).

class BertWithLSTM(pl.LightningModule):
    def __init__(self, train_path, test_path, val_path, num_classes):
        super().__init__()
        self.bert = BertModel.from_pretrained("bert-base-uncased", output_attentions=True)
        self.bert.trainable = False
        self.lstm = nn.LSTM(input_size=768, hidden_size=64, batch_first=True, bidirectional=True, dropout=0.3)
        self.dropout = nn.Dropout(p=0.3)
        self.output = nn.Linear(MAX_LENGTH * 2, 3)
        self.num_classes = num_classes
        self.train_path = train_path
        self.test_path = test_path
        self.val_path = val_path
        self.criterion = nn.CrossEntropyLoss()

    def train_dataloader(self):
        train_dataset = CustomDataset(csv_path=self.train_path)
        return DataLoader(dataset=train_dataset, batch_size=32, shuffle=False)

    def test_dataloader(self):
        test_dataset = CustomDataset(csv_path=self.test_path)
        return DataLoader(dataset=test_dataset)

    def val_dataloader(self):
        val_dataset = CustomDataset(csv_path=self.val_path)
        return DataLoader(dataset=val_dataset, batch_size=16, shuffle=True)

    def training_step(self, train_batch, batch_idx):
        input_ids, attention_mask, token_type_ids, labels = train_batch
        output = self(input_ids, attention_mask, token_type_ids, labels)
        loss = F.cross_entropy(output, labels)
        self.log("training_loss", loss, on_epoch=True, prog_bar=True, logger=True)
        return loss

    def validation_step(self, val_batch, batch_idx):
        input_ids, attention_mask, token_type_ids, labels = val_batch
        output = self(input_ids, attention_mask, token_type_ids, labels)
        loss = F.cross_entropy(output, labels)
        self.log("val_loss", loss, on_epoch=True, prog_bar=True, logger=True)

    def forward(self, input_ids, attention_mask, token_type_ids, label):
        bert_output = self.bert(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        sequence_output = bert_output.last_hidden_state
        output, (hidden_state, c_n) = self.lstm(sequence_output)
        avg_pool = torch.mean(output, dim=1)
        max_pool, indices = torch.max(output, dim=1)
        output = torch.cat((avg_pool, max_pool), 1)
        output = self.output(output)
        return output

    def configure_optimizers(self):
        optimizer = torch.optim.AdamW(self.parameters())
        return optimizer

When I use a transformer model to train a translator, why can't it convert y to a tensor?

import tensorflow as tf
from tensorflow.keras.layers import Input, Masking, LSTM, Embedding, Dense
from tensorflow.keras.models import Model

# Note: layer, max_tokens, train and test come from earlier code that is not shown here.

def encoder():
    input_layer = Input(batch_shape=(None, 13, 128))
    h = layer(input_layer)
    h = Masking(mask_value=0.0)(h)
    h, hidden_layer, cell_layer = LSTM(512, return_state=True)(h)
    model = Model(inputs=input_layer, outputs=[hidden_layer, cell_layer])
    return model

model = encoder()
model.summary()

class Decoder(Model):
    def __init__(self):
        super(Decoder, self).__init__()
        self.embedding_layer = Embedding(input_dim=max_tokens + 1, output_dim=128, mask_zero=True)
        self.lstm_layer = LSTM(512, return_state=True, return_sequences=True)
        self.dense_layer = Dense(units=max_tokens + 1)

    def call(self, inputer, hidden_layer=None, cell_layer=None):
        x = self.embedding_layer(inputer)
        if hidden_layer is not None and cell_layer is not None:
            x, h, c = self.lstm_layer(x, initial_state=[hidden_layer, cell_layer])
        else:
            x, h, c = self.lstm_layer(x)
        x = self.dense_layer(x)
        return x, h, c

decoder = Decoder()

for eng, germ in train.take(1):
    y, hidden, cell = decoder(germ)

@tf.function
def loss_fn(en_input, germ_input, germ_output, loss):
    with tf.GradientTape() as tape:
        enc_hidden_s, enc_cell_s = model(en_input)
        dec_output, dec_hidden_s, dec_cell_s = decoder(germ_input, enc_hidden_s, enc_cell_s)
        loss_value = loss(germ_output, dec_output)
    variables = model.trainable_variables + decoder.trainable_variables
    return loss_value, tape.gradient(loss_value, variables)

def fit_german_shape(german):
    input_data = german[:, :-1]
    output_data = german[:, 1:]
    return input_data, output_data

def training(train_data, test_data, optimizer, loss, epochs=5):
    batch_num = 0
    batch_num2 = 0
    epoch_loss = 0
    epoch_loss2 = 0
    for english, germany in train_data:
        germany_in, germany_out = fit_german_shape(germany)
        loss2, grad = loss_fn(english, germany_in, germany_out, loss)
        optimizer.apply_gradients(zip(grad, model.trainable_variables + decoder.trainable_variables))
        epoch_loss = epoch_loss + loss2
        batch_num = batch_num + 1
    avg_loss = epoch_loss / batch_num
    avg_loss3 = str(avg_loss)
    print("In this train epoch, the loss is " + avg_loss3)
    for english2, germany2 in test_data:
        germany_in2, germany_out2 = fit_german_shape(germany2)
        hidden_state, cell_state = model(english2)
        pred, temp1, temp2 = decoder(germany_in2, hidden_state, cell_state)
        loss3, temp3 = loss_fn(english2, germany_in2, germany_out2, loss)
        epoch_loss2 = loss3 + epoch_loss2
        batch_num2 = batch_num2 + 1
    avg_loss2 = epoch_loss2 / batch_num2
    avg_loss4 = str(avg_loss2)
    print("In this test epoch, the loss is " + avg_loss4)
    return avg_loss, avg_loss2
When I use this model to translate German to English, it reports the error "Tried to convert 'y' to a tensor and failed. Error: None values not supported." The error may occur in the decoder when assigning values to x, h, c, but I don't know why y cannot be converted to a tensor.

Training multiple models defined from the same class in Tensorflow 2.0 fails when using @tf.function

I am using Tensorflow 2.1 to create custom models and custom training loops. My aim is to compare the accuracy of different configurations of my neural network. Specifically, in this case, I am comparing the reconstruction error of an AutoEncoder with varying latent dimension: I train the network for one latent dimension, compute the test error, and then redo the process for another latent dimension, and so on. With this process I want to create plots like this:
Plot example:
To speed up the training I want to use the @tf.function decorator for the backpropagation part of my training loop. However, when I try to train several different networks, looping over the latent dimension, I get an error. See below:
ValueError: in converted code:
<ipython-input-19-78bafad21717>:41 grad *
loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:778 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
<ipython-input-19-78bafad21717>:33 call *
x_enc = self.encoder(inp)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:778 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
<ipython-input-19-78bafad21717>:9 call *
x = self.dense1(inp)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:748 __call__
self._maybe_build(inputs)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:2116 _maybe_build
self.build(input_shapes)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/layers/core.py:1113 build
trainable=True)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer.py:446 add_weight
caching_device=caching_device)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/training/tracking/base.py:744 _add_variable_with_custom_getter
**kwargs_for_getter)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/keras/engine/base_layer_utils.py:142 make_variable
shape=variable_shape if variable_shape else None)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:258 __call__
return cls._variable_v1_call(*args, **kwargs)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:219 _variable_v1_call
shape=shape)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/ops/variables.py:65 getter
return captured_getter(captured_previous, **kwargs)
/tensorflow-2.1.0/python3.6/tensorflow_core/python/eager/def_function.py:502 invalid_creator_scope
"tf.function-decorated function tried to create "
ValueError: tf.function-decorated function tried to create variables on non-first call.
I do not get this error when I remove the @tf.function decorator. I believe it has something to do with Tensorflow creating a computational graph when I use the decorator, and this graph remaining when I create another instance of my network, thus sparking an error since the old graph does not match the new instance of the network. But I am not sure about this at all, since I believe I am missing something fundamental about Tensorflow here!
Below is a very simple version of my code recreating the error. I have tried to remove all the unnecessary parts of the code to make it easier to read and debug. Furthermore, I am generating a very simple training and test set just for the sake of this question.
I have already tried the tf.keras.backend.clear_session() function without any luck.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Encoder
class build_encoder(tf.keras.Model):
    def __init__(self, latent_dim):
        super(build_encoder, self).__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(latent_dim, activation='relu', use_bias=True)

    def call(self, inp):
        x = self.dense1(inp)
        x = self.dense2(x)
        return x

# Decoder
class build_decoder(tf.keras.Model):
    def __init__(self,):
        super(build_decoder, self).__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(10, activation='relu', use_bias=True)

    def call(self, inp):
        x = self.dense1(inp)
        x = self.dense2(x)
        return x

# Full Autoencoder
class Autoencoder(tf.keras.Model):
    def __init__(self, latent_dim=5):
        super(Autoencoder, self).__init__()
        self.encoder = build_encoder(latent_dim)
        self.decoder = build_decoder()

    def call(self, inp):
        x_enc = self.encoder(inp)
        x_dec = self.decoder(x_enc)
        return x_dec

#### Here is the backpropagation with @tf.function decorator ####
@tf.function
def grad(model, inputs):
    with tf.GradientTape() as tape:
        loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
    return loss_value, tape.gradient(loss_value, model.trainable_variables)

# Training loop function
def train(x_train, model, num_epochs, batch_size, optimizer):
    train_loss = []
    for epoch in range(num_epochs):
        tf.random.shuffle(x_train)
        for i in range(0, len(x_train), batch_size):
            x_inp = x_train[i: i + batch_size]
            loss_value, grads = grad(model, x_inp)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_train, model(x_train))).numpy())
        if epoch % 100 == 0:
            print("Epoch: {}, Train loss: {:.9f}".format(epoch, train_loss[epoch]))
    return train_loss

#### Generating simple training and test data
num_train = 10000
num_test = 1000
x_train = s = np.random.uniform(0, 1, (num_train, 10)).astype(np.float32)
x_train[:, 6:10] = 0
x_test = s = np.random.uniform(0, 1, (num_test, 10)).astype(np.float32)
x_test[:, 6:10] = 0
###
batch_size = 8
num_epochs = 10000
test_loss = []

# Looping over the latent dimensions
for latent_dim in range(1, 10):
    model = Autoencoder(latent_dim=3)  # Creating an instance of my Autoencoder
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)  # Defining an optimizer
    train_loss = train(x_train, model=model, num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer)  # Training the network
    test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model(x_test))).numpy())

plt.figure()
plt.plot(test_loss, linewidth=1.5)
plt.grid(True)
plt.show()
There's an error in the code snippet you provided.
I changed the last Dense layer's units from 6 to 10.
# Decoder
class build_decoder(tf.keras.Model):
    def __init__(self,):
        super(build_decoder, self).__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(10, activation='relu', use_bias=True)

    def call(self, inp):
        x = self.dense1(inp)
        x = self.dense2(x)
        return x
As for your question on training multiple models:
The error message "ValueError: tf.function-decorated function tried to create variables on non-first call" means that the function decorated with @tf.function is creating a new variable on a later call; this is not allowed because the function has been turned into a graph.
I have modified your backpropagation method; I commented out your original code so you can observe the difference.
#### Here is the backpropagation with @tf.function decorator ####
# @tf.function
# def grad(model, inputs):
#     with tf.GradientTape() as tape:
#         loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
#     return loss_value, tape.gradient(loss_value, model.trainable_variables)

@tf.function
def MSE(y_true, y_pred):
    return tf.keras.losses.MSE(y_true, y_pred)

def backprop(inputs, model):
    with tf.GradientTape() as tape:
        loss_value = MSE(inputs, model(inputs))
    return loss_value, tape.gradient(loss_value, model.trainable_variables)

def gradient_func(model, inputs):
    return backprop(inputs, model)
The main culprit in your original code was calling model(inputs) as an input to the loss function; when you decorate a function with @tf.function, the decoration is inherited by every function it calls, which means the loss function is optimized as well.
Also, a way to train multiple models without overwriting a single variable is to put them into an array.
model_array = [0]

# Looping over the latent dimensions
for latent_dim in range(1, 10):
    model_array.append(Autoencoder(latent_dim))  # Creating an instance of my Autoencoder
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)  # Defining an optimizer
    train_loss = train(x_train, model=model_array[latent_dim], num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer)  # Training the network
    test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model_array[latent_dim](x_test))).numpy())
This arranges the models into an array, which makes them easier to access and debug.
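If you would rather keep @tf.function on the gradient step itself, another pattern (just a sketch, not part of the code above) is to create a fresh decorated function for every model instance, so each one is traced against that model's own variables and the variable creation always happens on that function's first call:

def make_grad_fn():
    # Returns a new tf.function each time, so every model gets its own trace.
    @tf.function
    def grad(model, inputs):
        with tf.GradientTape() as tape:
            loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
        return loss_value, tape.gradient(loss_value, model.trainable_variables)
    return grad

# Looping over the latent dimensions
for latent_dim in range(1, 10):
    model = Autoencoder(latent_dim)
    grad = make_grad_fn()  # fresh graph for this model
    # ... then run the training loop as in the original code, which looks up this grad function

Either way works; the key point is that a single traced graph cannot create the variables of a second model that is built later.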
Here is the complete modified code.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Encoder
class build_encoder(tf.keras.Model):
    def __init__(self, latent_dim):
        super(build_encoder, self).__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(latent_dim, activation='relu', use_bias=True)

    def call(self, inp):
        x = self.dense1(inp)
        x = self.dense2(x)
        return x

# Decoder
class build_decoder(tf.keras.Model):
    def __init__(self,):
        super(build_decoder, self).__init__()
        self.dense1 = tf.keras.layers.Dense(32, activation='relu', use_bias=True)
        self.dense2 = tf.keras.layers.Dense(10, activation='relu', use_bias=True)

    def call(self, inp):
        x = self.dense1(inp)
        x = self.dense2(x)
        return x

# Full Autoencoder
class Autoencoder(tf.keras.Model):
    def __init__(self, latent_dim=5):
        super(Autoencoder, self).__init__()
        self.encoder = build_encoder(latent_dim)
        self.decoder = build_decoder()

    def call(self, inp):
        x_enc = self.encoder(inp)
        x_dec = self.decoder(x_enc)
        return x_dec

#### Here is the backpropagation with @tf.function decorator ####
# @tf.function
# def grad(model, inputs):
#     with tf.GradientTape() as tape:
#         loss_value = tf.losses.mean_squared_error(inputs, model(inputs))
#     return loss_value, tape.gradient(loss_value, model.trainable_variables)

@tf.function
def MSE(y_true, y_pred):
    return tf.keras.losses.MSE(y_true, y_pred)

def backprop(inputs, model):
    with tf.GradientTape() as tape:
        loss_value = MSE(inputs, model(inputs))
    return loss_value, tape.gradient(loss_value, model.trainable_variables)

def gradient_func(model, inputs):
    return backprop(inputs, model)

# Training loop function
def train(x_train, model, num_epochs, batch_size, optimizer):
    train_loss = []
    for epoch in range(num_epochs):
        tf.random.shuffle(x_train)
        for i in range(0, len(x_train), batch_size):
            x_inp = x_train[i: i + batch_size]
            loss_value, grads = gradient_func(model, x_inp)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_train, model(x_train))).numpy())
        if epoch % 100 == 0:
            print("Epoch: {}, Train loss: {:.9f}".format(epoch, train_loss[epoch]))
    return train_loss

#### Generating simple training and test data
num_train = 10000
num_test = 1000
x_train = s = np.random.uniform(0, 1, (num_train, 10)).astype(np.float32)
x_train[:, 6:10] = 0
x_test = s = np.random.uniform(0, 1, (num_test, 10)).astype(np.float32)
x_test[:, 6:10] = 0
###
batch_size = 8
num_epochs = 10000
test_loss = []
model_array = [0]

# Looping over the latent dimensions
for latent_dim in range(1, 10):
    model_array.append(Autoencoder(latent_dim))  # Creating an instance of my Autoencoder
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00005)  # Defining an optimizer
    train_loss = train(x_train, model=model_array[latent_dim], num_epochs=num_epochs, batch_size=batch_size, optimizer=optimizer)  # Training the network
    test_loss.append(tf.reduce_mean(tf.losses.mean_squared_error(x_test, model_array[latent_dim](x_test))).numpy())

plt.figure()
plt.plot(range(1, 10), test_loss, linewidth=1.5)
plt.grid(True)
plt.show()
There is also a brief discussion about @tf.function and AutoGraph in the TF documentation in this link.
Feel free to ask questions, and I hope this helps you.

Loss value doesn't change (neural network)

I implemented a neural network model with TensorFlow (version 2.0) on Python 3.
I don't know whether the code works properly, because the loss value almost doesn't change.
Is the code wrong, or does the model simply have too many parameters (which would mean the code is right)?
Please tell me whether the code works properly.
The following is the code.
import tensorflow as tf
import numpy as np

fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

class Model(object):
    def __init__(self):
        self.var_list = []
        self.w_layer1 = tf.Variable(tf.random.normal(shape=[28*28, 1000], stddev=0.3, dtype=tf.float64))
        self.b_layer1 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.w_layer2 = tf.Variable(tf.random.normal(shape=[1000, 100], stddev=0.3, dtype=tf.float64))
        self.b_layer2 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.w_layer3 = tf.Variable(tf.random.normal(shape=[100, 100], stddev=0.3, dtype=tf.float64))
        self.b_layer3 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.w_layer4 = tf.Variable(tf.random.normal(shape=[100, 10], stddev=0.3, dtype=tf.float64))
        self.b_layer4 = tf.Variable(tf.random.normal(shape=[1,], stddev=0.1, dtype=tf.float64))
        self.var_list.append(self.w_layer1)
        self.var_list.append(self.b_layer1)
        self.var_list.append(self.w_layer2)
        self.var_list.append(self.b_layer2)
        self.var_list.append(self.w_layer3)
        self.var_list.append(self.b_layer3)
        self.var_list.append(self.w_layer4)
        self.var_list.append(self.b_layer4)

    def __call__(self, x):
        return self.w*x + self.b

    def dense_layer(self, inputs, w, b):
        z = tf.matmul(inputs, w) + b
        return tf.nn.relu(z)

    def output_layer(self, inputs, w, b):
        return tf.matmul(inputs, w) + b

    def flattend(self, inputs):
        inputs = tf.cast(inputs, tf.float64)
        return tf.reshape(inputs, [-1, 28*28])

    def loss(self, outputs, targets):
        predicted_y = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=outputs, labels=targets))
        return predicted_y

    def grad(self, x, target_y):
        with tf.GradientTape() as tape:
            tape.watch(self.var_list)
            loss_value = self.loss(self.run(x), target_y)
        return tape.gradient(loss_value, self.var_list)

    def run(self, inputs):
        inputs = self.flattend(inputs)
        layer1 = self.dense_layer(inputs, self.w_layer1, self.b_layer1)
        layer2 = self.dense_layer(layer1, self.w_layer2, self.b_layer2)
        layer3 = self.dense_layer(layer2, self.w_layer3, self.b_layer3)
        layer4 = self.output_layer(layer3, self.w_layer4, self.b_layer4)
        return layer4

    def optimizer(self):
        opt = tf.keras.optimizers.SGD(learning_rate=0.01)
        return opt

def make_onehot_labels(labels):
    depth = 10
    one_hot_labels = tf.one_hot(labels, depth)
    return one_hot_labels

fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images/255.0
test_images = test_images/255.0
train_labels = make_onehot_labels(train_labels)
test_labels = make_onehot_labels(test_labels)

ds_train_x = tf.data.Dataset.from_tensor_slices(train_images)
ds_train_y = tf.data.Dataset.from_tensor_slices(train_labels)
train_dataset = tf.data.Dataset.zip((ds_train_x, ds_train_y)).shuffle(1000).repeat().batch(300)

train_images = tf.convert_to_tensor(train_images)
train_labels = tf.convert_to_tensor(train_labels)
test_images = tf.convert_to_tensor(test_images)
test_labels = tf.convert_to_tensor(test_labels)

count = 1
model = Model()
opt = model.optimizer()
print(model.loss(model.run(train_images), train_labels))

for epoch in range(10):
    for data in train_dataset:
        if count % 200 == 0:
            print(model.loss(model.run(train_images), train_labels))
            #print(grads)
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count + 1
The following is the result of executing the above code:
tf.Tensor(184.81706096058622, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
tf.Tensor(1.2104797483683287, shape=(), dtype=float64)
The issue is in the following part
for epoch in range(10):
    for data in train_dataset:
        if count % 200 == 0:
            print(model.loss(model.run(train_images), train_labels))
            #print(grads)
            break
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count + 1
You have a break within the if condition, meaning you break out of your training loop (and restart a new epoch) when you hit count % 200 == 0. Remove the break and you'll see the error rate going down.
To elaborate on the issue: as soon as you reach count == 200 you break the loop, and the counter does not increase anymore, so you basically never reach anything beyond that if condition after 200 iterations (and that "anything beyond" includes your gradient application).
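A minimal sketch of one way to restructure that loop so the gradient step always runs; note that because train_dataset uses .repeat(), the inner loop never ends on its own, so the sketch bounds each epoch with an explicit steps_per_epoch (an assumed value):

steps_per_epoch = 200  # assumed; the dataset uses .repeat(), so each epoch needs an explicit bound

for epoch in range(10):
    for step, data in enumerate(train_dataset):
        grads = model.grad(data[0], data[1])
        opt.apply_gradients(zip(grads, model.var_list))
        count = count + 1
        if count % 200 == 0:
            print(model.loss(model.run(train_images), train_labels))  # log, but keep training
        if step + 1 >= steps_per_epoch:
            break  # end the epoch after a fixed number of batches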

why am I getting a 100% error rate (RNN for spam)

I am learning TensorFlow by modifying some examples I've found. To start off, I have taken an RNN example to try against the "Spam" data set from UCI.
My code and the sample data set can be found in full here:
https://trinket.io/python/c7d6b95452
When I run the code I get a 100% error rate. I figure that even if this data set is not well suited for this particular model, I'd get at least something better than that, so I don't think the problem is my choice of sample data set.
Below is my Python code. If anyone can suggest how to modify it to get the model to work properly, I would appreciate it! I'd also appreciate any general TensorFlow advice.
# Example for my blog post at:
# https://danijar.com/introduction-to-recurrent-networks-in-tensorflow/
import functools
import os
import sets
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import rnn, rnn_cell

def lazy_property(function):
    attribute = '_' + function.__name__

    @property
    @functools.wraps(function)
    def wrapper(self):
        if not hasattr(self, attribute):
            setattr(self, attribute, function(self))
        return getattr(self, attribute)
    return wrapper

class SequenceClassification:
    def __init__(self, data, target, dropout, num_hidden=200, num_layers=3):
        self.data = data
        self.target = target
        self.dropout = dropout
        self._num_hidden = num_hidden
        self._num_layers = num_layers
        self.prediction
        self.error
        self.optimize

    @lazy_property
    def prediction(self):
        # Recurrent network.
        network = rnn_cell.GRUCell(self._num_hidden)
        network = rnn_cell.DropoutWrapper(
            network, output_keep_prob=self.dropout)
        network = rnn_cell.MultiRNNCell([network] * self._num_layers)
        output, _ = tf.nn.dynamic_rnn(network, self.data, dtype=tf.float32)
        # Select last output.
        output = tf.transpose(output, [1, 0, 2])
        last = tf.gather(output, int(output.get_shape()[0]) - 1)
        # Softmax layer.
        weight, bias = self._weight_and_bias(
            self._num_hidden, int(self.target.get_shape()[1]))
        prediction = tf.nn.softmax(tf.matmul(last, weight) + bias)
        return prediction

    @lazy_property
    def cost(self):
        cross_entropy = -tf.reduce_sum(self.target * tf.log(self.prediction))
        return cross_entropy

    @lazy_property
    def optimize(self):
        learning_rate = 0.003
        optimizer = tf.train.RMSPropOptimizer(learning_rate)
        return optimizer.minimize(self.cost)

    @lazy_property
    def error(self):
        mistakes = tf.not_equal(
            tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))
        return tf.reduce_mean(tf.cast(mistakes, tf.float32))

    @staticmethod
    def _weight_and_bias(in_size, out_size):
        weight = tf.truncated_normal([in_size, out_size], stddev=0.01)
        bias = tf.constant(0.1, shape=[out_size])
        return tf.Variable(weight), tf.Variable(bias)

def main():
    sample_size = 10
    num_classes = 2  # spam or ham
    ##
    # import spam data
    ##
    spam_data = []
    spam_data_train = []
    spam_data_test = []
    data_dir = "."
    data_file = "spam.csv"
    with open(os.path.join(data_dir, data_file), "r") as file_handle:
        for row in file_handle:
            spam_data.append(row)
    spam_data = [line.rstrip().split(",") for line in spam_data if len(line) >= 1]
    random.shuffle(spam_data)
    spam_data_train = spam_data[0:int(len(spam_data)*.8)]
    spam_data_test = spam_data[int(len(spam_data)*.8):int(len(spam_data))]

    def next_train_batch(batch_size):
        a = random.sample(spam_data_train, batch_size)
        return [np.array([line[:-1] for line in a]), np.array([line[len(line)-1] for line in a])]

    def train_batch():
        return [np.array([line[:-1] for line in spam_data_train]), np.array([line[len(line)-1] for line in spam_data_train])]

    def next_test_batch(batch_size):
        a = random.sample(spam_data_test, batch_size)
        return [np.array([line[:-1] for line in a]), np.array([line[len(line)-1] for line in a])]

    def test_batch():
        return [np.array([line[:-1] for line in spam_data_test]), np.array([line[len(line)-1] for line in spam_data_test])]

    t = train_batch()
    train_input = t[0]
    train_target = t[1]
    test = test_batch()
    test_input = t[0]
    test_target = t[1]

    training_data = tf.placeholder(tf.float32, [None, sample_size, len(train_input[0])], "training_data")
    training_target = tf.placeholder(tf.float32, [None, sample_size], "training_target")
    testing_data = tf.placeholder(tf.float32, [None, len(test_input), len(test_input[0])], "testing_data")
    testing_target = tf.placeholder(tf.float32, [None, len(test_target)], "testing_target")
    dropout = tf.placeholder(tf.float32)

    training_model = SequenceClassification(training_data, training_target, dropout)
    tf.get_variable_scope().reuse_variables()
    testing_model = SequenceClassification(testing_data, testing_target, dropout)

    sess = tf.Session()
    init = tf.initialize_all_variables()
    sess.run(init)

    for epoch in range(sample_size):
        for _ in range(100):
            sample = random.sample(range(0, len(train_input)-1), sample_size)
            batch_train = [train_input[i] for i in sample]
            batch_target = [train_target[i] for i in sample]
            sess.run(training_model.optimize, {
                training_data: [batch_train], training_target: [batch_target], dropout: 0.5})
        error = sess.run(testing_model.error, {
            testing_data: [test_input], testing_target: [test_target], dropout: 1.0})
        print('Epoch {:2d} error {:3.1f}%'.format(epoch + 1, 100 * error))

if __name__ == '__main__':
    main()