I want to save the train and validation loss in CSV files in tflearn, and then reload them like we do in Keras with history to plot graphs. Please help me.
I'm not sure this will help, but you can save the model with this code: model.save('mnist.tflearn'). After that, you can reload the model whenever you want. Below is an example of reloading the model:
from __future__ import division, print_function, absolute_import
import tflearn.datasets.mnist as mnist
import csv
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
from tflearn.metrics import Accuracy
X, Y, test_x, test_y = mnist.load_data(one_hot=True)
shape = 28
X = X.reshape([-1, shape, shape, 1])
test_x = test_x.reshape([-1, shape, shape, 1])
###################################
# Image transformations
###################################
# normalisation of images
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
# Create extra synthetic training data by flipping & rotating images
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_rotation(max_angle=25.)
###################################
# Define network architecture
###################################
# Input is a 28x28 greyscale image with a single channel
network = input_data(shape=[None, shape, shape, 1],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
# 1: Convolution layer with 32 filters (2x2)
conv_1 = conv_2d(network, 32, 2, activation='relu', name='conv_1')
# 2: Max pooling layer
network = max_pool_2d(conv_1, 2)
# 3: Dropout layer to combat overfitting
network = dropout(network, 0.8)
# 4: Convolution layer with 64 filters
conv_2 = conv_2d(network, 64, 2, activation='relu', name='conv_2')
# 5: Max pooling layer
network = max_pool_2d(conv_2, 2)
# 6: Dropout layer
network = dropout(network, 0.8)
# 7: Convolution layer with 64 filters
conv_3 = conv_2d(network, 64, 2, activation='relu', name='conv_3')
# 8: Max pooling layer
network = max_pool_2d(conv_3, 2)
# 9: Dropout layer
network = dropout(network, 0.8)
# 10: Convolution layer with 128 filters
conv_4 = conv_2d(network, 128, 2, activation='relu', name='conv_4')
# 11: Max pooling layer
network = max_pool_2d(conv_4, 2)
# 12: Dropout layer
network = dropout(network, 0.8)
# 13: Convolution layer with 256 filters
conv_5 = conv_2d(network, 256, 2, activation='relu', name='conv_5')
# 14: Max pooling layer
network = max_pool_2d(conv_5, 2)
# 15: Dropout layer
network = dropout(network, 0.8)
# 16: Convolution layer with 256 filters
conv_6 = conv_2d(network, 256, 2, activation='relu', name='conv_6')
# 17: Max pooling layer
network = max_pool_2d(conv_6, 2)
# 18: Dropout layer
network = dropout(network, 0.8)
# 19: Fully-connected 1024 node layer
network = fully_connected(network, 1024, activation='relu')
# 20: Fully-connected layer with ten outputs (one per digit class)
network = fully_connected(network, 10, activation='softmax')
# Configure how the network will be trained
acc = Accuracy(name="Accuracy")
network = regression(network, optimizer='adam',
                     loss='categorical_crossentropy',
                     learning_rate=0.0005, metric=acc)
# Wrap the network in a model object
model = tflearn.DNN(network)
model.load('mnist.tflearn')
for i in range(len(test_x)):
    im = [test_x[i]]
    a = model.predict(im)
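To actually save the train and validation loss to CSV, as asked, one option is a tflearn training callback. This is a sketch of mine, not taken from the answer above: the training_state attribute names (epoch, loss_value, acc_value, val_loss, val_acc) come from tflearn's TrainingState and may differ by version.

import csv
import tflearn

class LossCSVLogger(tflearn.callbacks.Callback):
    def __init__(self, filename):
        self.filename = filename
        with open(self.filename, 'w') as f:
            csv.writer(f).writerow(['epoch', 'train_loss', 'train_acc', 'val_loss', 'val_acc'])

    def on_epoch_end(self, training_state):
        # Append one row per epoch so the file can be reloaded and plotted later
        with open(self.filename, 'a') as f:
            csv.writer(f).writerow([training_state.epoch,
                                    training_state.loss_value,
                                    training_state.acc_value,
                                    training_state.val_loss,
                                    training_state.val_acc])

logger = LossCSVLogger('history.csv')
model.fit(X, Y, validation_set=(test_x, test_y), n_epoch=10, callbacks=[logger])
# Reload later, much like a Keras history object:
# import pandas as pd; pd.read_csv('history.csv').plot(x='epoch')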
I am training a DCGAN model with tensorflow.keras, and I added BatchNormalization layers to both the generator and the discriminator.
I train the GAN with the following steps:
1. Train the discriminator with real images and images from the generator (using generator.predict).
2. Train the adversarial network (compiled with discriminator.trainable=False).
Then I found that after a few rounds the training loss returned by train_on_batch() goes to zero for both the generator and the discriminator. But when I use test_on_batch() the loss is still huge for the generator, and the generated images are all a mess.
At first I thought this was because in step 2, when training the adversarial network, the discriminator's input contains only fake images, so the batch normalization layers see a different distribution than in step 1, where both fake and real images are fed.
But even after I removed all batch normalization layers from the discriminator, the same problem persisted; it only disappeared once all batch normalization layers were removed. I also found that the presence of Dropout layers makes no difference. I am wondering why batch normalization can cause such a problem, even though the generator is fed noise with the same distribution.
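One thing that may help frame the question (a minimal sketch of mine, assuming TF 2.x eager execution, not taken from the post): BatchNormalization normalizes with batch statistics in training mode but with moving averages in inference mode, which is one reason train_on_batch and test_on_batch can report very different losses while BN layers are present.

import numpy as np
import tensorflow as tf

bn = tf.keras.layers.BatchNormalization()
x = np.random.normal(loc=5.0, scale=2.0, size=(32, 4)).astype(np.float32)

train_out = bn(x, training=True)   # uses this batch's mean/variance
infer_out = bn(x, training=False)  # uses the barely warmed-up moving averages

print(float(tf.reduce_mean(train_out)))  # roughly 0: batch statistics used
print(float(tf.reduce_mean(infer_out)))  # still close to 5 early in training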
# Model definition (aliases assumed: M = tf.keras.models, L = tf.keras.layers, A = tf.keras.activations)
class DCGAN_128:
    def __init__(self, hidden_dim):
        generator = M.Sequential()
        generator.add(L.Dense(128 * 8 * 8, input_shape=[hidden_dim]))
        generator.add(L.Reshape([8, 8, 128]))
        generator.add(L.UpSampling2D())                              # [16, 16, 128]
        generator.add(L.Conv2D(128, kernel_size=3, padding="same"))  # [16, 16, 128]
        generator.add(L.LayerNormalization())
        generator.add(L.ReLU())
        generator.add(L.UpSampling2D())                              # [32, 32, 128]
        generator.add(L.Conv2D(64, kernel_size=5, padding="same"))   # [32, 32, 64]
        generator.add(L.LayerNormalization())
        generator.add(L.ReLU())
        generator.add(L.UpSampling2D())                              # [64, 64, 64]
        generator.add(L.Conv2D(32, kernel_size=7, padding="same"))   # [64, 64, 32]
        generator.add(L.LayerNormalization())
        generator.add(L.ReLU())
        generator.add(L.UpSampling2D())                              # [128, 128, 32]
        generator.add(L.Conv2D(3, kernel_size=3, padding="same", activation=A.sigmoid))  # [128, 128, 3]

        discriminator = M.Sequential()
        discriminator.add(L.Conv2D(32, kernel_size=5, strides=2, padding="same", input_shape=[128, 128, 3]))
        discriminator.add(L.LeakyReLU())
        # discriminator.add(L.Dropout(0.25))                         # [64, 64, 32]
        discriminator.add(L.Conv2D(64, kernel_size=3, strides=2, padding="same"))
        # discriminator.add(L.BatchNormalization(epsilon=1e-5))
        discriminator.add(L.LeakyReLU())
        # discriminator.add(L.Dropout(0.25))                         # [32, 32, 64]
        discriminator.add(L.Conv2D(128, kernel_size=3, strides=2, padding="same"))
        discriminator.add(L.LayerNormalization())
        discriminator.add(L.LeakyReLU())                             # [16, 16, 128]
        discriminator.add(L.Dropout(0.25))
        discriminator.add(L.Conv2D(256, kernel_size=3, strides=2, padding="same"))
        discriminator.add(L.LayerNormalization())
        discriminator.add(L.LeakyReLU())                             # [8, 8, 256]
        discriminator.add(L.Dropout(0.25))
        discriminator.add(L.Conv2D(512, kernel_size=3, strides=2, padding="same"))
        discriminator.add(L.LeakyReLU())                             # [4, 4, 512]
        discriminator.add(L.Flatten())
        discriminator.add(L.Dense(1, activation=A.sigmoid))

        self.model_gen = generator
        self.model_dis = discriminator
        self.adv_input = L.Input([hidden_dim])
        self.adv_output = discriminator(generator(self.adv_input))
        self.model_adversarial = M.Model(self.adv_input, self.adv_output)
# Training (aliases assumed: Opt = tf.keras.optimizers, Lo = tf.keras.losses)
hidden_dim = 100
dcgan = DCGAN_128(hidden_dim)
data_loader = AnimeFacesLoader([128, 128])
batch_size = 32
n_rounds = 40000

dis_model = dcgan.model_dis
gen_model = dcgan.model_gen
adv_model = dcgan.model_adversarial
gen_model.summary()
adv_model.summary()

dis_model.compile(Opt.Adam(0.0002), Lo.binary_crossentropy)
dis_model.trainable = False  # freeze the discriminator inside the adversarial model
adv_model.compile(Opt.Adam(0.0002), Lo.binary_crossentropy)

layer_outputs = [layer.output for layer in dis_model.layers]
visual_model = tf.keras.Model(dis_model.input, layer_outputs)
for rounds in range(n_rounds):
    # Save a 4x4 tile of generated images every 100 rounds
    if rounds % 100 == 0 and rounds > 0:
        noise = np.random.uniform(-1, 1, [16, hidden_dim])
        tiled_images = np.zeros([4 * 128, 4 * 128, 3]).astype(np.uint8)
        generated_imgs = gen_model.predict(noise)
        generated_imgs *= 256
        generated_imgs = generated_imgs.astype(np.uint8)
        for i in range(16):
            tiled_images[int(i / 4) * 128: int(i / 4) * 128 + 128,
                         int(i % 4) * 128: int(i % 4) * 128 + 128, :] = generated_imgs[i, :, :, :]
        Image.fromarray(tiled_images).save("Output/DCGAN/" + "rounds_{0}.jpg".format(rounds))
        '''
        layer_visualization = visual_model.predict(generated_imgs[:1])
        for i in range(len(layer_visualization)):
            plt.imshow(layer_visualization[i][0, :, :, 0])
            plt.show()
        '''
    # Train discriminator on real & fake images
    real_imgs = data_loader.get_batch(batch_size)
    real_ys = np.ones([batch_size, 1])
    noise = np.random.uniform(-1, 1, [batch_size, hidden_dim])
    fake_ys = np.zeros([batch_size, 1])
    fake_imgs = gen_model.predict(noise)
    imgs = np.concatenate([real_imgs, fake_imgs], axis=0)
    ys = np.concatenate([real_ys, fake_ys], axis=0)
    loss_dis = dis_model.train_on_batch(imgs, ys)
    print("Round {}, Loss dis:{:.4f}".format(rounds, loss_dis))
    loss_dis_test = dis_model.test_on_batch(imgs, ys)
    print(loss_dis_test)
    # Train generator via the adversarial model (discriminator frozen)
    noise = np.random.uniform(-1, 1, [batch_size, hidden_dim])
    fake_ys = np.ones([batch_size, 1])
    loss_gen = adv_model.train_on_batch(noise, fake_ys)
    print("Round {}, Loss gen:{:.4f}".format(rounds, loss_gen))
    loss_gen_test = adv_model.test_on_batch(noise, fake_ys)
    print(loss_gen_test)
I am following a course on deep learning and I am currently working on CNNs. The training set is 8000 photos (4000 cats and 4000 dogs), the test set is 2000/2000, and the image size I am using is 64x64 in RGB. I am using Keras with two Conv2D/MaxPooling layers of 32 filters, a flatten layer, and two dense layers of 128 and 1 outputs. My problem is that this setup performs at 15 minutes per epoch, which for 25 epochs means at least 6 hours of training; on top of that, some epochs freeze for a while at 7999/8000. I am running this on Windows 10 and Anaconda with Python 3.7 and TensorFlow 1.13. Is this good performance, or can I improve it? I was expecting better performance from the new Turing architecture.
# -*- coding: utf-8 -*-
# Part 1 - Building the convolutional neural network
import tensorflow as tf
from keras import backend as K
config = tf.ConfigProto(intra_op_parallelism_threads=6,
                        inter_op_parallelism_threads=6,
                        allow_soft_placement=True,
                        device_count={'CPU': 1, 'GPU': 1})
session = tf.Session(config=config)
K.set_session(session)
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (64, 64, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 1, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
weights = classifier.get_weights()
# Part 2 - Fitting the CNN to the images
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale = 1./255,
                                   shear_range = 0.2,
                                   zoom_range = 0.2,
                                   horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory('dataset/training_set',
                                                 target_size = (64, 64),
                                                 batch_size = 32,
                                                 class_mode = 'binary')
test_set = test_datagen.flow_from_directory('dataset/test_set',
                                            target_size = (64, 64),
                                            batch_size = 32,
                                            class_mode = 'binary')
classifier.fit_generator(training_set,
                         steps_per_epoch = 8000,
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 2000)
classifier.save("my first model")
Thank you
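One detail worth double-checking in the code above, since it bears directly on the 15-minutes-per-epoch figure (my observation, not part of the original post): in Keras 2, steps_per_epoch counts generator batches, not images. With batch_size=32, steps_per_epoch=8000 processes roughly 256,000 images per epoch, 32 times more work than a single pass over the 8000 training images. A sketch of the adjusted call:

# One full pass over the data is len(dataset) // batch_size steps
classifier.fit_generator(training_set,
                         steps_per_epoch = 8000 // 32,    # 250 batches = 8000 images
                         epochs = 25,
                         validation_data = test_set,
                         validation_steps = 2000 // 32)   # ~62 validation batches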
My immediate problem is that all of the various CNN regression models I've tried always return the same (or very similar) values and I'm trying to figure out why. But I would be open to a wide range of suggestions.
My dataset looks like this:
x: 64x64 greyscale images arranged into a 64 x 64 x n ndarray
y: Values between 0 and 1, each corresponding to an image (think of this as some sort of proportion)
weather: 4 weather readings from the time each image was taken (ambient temperature, humidity, dewpoint, air pressure)
The goal is to use the images and weather data to predict y. Since I'm working with images, I thought a CNN would be appropriate (please let me know if there are other strategies here).
From what I understand, CNNs are most often used for classification tasks--it's rather unusual to use them for regression. But in theory, it shouldn't be too different--I just need to change the loss function to MSE/RMSE and the last activation function to linear (although maybe a sigmoid is more appropriate here since y is between 0 and 1).
The first hurdle I ran into was trying to figure out how to incorporate the weather data, and the natural choice was to incorporate them into the first fully connected layer. I found an example here: How to train mix of image and data in CNN using ImageAugmentation in TFlearn
The second hurdle I ran into was determining an architecture. Normally I would just pick a paper and copy its architecture, but I couldn't find anything on CNN image regression. So I tried a (fairly simple) network with 3 convolutional layers and 2 fully connected layers, then I tried VGGNet and AlexNet architectures from https://github.com/tflearn/tflearn/tree/master/examples
Now the problem I'm having is that all of the models I'm trying output the same value, namely the mean y of the training set. Looking at tensorboard, the loss function flattens out fairly quickly (after around 25 epochs). Do you know what's going on here? While I do understand the basics of what each layer is doing, I have no intuition on what makes a good architecture for a particular dataset or task.
Here is an example. I am using VGGNet from the tflearn examples page:
tf.reset_default_graph()
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_90degrees_rotation(rotations=[0, 1, 2, 3])
convnet = input_data(shape=[None, size, size, 1],
                     data_augmentation=img_aug,
                     name='hive')
weathernet = input_data(shape=[None, 4], name='weather')
convnet = conv_2d(convnet, 64, 3, activation='relu', scope='conv1_1')
convnet = conv_2d(convnet, 64, 3, activation='relu', scope='conv1_2')
convnet = max_pool_2d(convnet, 2, strides=2, name='maxpool1')
convnet = conv_2d(convnet, 128, 3, activation='relu', scope='conv2_1')
convnet = conv_2d(convnet, 128, 3, activation='relu', scope='conv2_2')
convnet = max_pool_2d(convnet, 2, strides=2, name='maxpool2')
convnet = conv_2d(convnet, 256, 3, activation='relu', scope='conv3_1')
convnet = conv_2d(convnet, 256, 3, activation='relu', scope='conv3_2')
convnet = conv_2d(convnet, 256, 3, activation='relu', scope='conv3_3')
convnet = max_pool_2d(convnet, 2, strides=2, name='maxpool3')
convnet = conv_2d(convnet, 512, 3, activation='relu', scope='conv4_1')
convnet = conv_2d(convnet, 512, 3, activation='relu', scope='conv4_2')
convnet = conv_2d(convnet, 512, 3, activation='relu', scope='conv4_3')
convnet = max_pool_2d(convnet, 2, strides=2, name='maxpool4')
convnet = conv_2d(convnet, 512, 3, activation='relu', scope='conv5_1')
convnet = conv_2d(convnet, 512, 3, activation='relu', scope='conv5_2')
convnet = conv_2d(convnet, 512, 3, activation='relu', scope='conv5_3')
convnet = max_pool_2d(convnet, 2, strides=2, name='maxpool5')
convnet = fully_connected(convnet, 4096, activation='relu', scope='fc6')
convnet = merge([convnet, weathernet], 'concat')
convnet = dropout(convnet, .75, name='dropout1')
convnet = fully_connected(convnet, 4096, activation='relu', scope='fc7')
convnet = dropout(convnet, .75, name='dropout2')
convnet = fully_connected(convnet, 1, activation='sigmoid', scope='fc8')
convnet = regression(convnet,
                     optimizer='adam',
                     learning_rate=learning_rate,
                     loss='mean_square',
                     name='targets')
model = tflearn.DNN(convnet,
                    tensorboard_dir='log',
                    tensorboard_verbose=0)
model.fit({'hive': x_train, 'weather': weather_train},
          {'targets': y_train},
          n_epoch=1000,
          batch_size=batch_size,
          validation_set=({'hive': x_val, 'weather': weather_val},
                          {'targets': y_val}),
          show_metric=False,
          shuffle=True,
          run_id='poop')
To clarify what my objects are:
x_train is an ndarray of shape (n, 64, 64, 1)
weather_train is an ndarray of shape (n, 4)
y_train is an ndarray of shape (n, 1)
Overfitting is another concern, but given that the models perform poorly on the training set, I think I can worry about that later.
To address your concern regarding the same predicted value for all instances in your test set, you have a couple of options that don't involve changing the structure of your conv net (a short sketch follows this list):
You can rescale your target variable using sklearn's StandardScaler (which standardizes features by removing the mean and scaling to unit variance).
Scale the pixel data; performance generally increases with scaled pixel data. As a rule of thumb, divide pixel data by 255.0 (shown at the end of this post).
You can play around with the learning rate and the error function. (The CNN outputs the same value for all predictions because that is what it has determined is the point of minimum error.)
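A minimal sketch of the first two suggestions, reusing the question's x_train/y_train/x_val/y_val names (illustrative, not the poster's actual pipeline):

import numpy as np
from sklearn.preprocessing import StandardScaler

# Standardize the target; fit on the training targets only
y_scaler = StandardScaler()
y_train_std = y_scaler.fit_transform(y_train)   # y_train has shape (n, 1)
y_val_std = y_scaler.transform(y_val)

# Scale pixel data to [0, 1]
x_train = x_train.astype(np.float32) / 255.0
x_val = x_val.astype(np.float32) / 255.0

# After training, map predictions back to the original scale:
# y_pred = y_scaler.inverse_transform(model.predict(...))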
Next: if you are trying to perform regression, ensure that your final fully connected layer uses a linear activation function instead of a sigmoid. A linear activation takes the inputs to the neuron multiplied by the neuron weights and produces an output proportional to the input.
convnet = fully_connected(convnet, 1, activation='linear', scope='fc8')
Lastly, I have recently implemented ResNet50 for regression tasks in Keras. Here is the construction of that network; this version does not permit loading pretrained weights, and it must receive images of shape (224, 224, 3).
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D, MaxPooling2D, DepthwiseConv2D
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, Input, Add, ZeroPadding2D, GlobalAveragePooling2D, GlobalMaxPooling2D
from keras.models import Model
from keras import backend
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
    """
    A residual block.
    :param x: input tensor
    :param filters: integer, filters of the bottleneck layer
    :param kernel_size: kernel size of the bottleneck
    :param stride: stride of the first layer
    :param conv_shortcut: use a convolution shortcut if True, otherwise an identity shortcut
    :param name: string, block label
    :return: output tensor of the residual block
    """
    # bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    bn_axis = -1
    if conv_shortcut is True:
        shortcut = Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
        shortcut = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
    else:
        shortcut = x
    x = Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
    x = Activation('relu', name=name + '_1_relu')(x)
    x = Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
    x = Activation('relu', name=name + '_2_relu')(x)
    x = Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
    x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
    x = Add(name=name + '_add')([shortcut, x])
    x = Activation('relu', name=name + '_out')(x)
    return x
def stack1(x, filters, blocks, stride1=2, name=None):
    """
    A set of stacked residual blocks.
    :param x: input tensor
    :param filters: int, filters of the bottleneck layer in the block
    :param blocks: int, number of blocks in the stack
    :param stride1: stride of the first layer in the first block
    :param name: stack label
    :return: output tensor for the stacked blocks
    """
    x = block1(x, filters, stride=stride1, name=name + '_block1')
    for i in range(2, blocks + 1):
        x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
    return x
def resnet(height, width, depth, stack_fn, use_bias=False, nodes=256):
    """
    :param height: image height, int
    :param width: image width, int
    :param depth: number of channels, int
    :param stack_fn: function that stacks residual blocks
    :param use_bias: whether the stem convolution uses a bias term, bool
    :param nodes: width of the top dense layer of the CNN, int
    :return: a Keras model instance
    """
    input_shape = (height, width, depth)
    img_input = Input(shape=input_shape)
    x = ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
    x = Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
    x = ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = MaxPooling2D(3, strides=2, name='pool1_pool')(x)
    x = stack_fn(x)
    # top layer
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(nodes, activation='relu')(x)
    # perform regression
    x = Dense(1, activation='linear')(x)
    model = Model(img_input, x)
    return model
def resnet50(height, width, depth, nodes):
    def stack_fn(x):
        x = stack1(x, 64, 3, stride1=1, name='conv2')
        x = stack1(x, 128, 4, name='conv3')
        x = stack1(x, 256, 6, name='conv4')
        x = stack1(x, 512, 3, name='conv5')
        return x
    return resnet(height, width, depth, stack_fn, nodes=nodes)
It can be fit using some x_train, x_test, y_train, y_test data (where x_train/x_test are image data and y_train/y_test are numeric values on the interval [0, 1]):
# Note: load_images, quadruple_target, PATH_features, Target, target and lossFN
# are this author's own helpers/constants, defined elsewhere.
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam

scaler = MinMaxScaler()
images = load_images(df=target, path=PATH_features, resize_shape=(224, 224), quadruple=True)
images = images / 255.0  # scale pixel data to [0, 1]
images = images.astype(np.float32)
imshape = images.shape
target = target[Target]
target = quadruple_target(target, target=Target)
x_train, x_test, y_train, y_test = train_test_split(images, target, test_size=0.3, random_state=101)
y_train = scaler.fit_transform(y_train)
y_test = scaler.transform(y_test)
model = resnet50(imshape[1], imshape[2], imshape[3], nodes=256)
opt = Adam(lr=1e-5, decay=1e-5 / 200)
model.compile(loss=lossFN, optimizer=opt)
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), verbose=1, epochs=200)
pred = model.predict(x_test)
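A follow-up note of mine on the snippet above: model.predict returns values on the MinMax-scaled target range, so invert the scaler to recover the original units of y.

pred_original_units = scaler.inverse_transform(pred)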
I am trying to implement a CNN model based on the MNIST tutorial on the TF website.
Here is my code:
import tensorflow as tf
import numpy as np
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    # BreaKHis images are 32x32 pixels and have three color channels
    input_layer = tf.reshape(features, [-1, 32, 32, 3])

    # Convolutional Layer #1
    # Computes 32 features using a 5x5 filter with ReLU activation.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 32, 32, 3]
    # Output Tensor Shape: [batch_size, 32, 32, 32]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # print conv1.get_shape().as_list()

    # Pooling Layer #1
    # First max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 32, 32, 32]
    # Output Tensor Shape: [batch_size, 16, 16, 32]
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    # print pool1.get_shape().as_list()

    # Convolutional Layer #2
    # Computes 32 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 16, 16, 32]
    # Output Tensor Shape: [batch_size, 16, 16, 32]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=32,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # print conv2.get_shape().as_list()

    # Pooling Layer #2
    # Second max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 16, 16, 32]
    # Output Tensor Shape: [batch_size, 8, 8, 32]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    # print pool2.get_shape().as_list()

    # Convolutional Layer #3
    # Computes 64 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 8, 8, 32]
    # Output Tensor Shape: [batch_size, 8, 8, 64]
    conv3 = tf.layers.conv2d(
        inputs=pool2,
        filters=64,
        kernel_size=[5, 5],
        padding="same",
        activation=tf.nn.relu)
    # print conv3.get_shape().as_list()

    # Pooling Layer #3
    # Third max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 8, 8, 64]
    # Output Tensor Shape: [batch_size, 4, 4, 64]
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)

    # Flatten tensor into a batch of vectors
    # Input Tensor Shape: [batch_size, 4, 4, 64]
    # Output Tensor Shape: [batch_size, 4 * 4 * 64]
    pool3Shape = pool3.get_shape().as_list()
    # print pool3Shape
    pool2_flat = tf.reshape(pool2, [-1, 4*4*64])

    # Dense Layer
    # Densely connected layer with 64 neurons
    # Input Tensor Shape: [batch_size, 4 * 4 * 64]
    # Output Tensor Shape: [batch_size, 64]
    dense = tf.layers.dense(inputs=pool2_flat, units=64, activation=tf.nn.relu)

    # Add dropout operation; 0.6 probability that an element will be kept
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == learn.ModeKeys.TRAIN)

    # Logits layer
    # Input Tensor Shape: [batch_size, 64]
    # Output Tensor Shape: [batch_size, 2]
    logits = tf.layers.dense(inputs=dropout, units=2)

    loss = None
    train_op = None

    # Calculate loss (for both TRAIN and EVAL modes)
    if mode != learn.ModeKeys.INFER:
        onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=2)
        loss = tf.losses.softmax_cross_entropy(
            onehot_labels=onehot_labels, logits=logits)

    # Configure the training op (for TRAIN mode)
    if mode == learn.ModeKeys.TRAIN:
        train_op = tf.contrib.layers.optimize_loss(
            loss=loss,
            global_step=tf.contrib.framework.get_global_step(),
            learning_rate=0.001,
            optimizer="SGD")

    # Generate predictions
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
    }

    # Return a ModelFnOps object
    return model_fn_lib.ModelFnOps(
        mode=mode, predictions=predictions, loss=loss, train_op=train_op)
And it throws the error: InvalidArgumentError (see above for traceback): Assign requires shapes of both tensors to match. lhs shape= [1024,64] rhs shape= [2048,64]
I think there must be something wrong in the last FC layer, but I don't know where it is.
You can check this answer:
Tensorflow Assign requires shapes of both tensors to match. lhs shape= [20] rhs shape= [48]
Maybe you can install the previous version and try it again.
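For what it's worth, here is an observation of mine that is consistent with the numbers in the error message: flattening pool2 gives 8*8*32 = 2048 values per example, while the comments describe flattening pool3, which gives 4*4*64 = 1024, exactly the two sizes in "lhs shape= [1024,64] rhs shape= [2048,64]". A sketch of the corrected flatten step:

# Flatten pool3 (shape [batch, 4, 4, 64]) rather than pool2 (shape [batch, 8, 8, 32])
pool3_flat = tf.reshape(pool3, [-1, 4 * 4 * 64])
dense = tf.layers.dense(inputs=pool3_flat, units=64, activation=tf.nn.relu)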
So I have been looking a bit into TensorFlow and trying to get my head around one thing; I do not know what I am missing. I am checking out the tutorial from https://www.tensorflow.org/versions/master/tutorials/layers, and more particularly the model building step:
"""Model function for CNN."""
# Input Layer
input_layer = tf.reshape(features, [-1, 28, 28, 1])
# Convolutional Layer #1
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2 and Pooling Layer #2
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Dense Layer
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
If I understand it correctly, the input convolved with 32 5x5 filters creates an output of 32 feature maps at 28x28. Max pooling then reduces each feature map's width and height to 14x14.
So here is the problem for me to understand: we have 32 feature maps at 14x14, and we convolve them with another set of 64 5x5 filters. Should this produce 32*64 = 2048 feature maps of size 14x14, so that after the last max-pooling step we should reshape with [-1, 7*7*2048]? Or is the convolution in conv2 done with a depth of 32, i.e. the filters are sized [5, 5, 32]? Maybe I am missing some crucial theory or am just blind ;>
Would appreciate it if someone could help me understand this!
Cheers!
You are using padding "same".
If padding == "same":
output_shape[i] = ceil(input_shape[i] / strides[i])
If padding == "valid":
output_spatial_shape[i] = ceil((input_spatial_shape[i] - (spatial_filter_shape[i] - 1) * dilation_rate[i]) / strides[i])
To get the behavior you're expecting, use padding "valid".
Look at the API here: https://www.tensorflow.org/api_docs/python/tf/nn/convolution
I hope this helps.
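As a quick sanity check of the question's second guess (a sketch of mine in the tutorial's TF 1.x layers API, not part of the original answer): each of the 64 filters in conv2 spans all 32 input channels, i.e. has shape [5, 5, 32], so conv2 produces 64 feature maps rather than 32 * 64.

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 14, 14, 32])   # pool1 output
conv2 = tf.layers.conv2d(x, filters=64, kernel_size=[5, 5], padding="same")
print(conv2.shape)   # (?, 14, 14, 64)
pool2 = tf.layers.max_pooling2d(conv2, pool_size=[2, 2], strides=2)
print(pool2.shape)   # (?, 7, 7, 64) -> flatten with [-1, 7 * 7 * 64]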