How do I make the bounding boxes in yolo v3 tighter (closer to the objects)? - tensorflow

I'm following this repo on creating a YOLO v3 model from scratch in PyTorch. The only problem is that the bounding boxes are not as tight (as close to the objects) in most of the images I tried. I compared them with the results of this tutorial on creating a YOLO v3 model in TensorFlow, and the TensorFlow model produces excellent bounding boxes that are as tight to the objects as possible.
I tried to understand how the calculations differ between the two, but I keep getting stuck on the differences between torch and tf.
I believe the code for the bounding boxes in the tf tutorial comes from here:
def yolo_layer(inputs, n_classes, anchors, img_size, data_format):
    """Creates Yolo final detection layer.
    Detects boxes with respect to anchors.
    Args:
        inputs: Tensor input.
        n_classes: Number of labels.
        anchors: A list of anchor sizes.
        img_size: The input size of the model.
        data_format: The input format.
    Returns:
        Tensor output.
    """
    n_anchors = len(anchors)
    inputs = tf.layers.conv2d(inputs, filters=n_anchors * (5 + n_classes),
                              kernel_size=1, strides=1, use_bias=True,
                              data_format=data_format)
    shape = inputs.get_shape().as_list()
    grid_shape = shape[2:4] if data_format == 'channels_first' else shape[1:3]
    if data_format == 'channels_first':
        inputs = tf.transpose(inputs, [0, 2, 3, 1])
    inputs = tf.reshape(inputs, [-1, n_anchors * grid_shape[0] * grid_shape[1],
                                 5 + n_classes])
    # stride = how many input-image pixels one grid cell covers, per axis
    strides = (img_size[0] // grid_shape[0], img_size[1] // grid_shape[1])
    box_centers, box_shapes, confidence, classes = \
        tf.split(inputs, [2, 2, 1, n_classes], axis=-1)
    x = tf.range(grid_shape[0], dtype=tf.float32)
    y = tf.range(grid_shape[1], dtype=tf.float32)
    x_offset, y_offset = tf.meshgrid(x, y)
    x_offset = tf.reshape(x_offset, (-1, 1))
    y_offset = tf.reshape(y_offset, (-1, 1))
    x_y_offset = tf.concat([x_offset, y_offset], axis=-1)
    x_y_offset = tf.tile(x_y_offset, [1, n_anchors])
    x_y_offset = tf.reshape(x_y_offset, [1, -1, 2])
    # centre = (sigmoid of the raw prediction + cell index) * stride, i.e. in input-image pixels
    box_centers = tf.nn.sigmoid(box_centers)
    box_centers = (box_centers + x_y_offset) * strides
    # width/height = exp(raw prediction) * anchor size (anchors given in input-image pixels)
    anchors = tf.tile(anchors, [grid_shape[0] * grid_shape[1], 1])
    box_shapes = tf.exp(box_shapes) * tf.to_float(anchors)
    confidence = tf.nn.sigmoid(confidence)
    classes = tf.nn.sigmoid(classes)
    inputs = tf.concat([box_centers, box_shapes,
                        confidence, classes], axis=-1)
    return inputs
The code for the bounding boxes in the PyTorch model, along with its explanation, comes from here:
def bbox_iou(box1, box2):
    """
    Returns the IoU of two bounding boxes
    """
    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:,0], box1[:,1], box1[:,2], box1[:,3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:,0], box2[:,1], box2[:,2], box2[:,3]
    # Get the coordinates of the intersection rectangle
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)
    # Intersection area
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)
    # Union area
    b1_area = (b1_x2 - b1_x1 + 1)*(b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1)*(b2_y2 - b2_y1 + 1)
    iou = inter_area / (b1_area + b2_area - inter_area)
    return iou

def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA=True):
    batch_size = prediction.size(0)
    stride = inp_dim // prediction.size(2)
    grid_size = inp_dim // stride
    bbox_attrs = 5 + num_classes
    num_anchors = len(anchors)
    prediction = prediction.view(batch_size, bbox_attrs*num_anchors, grid_size*grid_size)
    prediction = prediction.transpose(1,2).contiguous()
    prediction = prediction.view(batch_size, grid_size*grid_size*num_anchors, bbox_attrs)
    # Anchors are given in input-image pixels; bring them to the grid (feature-map) scale
    anchors = [(a[0]/stride, a[1]/stride) for a in anchors]
    # Sigmoid the centre_X, centre_Y and object confidence
    prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
    prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
    prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])
    # Add the center offsets
    grid = np.arange(grid_size)
    a, b = np.meshgrid(grid, grid)
    x_offset = torch.FloatTensor(a).view(-1,1)
    y_offset = torch.FloatTensor(b).view(-1,1)
    if CUDA:
        x_offset = x_offset.cuda()
        y_offset = y_offset.cuda()
    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1, num_anchors).view(-1,2).unsqueeze(0)
    prediction[:,:,:2] += x_y_offset
    # Log-space transform of height and width
    anchors = torch.FloatTensor(anchors)
    if CUDA:
        anchors = anchors.cuda()
    anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
    prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors
    prediction[:,:,5: 5 + num_classes] = torch.sigmoid((prediction[:,:, 5 : 5 + num_classes]))
    # Scale grid-relative boxes back up to input-image pixels
    prediction[:,:,:4] *= stride
    return prediction
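As far as I can tell, both snippets implement the same transform: sigmoid of the raw centre prediction plus the cell offset, exponential of the raw width/height times the anchor, everything scaled back to input-image pixels by the stride. Here is a minimal numeric sketch (plain numpy, one cell and one anchor, hypothetical values, assuming a square input so the stride is a single number) that I used to compare the two decodings side by side:

import numpy as np

# hypothetical raw network outputs for one cell / one anchor: (tx, ty, tw, th)
tx, ty, tw, th = 0.2, -0.1, 0.5, 0.3
cell_x, cell_y = 6, 4          # grid cell indices
stride = 32                    # img_size // grid_size, e.g. 416 // 13
anchor_w, anchor_h = 116, 90   # anchor in input-image pixels

sigmoid = lambda v: 1.0 / (1.0 + np.exp(-v))

# TensorFlow-style decode: offsets added on the grid, then multiplied by the stride;
# anchors kept in input-image pixels
tf_cx = (sigmoid(tx) + cell_x) * stride
tf_cy = (sigmoid(ty) + cell_y) * stride
tf_w = np.exp(tw) * anchor_w
tf_h = np.exp(th) * anchor_h

# PyTorch-style decode: anchors divided by the stride first, everything multiplied
# by the stride at the end
pt_cx = (sigmoid(tx) + cell_x) * stride
pt_cy = (sigmoid(ty) + cell_y) * stride
pt_w = np.exp(tw) * (anchor_w / stride) * stride
pt_h = np.exp(th) * (anchor_h / stride) * stride

print(np.allclose([tf_cx, tf_cy, tf_w, tf_h], [pt_cx, pt_cy, pt_w, pt_h]))  # True

So the decoding itself seems equivalent, which is part of why I'm stuck on where the difference in tightness comes from.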

Related

vectorized way to multiply and add specific axes in numpy array (convolutional layer backprop)

I was wondering how it would be possible to vectorize the following quadruple for-loop (this is to do with backprop in a convolutional layer).
import numpy as np  # assumed import for the toy example below

W = np.ones((2, 2, 3, 8)) # just a toy example
dW = np.zeros(W.shape)
dZ = np.ones((10, 4, 4, 8))*2
# get the shapes: m = samples/images; H_dim = Height of image; W_dim = Width of image; 8 = Channels/filters
(m, H_dim, W_dim, C) = dZ.shape
dA_prev = np.zeros((10, 4, 4, 3))
# add symmetric padding of 2 around height and width borders with 0-values; shape is now: (10, 8, 8, 3)
dA_prev = np.pad(dA_prev, ((0,0),(2,2),(2,2),(0,0)), mode='constant', constant_values=(0,0))
# loop over images
for i in range(m):
    # loop over height
    for h in range(H_dim):
        # loop over width
        for w in range(W_dim):
            # loop over channels/filters
            for c in range(C):
                vert_start = 1 * h # 1 = stride, just as an example
                vert_end = vert_start + 2 # 2 = vertical filter size, just as an example
                horiz_start = 1 * w # 1 = stride
                horiz_end = horiz_start + 2 # 2 = horizontal filter size, just as an example
                dW[:,:,:,c] += dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, :] * dZ[i, h, w, c]
                dA_prev[i, vert_start:vert_end, horiz_start:horiz_end, :] += W[:,:,:,c] * dZ[i, h, w, c] # dZ[i, h, w, c] is a scalar
Doing backprop on the bias is easy enough (db = np.sum(dZ, axis=(0,1,2), keepdims=True)), and the weights can be dealt with using stride tricks: reshape dZ and take the dot product with the windowed input (or use tensordot on the appropriate axes, or einsum).
from numpy.lib.stride_tricks import as_strided  # needed by _striding below

def _striding(array, stride_size, filter_shapes, Wout=None, Hout=None):
    strides = (array.strides[0], array.strides[1] * stride_size, array.strides[2] * stride_size, array.strides[1], array.strides[2], array.strides[3])
    strided = as_strided(array, shape=(array.shape[0], Hout, Wout, filter_shapes[0], filter_shapes[1], array.shape[3]), strides=strides, writeable=False)
    return strided

# A_prev is the forward-pass input; fh = fw = 2, n_prev_C = 3, C = n_C = 8 as in the toy example above
Hout = (A_prev.shape[1] - 2) // 1 + 1
Wout = (A_prev.shape[2] - 2) // 1 + 1
x_flat = _striding(array=A_prev, stride_size=2, filter_shapes=(2,2),
                   Wout=Wout, Hout=Hout).reshape(-1, 2 * 2 * A_prev.shape[3])
dout_descendant_flat = dout_descendant.reshape(-1, n_C)
dW = x_flat.T @ dout_descendant_flat # shape (fh * fw * n_prev_C, C)
dW = dW.reshape(fh, fw, n_prev_C, C)
This gives identical results to dW from the slow version, but doing something similar to get the derivative w.r.t. the input, which should yield the same result, doesn't. Here's what I've done:
dZ_pad = np.pad(dZ, ((0,0),(2,2),(2,2),(0,0)), mode='constant', constant_values=(0,0)) # padding to get the same shape as A_prev
dZ_pad_reshaped = _striding(array=dZ_pad, stride_size=1, filter_shapes=(2,2),
                            Wout=4, Hout=4) # the Hout and Wout dims are from the unpadded dims of A_prev
Wrot180 = np.rot90(W, 2, axes=(0,1)) # the filter height and width are in the first two axes, which we want to rotate
dA_prev = np.tensordot(dZ_pad_reshaped, Wrot180, axes=([3,4,5],[0,1,3]))
The shapes of dA_prev are right, but for some reason the results aren't identical to those of the slow version.
OK, it turns out the error was down to several things:
dZ needed to be dilated relative to the stride used in the forward propagation
the window function used for windowing dZ (done after the dilation of dZ) needed to be called with stride 1 (no matter the stride chosen in the forward propagation) and with the output heights and widths of the padded input (not the original, unpadded input -- this was the main mistake that took me days to debug); a minimal sketch of the dilation step is shown right below
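To illustrate what the dilation does, here is a toy example (hypothetical 4x4 dZ, forward stride of 2; the _dilate method further down implements the same idea plus the extra bottom/right padding):

import numpy as np

dZ = np.arange(1, 17).reshape(1, 4, 4, 1)  # toy gradient map, shape (batch, H, W, C)
# insert (stride - 1) zero rows/columns between the original entries,
# mirroring the np.insert calls in _dilate below
dZ_dilated = np.insert(dZ, range(1, dZ.shape[1]), 0, axis=1)
dZ_dilated = np.insert(dZ_dilated, range(1, dZ_dilated.shape[2]), 0, axis=2)
print(dZ_dilated.shape)  # (1, 7, 7, 1): zeros interleaved between the original values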
The relevant code is below, with comments explaining the shapes and operations as well as some further sources for reading. I've also included the forward propagation.
I should note that after days of debugging, writing various functions, reading, etc., the variable names changed along the way, so for ease of reading, here are the variables as defined in my question and their equivalents in the code below:
A_prev is x
dZ is dout_descendant
Hout is the height of dout_descendant
Wout is the width of dout_descendant
(as one would expect all references to self are to the class these functions are part of)
def _pad(self, array, pad_size, pad_val):
    '''
    only symmetric padding is implemented
    '''
    return np.pad(array, ((0, 0), (pad_size, pad_size), (pad_size, pad_size), (0, 0)), 'constant', constant_values=(pad_val, pad_val))

def _dilate(self, array, stride_size, pad_size, symmetric_filter_shape, output_image_size):
    # on dilation for backprop with stride>1,
    # see: https://medium.com/@mayank.utexas/backpropagation-for-convolution-with-strides-8137e4fc2710
    # see also: https://leimao.github.io/blog/Transposed-Convolution-As-Convolution/
    pad_bottom_and_right = (output_image_size + 2 * pad_size - symmetric_filter_shape) % stride_size
    for m in range(stride_size - 1):
        array = np.insert(array, range(1, array.shape[1], m + 1), 0, axis=1)
        array = np.insert(array, range(1, array.shape[2], m + 1), 0, axis=2)
    for _ in range(pad_bottom_and_right):
        array = np.insert(array, array.shape[1], 0, axis=1)
        array = np.insert(array, array.shape[2], 0, axis=2)
    return array

def _windows(self, array, stride_size, filter_shapes, out_height, out_width):
    '''
    inputs:
        array to create windows of
        stride_size: int
        filter_shapes: tuple(int): tuple of filter height and width
        out_height and out_width: int, respectively: output sizes for the windows
    returns:
        windows of array with shape (excl. dilation):
        array.shape[0], out_height, out_width, filter_shapes[0], filter_shapes[1], array.shape[3]
    '''
    strides = (array.strides[0], array.strides[1] * stride_size, array.strides[2] * stride_size, array.strides[1], array.strides[2], array.strides[3])
    return np.lib.stride_tricks.as_strided(array, shape=(array.shape[0], out_height, out_width, filter_shapes[0], filter_shapes[1], array.shape[3]), strides=strides, writeable=False)

def forward(self, x):
    '''
    expects inputs to be of shape: [batchsize, height, width, channel in]
    after init, filter_shapes are: [fh, fw, channel in, channel out]
    '''
    self.input_shape = x.shape
    x_pad = self._pad(x, self.pad_size, self.pad_val)
    self.input_pad_shape = x_pad.shape
    # get the shapes
    batch_size, h, w, Cin = self.input_shape
    # calculate output sizes; only symmetric padding is possible
    self.Hout = (h + 2*self.pad_size - self.fh) // self.stride + 1
    self.Wout = (w + 2*self.pad_size - self.fw) // self.stride + 1
    x_windows = self._windows(array=x_pad, stride_size=self.stride, filter_shapes=(self.fh, self.fw),
                              out_width=self.Wout, out_height=self.Hout) # 6D tensor with shape (batch_size, Hout, Wout, fh, fw, Cin)
    self.out = np.tensordot(x_windows, self.w, axes=([3,4,5], [0,1,2])) + self.b
    self.inputs = x_windows
    ## alternative 1: einsum approach, slower than other alternatives
    # self.out = np.einsum('noufvc,fvck->nouk', x_windows, self.w) + self.b
    ## alternative 2: column approach with simple dot product
    # z = x_windows.reshape(-1, self.fh * self.fw * Cin) @ self.W.reshape(self.fh*self.fw*Cin, Cout) + self.b # 2D matrix with shape (batch_size * Hout * Wout, Cout)
    # self.out = z.reshape(batch_size, Hout, Wout, Cout)

def backward(self, dout_descendant):
    '''
    dout_descendant has shape (batch_size, Hout, Wout, Cout)
    '''
    # get shapes
    batch_size, h, w, Cin = self.input_shape
    # we want to sum everything but the filters for b
    self.db = np.sum(dout_descendant, axis=(0,1,2), keepdims=True) # shape (1,1,1, Cout)
    # for dW we'll use the column approach with an ordinary dot product for variety ;) tensordot does the same without all the reshaping
    dout_descendant_flat = dout_descendant.reshape(-1, self.Cout) # new shape (batch_size * Hout * Wout, Cout)
    x_flat = self.inputs.reshape(-1, self.fh * self.fw * Cin) # shape (batch_size * Hout * Wout, fh * fw * Cin)
    dw = x_flat.T @ dout_descendant_flat # shape (fh * fw * Cin, Cout)
    self.dw = dw.reshape(self.fh, self.fw, Cin, self.Cout)
    del dout_descendant_flat # free memory
    # for dinputs: we'll get padded and dilated windows of dout_descendant and perform the tensordot with the 180-rotated W
    # for details, see https://medium.com/@mayank.utexas/backpropagation-for-convolution-with-strides-8137e4fc2710 ; also: https://pavisj.medium.com/convolutions-and-backpropagations-46026a8f5d2c ; also: https://youtu.be/Lakz2MoHy6o?t=835
    Wrot180 = np.rot90(self.w, 2, axes=(0,1)) # or also self.w[::-1, ::-1, :, :]
    # backprop for forward with stride > 1 is done on windowed dout that's padded and dilated with stride 1
    dout_descendant = self._dilate(dout_descendant, stride_size=self.stride, pad_size=self.pad_size, symmetric_filter_shape=self.fh, output_image_size=h)
    dout_descendant = self._pad(dout_descendant, pad_size=self.fw-1, pad_val=self.pad_val) # pad dout_descendant to dim: fh-1 (or fw-1); only symmetrical filters are supported
    dout_descendant = self._windows(array=dout_descendant, stride_size=1, filter_shapes=(self.fh, self.fw),
                                    out_height=h + 2 * self.pad_size, out_width=w + 2 * self.pad_size) # shape: (batch_size, h_padded, w_padded, fh, fw, Cout)
    self.dout = np.tensordot(dout_descendant, Wrot180, axes=([3,4,5],[0,1,3]))
    self.dout = self.dout[:, self.pad_size:-self.pad_size, self.pad_size:-self.pad_size, :]
    ## einsum alternative, but slower:
    # dinput = np.einsum('nhwfvk,fvck->nhwc', dout_windows, self.W)
I've left this answer here because all the other sources on Stack Overflow or GitHub that I could find using numpy stride tricks were implemented for convolutions with stride 1 (which doesn't require dilation of dZ), or they used very complex fancy-indexing operations that were extremely hard to follow (e.g. https://sgugger.github.io/convolution-in-depth.html#convolution-in-depth or https://github.com/parasdahal/deepnet/blob/51a9e61c351138b7dc637f4b748a0e6ca2e15595/deepnet/im2col.py)

draw a line in tensorflow

I want to create a human pose skeleton estimation network. For this, I have a two-part network: the first part generates 16 heatmaps as output (one heatmap per joint, so a key point can be extracted from each). Using these 16 key points I wish to create a human skeleton and feed it to the second half of my network. My problem is: how do I draw lines between the key points to create the skeleton? I couldn't find a way to do it on a tensor object using TensorFlow or Keras.
I know I'm a bit late, but here is some code that I think does what you're after (in TF v2.3). Hopefully it will save someone time in the future!
It uses solely TensorFlow ops, so you can use it in data loaders etc. The real pain here is that TensorFlow doesn't allow eager assignment, so you can't just update tensors by index. This works around that by creating two sparse tensors: one for the mask (where to apply the line) and another for the new values (what value to apply along the line). The code for designing the line itself might not be applicable in your case (it is based on https://stackoverflow.com/a/47381058, but ported away from numpy).
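To make that workaround concrete, here is a minimal sketch of the mask/new-values idea on its own (toy 4x4 image and hypothetical pixel indices, separate from the full answer code):

import tensorflow as tf

img = tf.zeros((4, 4), tf.float32)                          # toy image
indices = tf.constant([[0, 0], [1, 1], [2, 2]], tf.int64)   # hypothetical pixel coordinates of the "line"
vals = tf.constant([0.5, 1.0, 0.5], tf.float32)             # hypothetical intensities for those pixels

# mask: 1 where the line goes, 0 elsewhere
mask = tf.sparse.to_dense(tf.sparse.reorder(
    tf.sparse.SparseTensor(indices, tf.ones(3, tf.float32), img.shape)))
# new_values: the intensities scattered into an otherwise-zero image
new_values = tf.sparse.to_dense(tf.sparse.reorder(
    tf.sparse.SparseTensor(indices, vals, img.shape)))

# "assign" without item assignment: keep old pixels where mask is 0, take new ones where it is 1
img = img * (1 - mask) + new_values * mask

The full line-drawing code is below: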
import tensorflow as tf

def trapez(y, y0, w):
    return tf.clip_by_value(tf.minimum(y + 1 + w/2 - y0, -y + 1 + w/2 + y0), 0, 1)

def apply_output(img, yy, xx, val):
    stack = tf.stack([yy, xx], axis=1)
    stack = tf.cast(stack, tf.int64)
    values = tf.ones(stack.shape[0], tf.float32)
    mask = tf.sparse.SparseTensor(indices=stack, values=values, dense_shape=img.shape)
    mask = tf.sparse.reorder(mask)
    mask = tf.sparse.to_dense(mask)
    mask = tf.cast(mask, tf.float32)
    new_values = tf.sparse.SparseTensor(indices=stack, values=val, dense_shape=img.shape)
    new_values = tf.sparse.reorder(new_values)
    new_values = tf.sparse.to_dense(new_values)
    img = img * (1 - mask) + new_values * mask
    img = tf.cast(tf.expand_dims(img * 255, axis=-1), tf.uint8)
    return img

def weighted_line(img, r0, c0, r1, c1, w):
    output = img
    x = tf.range(c0, c1 + 1, dtype=tf.float32)
    slope = (r1-r0) / (c1-c0)
    w *= tf.sqrt(1 + tf.abs(slope)) / 2
    y = x * slope + (c1*r0-c0*r1) / (c1-c0)
    thickness = tf.math.ceil(w/2)
    yy = (tf.reshape(tf.math.floor(y), [-1, 1]) + tf.reshape(tf.range(-thickness-1, thickness+2), [1, -1]))
    xx = tf.repeat(x, yy.shape[1])
    values = tf.reshape(trapez(yy, tf.reshape(y, [-1, 1]), w), [-1])
    yy = tf.reshape(yy, [-1])
    limits_y = tf.math.logical_and(yy >= 0, yy < img.shape[0])
    limits_x = tf.math.logical_and(xx >= 0, xx < img.shape[1])
    limits = tf.math.logical_and(limits_y, limits_x)
    limits = tf.math.logical_and(limits, values > 0)
    yy = tf.cast(yy[limits], tf.float32)
    xx = tf.cast(xx[limits], tf.float32)
    return yy, xx, values[limits], apply_output(output, yy, xx, values[limits])
Just for a sanity check, you can call it with the following, and display it using opencv
if __name__ == "__main__":
    IMG = tf.zeros((500, 500), tf.float32)
    yy, xx, vals, FINAL_IMG = weighted_line(IMG, 10, 20, 100, 200, 5)
    jpeg_string = tf.io.encode_jpeg(FINAL_IMG)
    tf.io.write_file("output.jpg", jpeg_string)

    import cv2
    img = cv2.imread("output.jpg")
    cv2.imshow("Output", img)
    cv2.waitKey(0)

How to display the convolution filters used on a CNN with Tensorflow?

I would like to produce figures similar to this one (figure omitted):
To do that, with TensorFlow I load my model and then, using this code, I select the variable holding the filters of one layer:
# search for the name of the specific layer with the filters I want to display
for v in tf.trainable_variables():
    print(v.name)

# store the filters into a variable
var = [v for v in tf.trainable_variables() if v.name == "model/center/kernel:0"][0]
Using var.eval() I am able to store var in a numpy array.
This numpy array has shape (3, 3, 512, 512), which corresponds to the kernel size (3x3), the number of input channels (512), and the number of filters (512).
My problem is the following: how can I extract one filter from this (3, 3, 512, 512) array to display it? If I understand how to do that, I will work out how to display all 512 filters.
Since you are using TensorFlow, you might be using tf.keras.Sequential for building the CNN model; model.summary() gives the names of all the layers along with their shapes.
Once you have the layer name, you can visualize the convolutional filters of that layer of the CNN as shown in the code below:
# imports assumed by the snippet below; 'model' is your already-built/loaded tf.keras model
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras import backend as K

#-------------------------------------------------
#Utility function for displaying filters as images
#-------------------------------------------------
def deprocess_image(x):
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1
    x += 0.5
    x = np.clip(x, 0, 1)
    x *= 255
    x = np.clip(x, 0, 255).astype('uint8')
    return x

#---------------------------------------------------------------------------------------------------
#Utility function for generating patterns for given layer starting from empty input image and then
#applying Stochastic Gradient Ascent for maximizing the response of particular filter in given layer
#---------------------------------------------------------------------------------------------------
def generate_pattern(layer_name, filter_index, size=150):
    layer_output = model.get_layer(layer_name).output
    loss = K.mean(layer_output[:, :, :, filter_index])
    grads = K.gradients(loss, model.input)[0]
    grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
    iterate = K.function([model.input], [loss, grads])
    input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
    step = 1.
    for i in range(80):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
    img = input_img_data[0]
    return deprocess_image(img)

#------------------------------------------------------------------------------------------
#Generating convolution layer filters for intermediate layers using above utility functions
#------------------------------------------------------------------------------------------
layer_name = 'conv2d_4'
size = 299
margin = 5
results = np.zeros((8 * size + 7 * margin, 8 * size + 7 * margin, 3))
for i in range(8):
    for j in range(8):
        filter_img = generate_pattern(layer_name, i + (j * 8), size=size)
        horizontal_start = i * size + i * margin
        horizontal_end = horizontal_start + size
        vertical_start = j * size + j * margin
        vertical_end = vertical_start + size
        results[horizontal_start: horizontal_end, vertical_start: vertical_end, :] = filter_img
plt.figure(figsize=(20, 20))
plt.imshow(results.astype('uint8'))  # plt.savefig expects a filename, not an array; imshow/show displays the 8x8 grid
plt.show()
The above code visualizes only 64 filters of a layer; you can change that accordingly.
For more information, you can refer to this article.
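Note that generate_pattern visualizes the input pattern that maximizes each filter's response rather than the raw kernel weights. If you just want to display one raw 3x3 kernel from the (3, 3, 512, 512) array mentioned in the question, a minimal sketch would be (kernels, filter_index and in_channel are hypothetical names; kernels is the evaluated numpy weight array, e.g. the result of var.eval() from the question):

import matplotlib.pyplot as plt

# kernels has shape (3, 3, 512, 512) = (kernel_h, kernel_w, in_channels, out_channels)
filter_index = 0   # which of the 512 filters to look at
in_channel = 0     # there is a 3x3 slice per input channel, so pick one
single_kernel = kernels[:, :, in_channel, filter_index]  # shape (3, 3)

plt.imshow(single_kernel, cmap='gray')
plt.title('filter {}, input channel {}'.format(filter_index, in_channel))
plt.show()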

Why does the cost in my implementation of a deep neural network increase after a few iterations?

I am a beginner in machine learning and neural networks. Recently, after watching Andrew Ng's lectures on deep learning, I tried to implement a binary classifier using deep neural networks on my own.
The cost function is expected to decrease after each iteration.
In my program, however, it decreases slightly in the beginning but rapidly increases later. I tried changing the learning rate and the number of iterations, but to no avail. I am very confused.
Here is my code
1. Neural network classifier class
class NeuralNetwork:
    def __init__(self, X, Y, dimensions, alpha=1.2, iter=3000):
        self.X = X
        self.Y = Y
        self.dimensions = dimensions # Including input layer and output layer. Let example be dimensions=4
        self.alpha = alpha # Learning rate
        self.iter = iter # Number of iterations
        self.length = len(self.dimensions)-1
        self.params = {} # To store parameters W and b for each layer
        self.cache = {} # To store cache Z and A for each layer
        self.grads = {} # To store dA, dZ, dW, db
        self.cost = 1 # Initial value does not matter

    def initialize(self):
        np.random.seed(3)
        # If dimensions is 4, then layer 0 and 3 are input and output layers
        # So we only need to initialize w1, w2 and w3
        # There is no need of w0 for input layer
        for l in range(1, len(self.dimensions)):
            self.params['W'+str(l)] = np.random.randn(self.dimensions[l], self.dimensions[l-1])*0.01
            self.params['b'+str(l)] = np.zeros((self.dimensions[l], 1))

    def forward_propagation(self):
        self.cache['A0'] = self.X
        # For last layer, ie, the output layer 3, we need to activate using sigmoid
        # For layer 1 and 2, we need to use relu
        for l in range(1, len(self.dimensions)-1):
            self.cache['Z'+str(l)] = np.dot(self.params['W'+str(l)], self.cache['A'+str(l-1)]) + self.params['b'+str(l)]
            self.cache['A'+str(l)] = relu(self.cache['Z'+str(l)])
        l = len(self.dimensions)-1
        self.cache['Z'+str(l)] = np.dot(self.params['W'+str(l)], self.cache['A'+str(l-1)]) + self.params['b'+str(l)]
        self.cache['A'+str(l)] = sigmoid(self.cache['Z'+str(l)])

    def compute_cost(self):
        m = self.Y.shape[1]
        A = self.cache['A'+str(len(self.dimensions)-1)]
        self.cost = -1/m*np.sum(np.multiply(self.Y, np.log(A)) + np.multiply(1-self.Y, np.log(1-A)))
        self.cost = np.squeeze(self.cost)

    def backward_propagation(self):
        A = self.cache['A' + str(len(self.dimensions) - 1)]
        m = self.X.shape[1]
        self.grads['dA'+str(len(self.dimensions)-1)] = -(np.divide(self.Y, A) - np.divide(1-self.Y, 1-A))
        # Sigmoid derivative for final layer
        l = len(self.dimensions)-1
        self.grads['dZ' + str(l)] = self.grads['dA' + str(l)] * sigmoid_prime(self.cache['Z' + str(l)])
        self.grads['dW' + str(l)] = 1 / m * np.dot(self.grads['dZ' + str(l)], self.cache['A' + str(l - 1)].T)
        self.grads['db' + str(l)] = 1 / m * np.sum(self.grads['dZ' + str(l)], axis=1, keepdims=True)
        self.grads['dA' + str(l - 1)] = np.dot(self.params['W' + str(l)].T, self.grads['dZ' + str(l)])
        # Relu derivative for previous layers
        for l in range(len(self.dimensions)-2, 0, -1):
            self.grads['dZ'+str(l)] = self.grads['dA'+str(l)] * relu_prime(self.cache['Z'+str(l)])
            self.grads['dW'+str(l)] = 1/m*np.dot(self.grads['dZ'+str(l)], self.cache['A'+str(l-1)].T)
            self.grads['db'+str(l)] = 1/m*np.sum(self.grads['dZ'+str(l)], axis=1, keepdims=True)
            self.grads['dA'+str(l-1)] = np.dot(self.params['W'+str(l)].T, self.grads['dZ'+str(l)])

    def update_parameters(self):
        for l in range(1, len(self.dimensions)):
            self.params['W'+str(l)] = self.params['W'+str(l)] - self.alpha*self.grads['dW'+str(l)]
            self.params['b'+str(l)] = self.params['b'+str(l)] - self.alpha*self.grads['db'+str(l)]

    def train(self):
        np.random.seed(1)
        self.initialize()
        for i in range(self.iter):
            #print(self.params)
            self.forward_propagation()
            self.compute_cost()
            self.backward_propagation()
            self.update_parameters()
            if i % 100 == 0:
                print('Cost after {} iterations is {}'.format(i, self.cost))
2. Testing code for odd or even number classifier
import numpy as np
from main import NeuralNetwork
X = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
Y = np.array([[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]])
clf = NeuralNetwork(X, Y, [1, 1, 1], alpha=0.003, iter=7000)
clf.train()
3. Helper Code
import math
import numpy as np

def sigmoid_scalar(x):
    return 1/(1+math.exp(-x))

def sigmoid_prime_scalar(x):
    return sigmoid_scalar(x)*(1-sigmoid_scalar(x))

def relu_scalar(x):
    if x > 0:
        return x
    else:
        return 0

def relu_prime_scalar(x):
    if x > 0:
        return 1
    else:
        return 0

sigmoid = np.vectorize(sigmoid_scalar)
sigmoid_prime = np.vectorize(sigmoid_prime_scalar)
relu = np.vectorize(relu_scalar)
relu_prime = np.vectorize(relu_prime_scalar)
Output: (cost log omitted)
I believe your cross-entropy derivative is wrong. Instead of this:
# WRONG!
self.grads['dA'+str(len(self.dimensions)-1)] = -(np.divide(self.Y, A) - np.divide(1-self.Y, A))
... do this:
# CORRECT
self.grads['dA'+str(len(self.dimensions)-1)] = np.divide(A - self.Y, (1 - A) * A)
See these lecture notes for the details. I think you meant formula (5) but forgot the 1-A. Anyway, use formula (6).
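For reference, formula (6) is just the derivative of the binary cross-entropy cost used in compute_cost. With A the sigmoid output and Y the label:

L = -\left[ Y \log A + (1 - Y) \log(1 - A) \right]
\frac{\partial L}{\partial A} = -\left( \frac{Y}{A} - \frac{1 - Y}{1 - A} \right) = \frac{A - Y}{A (1 - A)}

which is exactly what np.divide(A - self.Y, (1 - A) * A) computes.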

Tensorflow - apply function over 1D Tensor

I have a function dice
def dice(yPred, yTruth, thresh):
    smooth = tf.constant(1.0)
    threshold = tf.constant(thresh)
    yPredThresh = tf.to_float(tf.greater_equal(yPred, threshold))
    mul = tf.mul(yPredThresh, yTruth)
    intersection = 2*tf.reduce_sum(mul) + smooth
    union = tf.reduce_sum(yPredThresh) + tf.reduce_sum(yTruth) + smooth
    dice = intersection/union
    return dice, yPredThresh
which works. An example is given here
with tf.Session() as sess:
    thresh = 0.5
    print("Dice example")
    yPred = tf.constant([0.1,0.9,0.7,0.3,0.1,0.1,0.9,0.9,0.1], shape=[3,3])
    yTruth = tf.constant([0.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,1.0], shape=[3,3])
    diceScore, yPredThresh = dice(yPred=yPred, yTruth=yTruth, thresh=thresh)
    diceScore_, yPredThresh_, yPred_, yTruth_ = sess.run([diceScore, yPredThresh, yPred, yTruth])
    print("\nScore = {0}".format(diceScore_))
>>> Score = 0.899999976158
I would like to be able to loop over the third argument of dice, thresh. I do not know the best way to do this such that I can extract the scores from the graph. Something along the lines of the following...
def diceROC(yPred, yTruth, thresholds=np.linspace(0.1,0.9,20)):
    thresholds = thresholds.astype(np.float32)
    nThreshs = thresholds.size
    diceScores = tf.zeros(shape=nThreshs)
    for i in xrange(nThreshs):
        score, _ = dice(yPred, yTruth, thresholds[i])
        diceScores[i] = score
    return diceScores
Evaluating diceScoreROC yields the error 'Tensor' object does not support item assignment, since apparently I can't index into and assign to a tf tensor.
Instead of the loop, I would encourage you to use broadcasting abilities of tensorflow. If you redefine dice to:
def dice(yPred, yTruth, thresh):
    smooth = tf.constant(1.0)
    yPredThresh = tf.to_float(tf.greater_equal(yPred, thresh))
    mul = tf.mul(yPredThresh, yTruth)
    intersection = 2*tf.reduce_sum(mul, [0, 1]) + smooth
    union = tf.reduce_sum(yPredThresh, [0, 1]) + tf.reduce_sum(yTruth, [0, 1]) + smooth
    dice = intersection/union
    return dice, yPredThresh
You will be able to pass 3-dimensional yPred and yTruth (assuming the tensors will be just repeated along the last dimension) and 1-dimensional thresh:
with tf.Session() as sess:
    thresh = [0.1, 0.9, 20, 0.5]
    print("Dice example")
    yPred = tf.constant([0.1,0.9,0.7,0.3,0.1,0.1,0.9,0.9,0.1], shape=[3,3,1])
    ypred_tiled = tf.tile(yPred, [1,1,4])
    yTruth = tf.constant([0.0,1.0,1.0,0.0,0.0,0.0,1.0,1.0,1.0], shape=[3,3,1])
    ytruth_tiled = tf.tile(yTruth, [1,1,4])
    diceScore, yPredThresh = dice(yPred=ypred_tiled, yTruth=ytruth_tiled, thresh=thresh)
    diceScore_ = sess.run(diceScore)
    print("\nScore = {0}".format(diceScore_))
You'll get:
Score = [ 0.73333335 0.77777779 0.16666667 0.89999998]
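If you do want to keep an explicit Python loop over thresholds instead, one way around the item-assignment error is to collect the per-threshold scores in a Python list and stack them into a single tensor at the end. A minimal sketch (tf.stack is the current name; it was tf.pack in very old TensorFlow versions):

def diceROC(yPred, yTruth, thresholds=np.linspace(0.1, 0.9, 20)):
    scores = []
    for t in thresholds.astype(np.float32):
        score, _ = dice(yPred, yTruth, float(t))
        scores.append(score)
    # build one 1-D tensor from the list instead of assigning by index
    return tf.stack(scores)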