I ran the TensorFlow profiler from Python and it reported several operation types and operations.
Is there a way to change the name of an operation type so that the changed name shows up in the profiler output? For example, is there a way to rename a type named Conv2D to conv2D_LOVE so that it appears under that name in the profiler?
I'm currently searching, but I can't find the right way to do this.
For example, I use the AlexNet model, and after profiling I got output listing the operation types.
Several classes can be inherited from for this kind of customization, including the Dense and LSTM layers; a custom layer can do its work internally and return the specific data you need.
Sample:
import tensorflow as tf
import matplotlib.pyplot as plt

#########################################################
# Class / Functions
#########################################################
class My_3D_noises_generator(tf.keras.layers.Layer):
    def __init__(self, num_outputs):
        super(My_3D_noises_generator, self).__init__()
        self.num_outputs = num_outputs

    def build(self, input_shape):
        # weight created for completeness; it is not used in call()
        self.kernel = self.add_weight("kernel",
                                      shape=[int(input_shape[-1]),
                                             self.num_outputs],
                                      initializer=tf.ones_initializer())

    def call(self, inputs):
        pi = 3.141592653589793
        start = 0.0
        stop = 1.0 * 2.0 * pi

        # noisy sine wave padded with zeros at both ends
        x = tf.linspace( start, stop, self.num_outputs, name='linspace', axis=0 )
        y1 = 3 * tf.math.sin( x )
        escape_sine = tf.random.normal(
            shape=( self.num_outputs, ),
            mean=0.0,
            stddev=0.15 * tf.math.abs( y1, name='abs' ),
            dtype=tf.dtypes.float32,
            seed=32,
            name=None
        )
        y1 = tf.concat( (tf.zeros(60), y1 + escape_sine, tf.zeros(60)), axis=0, name='concat' )

        initial_degree = tf.experimental.numpy.arange( -3, 0, 3 / 60, dtype=tf.float32 )
        midring_degree = tf.experimental.numpy.arange( 0, 3 * 2 * pi, ( 3 * 2 * pi) / self.num_outputs, dtype=tf.float32 )
        skipped_degree = tf.experimental.numpy.arange( 3 * 2 * pi, 3 * 2 * pi + 3, ( 3 * 2 * pi - 3 * 2 * pi + 3 ) / 60, dtype=tf.float32 )
        x = tf.concat(( initial_degree.numpy(), midring_degree.numpy(), skipped_degree.numpy()), axis=0, name='concat')
        y2 = 0.1 * x + 1
        y = y1 + y2

        z = 15 * tf.random.normal(
            shape=( 1, self.num_outputs, ),
            mean=0.0,
            stddev=1,
            dtype=tf.dtypes.float32,
            seed=32,
            name=None
        )

        x = tf.expand_dims(x, axis=0)
        y = tf.expand_dims(y, axis=0)
        z = tf.matmul(inputs, z)
        x = tf.matmul(inputs, x)
        y = tf.matmul(inputs, y)

        x = x[int(tf.math.argmax(x, axis=0)[0])]
        y = y[int(tf.math.argmax(y, axis=0)[0])]
        z = z[int(tf.math.argmax(z, axis=0)[0])]

        x = tf.expand_dims(x, axis=-1)
        y = tf.expand_dims(y, axis=-1)
        z = tf.expand_dims(z, axis=-1)

        return x, y, z

#########################################################
# Perform operations
#########################################################
start = 3
limit = 33
delta = 3
sample = tf.range(start, limit, delta)
sample = tf.cast( sample, dtype=tf.float32 )
sample = tf.constant( sample, shape=( 10, 1 ) )
layer = My_3D_noises_generator(100)
xdata, ydata, zdata = layer(sample)
ax = plt.axes(projection='3d')
# Data for a three-dimensional line
zline = tf.range(0, 1000, 25)
zline = tf.cast( zline, dtype=tf.float32 )
xline = 20 * tf.math.sin(zline)
yline = 20 * tf.math.cos(zline)
ax.plot3D(xline, yline, zline, 'gray')
ax.scatter3D(xdata[0:100,:], ydata[0:100,:], zdata[0:100,:], c=zdata[0:100,:], cmap='Greens');
plt.show()
Output: the generated 3D random noise, plotted as a scatter alongside the reference line.
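If the goal is to see the custom layer's work in the profiler output, one option is to capture a trace around a call to the layer and open it in TensorBoard's Profile tab. A minimal sketch, assuming a recent TF 2.x and reusing the layer and sample defined above ("logs/profile" is an arbitrary log directory of my choosing):
logdir = "logs/profile"
tf.profiler.experimental.start(logdir)
_ = My_3D_noises_generator(100)(sample)   # ops executed here are captured in the trace
tf.profiler.experimental.stop()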
I am trying to code a neural network using only numpy and pandas. I am having issues with the dimensions of my data and am getting the error "ValueError: operands could not be broadcast together with shapes (150,) (150,3)". I'm not sure what the alternative is here, as we are trying to predict one of the three types of flower based on 4 numerical values. Here is my code:
import numpy as np
import pandas as pd


class NeuralNet():
    def __init__(self, i_dim, h_dim, o_dim, lr):
        self.i_dim = i_dim
        self.h_dim = h_dim
        self.o_dim = o_dim
        self.lr = lr
        self.weights1 = np.random.randn(self.i_dim, self.h_dim) / np.sqrt(self.i_dim)
        self.bias1 = np.zeros((1, self.h_dim))
        self.weights2 = np.random.randn(self.h_dim, self.o_dim) / np.sqrt(self.h_dim)
        self.bias2 = np.zeros((1, self.o_dim))

    def sigmoid(self, x):
        return 1 / (1 + np.exp(-x))

    def softmax(self, x):
        exps = np.exp(x - np.max(x, axis=1, keepdims=True))
        return exps / np.sum(exps, axis=1, keepdims=True)

    def forward(self, X):
        self.layer1 = self.sigmoid(np.dot(X, self.weights1) + self.bias1)
        self.layer2 = self.softmax(np.dot(self.layer1, self.weights2) + self.bias2)
        return self.layer2

    def sigmoid_derivative(self, x):
        return x * (1 - x)

    def softmax_derivative(self, x):
        s = x.reshape(-1, 1)
        return np.diagflat(s) - np.dot(s, s.T)

    def backward(self, X, y, y_hat):
        d_softmax = self.softmax_derivative(y_hat)
        d_sigmoid = self.sigmoid_derivative(self.layer1)
        d_weights2 = np.dot(self.layer1.T, (2 * (y - y_hat) * d_softmax))
        d_bias2 = np.sum(2 * (y - y_hat) * d_softmax, axis=0, keepdims=True)
        d_weights1 = np.dot(X.T, (np.dot(2 * (y - y_hat) * d_softmax, self.weights2.T) * d_sigmoid))
        d_bias1 = np.sum(np.dot(2 * (y - y_hat) * d_softmax, self.weights2.T) * d_sigmoid, axis=0)
        self.weights1 -= self.lr * d_weights1
        self.bias1 -= self.lr * d_bias1
        self.weights2 -= self.lr * d_weights2
        self.bias2 -= self.lr * d_bias2

    def cross_ent_loss(self):
        sample_losses = - self.y * np.log(self.y_hat) - (1 - self.y) * np.log(1 - self.y_hat)
        loss = np.mean(sample_losses)
        return loss

    def train(self, X, y, epochs):
        for epoch in range(epochs):
            y_hat = self.forward(X)
            self.backward(X, y, y_hat)
            loss = self.cross_ent_loss()
            print(f"Epoch {epoch}: Loss = {loss}")
            if epoch % 10 == 0:
                print(f"Epoch {epoch}: Loss = {loss}")

    def predict(self, X):
        return self.forward(X)


df = pd.read_csv('/Users/brasilgu/PycharmProjects/NNfs/venv/lib/iris.data.txt', header=None)
X_train = df.iloc[:, :4].values
y_train = df.iloc[:, -1].values
nn = NeuralNet(4, 5, 3, 0.1)
nn.train(X_train, y_train, 1000)
y_pred = nn.predict(X_train)
y_pred_labels = np.argmax(y_pred, axis=1)
print(y_pred)
The stacktrace of the error:
Traceback (most recent call last):
  File "/Users/brasilgu/PycharmProjects/NNfs/venv/lib/neural_net.py", line 72, in <module>
    nn.train(X_train, y_train, 1000)
  File "/Users/brasilgu/PycharmProjects/NNfs/venv/lib/neural_net.py", line 57, in train
    self.backward(X, y, y_hat)
  File "/Users/brasilgu/PycharmProjects/NNfs/venv/lib/neural_net.py", line 39, in backward
    d_weights2 = np.dot(self.layer1.T, (2 * (y - y_hat) * d_softmax))
ValueError: operands could not be broadcast together with shapes (150,) (150,3)
I looked at the publicly available iris dataset, and according to your code, y is a rank-1 array with shape (150,).
So modify your y_train with y_train = y_train.reshape(-1, 1) to make it a proper column matrix before creating the NeuralNet.
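A minimal sketch of that change applied to the script in the question (note that if the last column of the CSV holds string class names, they would additionally need to be converted to a numeric or one-hot form before the arithmetic in backward and cross_ent_loss can work):
y_train = df.iloc[:, -1].values    # shape (150,)
y_train = y_train.reshape(-1, 1)   # shape (150, 1), a proper column matrix
nn = NeuralNet(4, 5, 3, 0.1)
nn.train(X_train, y_train, 1000)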
I'm trying to solve what I thought would be quite a simple task: replicating (repeating) a tensor in a custom layer on TPU.
My input is two tensors with shapes A = (BS, H, n, C) and B = (BS, n, W, C), where n in my case can be 1, 3, 5, or 7, but it should probably also work with other numbers.
My task is to repeat both tensors A and B to shape (BS, H, W, C) and then sum them for the output. It would be easy if H (or W) were always divisible by n, but they are not, so the number of repeats for each slice (BS, H, 1, C) of A differs. Thus the output is calculated using the following pseudocode:
for i in range(W):
    A1[BS, H, i, C] = A[BS, H, floor(n*i/W), C]
I tried implementing it in multiple ways:
class StripPoolingCombine(tf.keras.layers.Layer):
    def __init__(self, n=1):
        super(StripPoolingCombine, self).__init__()
        self.n = n

    def call(self, v, h, training=False):
        H, W = v.shape[1], h.shape[2]
        v_repeats = tf.unique_with_counts(tf.math.floor(tf.range(W) * self.n / W))[-1]
        h_repeats = tf.unique_with_counts(tf.math.floor(tf.range(H) * self.n / H))[-1]
        v = tf.repeat(v, repeats=v_repeats, axis=2)
        h = tf.repeat(h, repeats=h_repeats, axis=1)
        return Add()([v, h])
Or by replacing unique_with_counts with the following logic:
tf.math.bincount(tf.cast(tf.math.floor(tf.range(W) * self.n / W), dtype=tf.int32))
Or using an improvised formula:
f = tf.cast(tf.math.ceil(W / self.n), dtype=tf.int32)
s = tf.cast(tf.math.floor(W / self.n), dtype=tf.int32)
b = tf.cast(f!=s, dtype=tf.int32)
r = W - f - s * (self.n - 1)
x1 = s * tf.ones(self.n-1, dtype=tf.int32)
x2 = (1 - tf.range(r*2) % 2) * b
x2 = tf.pad(x2, paddings=[[0, self.n-r*2-1]])
x3 = tf.concat([[f], tf.add(x1, x2)], axis=0)
But as can be seen in Available TensorFlow Ops for TPU, it doesn't support dynamic tf.range, tf.unique_with_counts or tf.math.bincount, and my implementations all result in errors when building a model and calling model.fit() or model.predict(). Yet I still hope that TensorFlow provides some way to work with dynamic shapes that would suit my task and won't make me rewrite a whole Ops module for such a trivial issue. Please help!
Full reproducible example (using Colab TPU):
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Add

try:
    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    print(f'Running on TPU: {tpu.master()}')
except ValueError:
    print('Could not connect to TPU')
    tpu = None

if tpu:
    try:
        print('Initializing TPU...')
        tf.config.experimental_connect_to_cluster(tpu)
        tf.tpu.experimental.initialize_tpu_system(tpu)
        strategy = tf.distribute.TPUStrategy(tpu)
        print('TPU initialized!')
    except Exception:
        print('Failed to initialize TPU')


# class StripPoolingCombine(tf.keras.layers.Layer):
#     def __init__(self, n=1):
#         super(StripPoolingCombine, self).__init__()
#         self.n = n
#
#     def call(self, v, h, training=False):
#         H, W = v.shape[1], h.shape[2]
#         v_repeats = tf.unique_with_counts(tf.math.floor(tf.range(W) * self.n / W))[-1]
#         h_repeats = tf.unique_with_counts(tf.math.floor(tf.range(H) * self.n / H))[-1]
#         v = tf.repeat(v, repeats=v_repeats, axis=2)
#         h = tf.repeat(h, repeats=h_repeats, axis=1)
#         return Add()([v, h])


class StripPoolingCombine(tf.keras.layers.Layer):
    def __init__(self, n=1):
        super(StripPoolingCombine, self).__init__()
        self.n = n

    def call(self, v, h, training=False):
        H, W = tf.shape(v)[1], tf.shape(h)[2]
        f = tf.cast(tf.math.ceil(W / self.n), dtype=tf.int32)
        s = tf.cast(tf.math.floor(W / self.n), dtype=tf.int32)
        b = tf.cast(f!=s, dtype=tf.int32)
        r = W - f - s * (self.n - 1)

        x1 = s * tf.ones(self.n-1, dtype=tf.int32)
        x2 = (1 - tf.range(r*2) % 2) * b
        x2 = tf.pad(x2, paddings=[[0, self.n-r*2-1]])
        x3 = tf.concat([[f], tf.add(x1, x2)], axis=0)

        v = tf.repeat(v, repeats=x3, axis=2)
        h = tf.repeat(h, repeats=x3, axis=1)
        output = tf.add(v, h)

        return output


def build_model(n=7):
    v = Input(shape=(256, n, 3))
    h = Input(shape=(n, 256, 3))
    outputs = StripPoolingCombine()(v, h)
    model = Model(inputs=[v, h], outputs=outputs)
    return model


tf.keras.backend.clear_session()
with strategy.scope():
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4, beta_1=0.9, beta_2=0.999)
    model = build_model()
    model.compile(optimizer=optimizer, loss='mean_squared_error')

rng_1 = tf.random.uniform([1, 256, 7, 3])
rng_2 = tf.random.uniform([1, 7, 256, 3])
model.predict([rng_1, rng_2])
Use tf.gather:
def call(self, v, h, training=False):
    def out(A, H, axis):
        r = tf.range(H)
        inds = tf.floor(self.n * r / H)
        inds = tf.cast(inds, tf.int32)
        return tf.gather(A, inds, axis=axis)

    H, W = tf.shape(v)[1], tf.shape(h)[2]
    v = out(v, W, 2)
    h = out(h, H, 1)
    output = tf.add(v, h)
    return output
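As a quick eager-mode sanity check, here is a small usage sketch of my own, reusing the tensor shapes from the question and assuming the gather-based call above has been dropped into StripPoolingCombine, with n=7 to match the inputs:
v = tf.random.uniform([1, 256, 7, 3])
h = tf.random.uniform([1, 7, 256, 3])
out = StripPoolingCombine(n=7)(v, h)
print(out.shape)   # expected: (1, 256, 256, 3)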
I am interested in applying an L2-norm constraint to each row of the parameter matrix in scipy.optimize.minimize. What I have tried so far is:
def l2_const(x):
    x = x.reshape(r, c)
    b = np.sqrt((x**2).sum(axis=1)) - 1
    return np.broadcast_to(b[:, None], (r, c)).flatten()

x0 = np.random.random((r, c))
const = ({'type': 'eq', 'fun': l2_const},)
f_min = minimize(fun=cost, x0=x0, method='SLSQP', jac=gradient, constraints=const)
but the computed parameters f_min.x are all zeros. Does anyone know how to implement this type of constraint correctly?
EDIT 1: An example of applying this type of constraint can be found in my answer to my previous post.
EDIT 2: Below you can find a complete working example. The results are very poor when the constraints are used. Any suggestions are welcome.
Class:
import numpy as np
from scipy.optimize import minimize
from sklearn import preprocessing


class myLR():
    def __init__(self, reltol=1e-8, maxit=1000, opt_method=None, verbose=True, seed=0):
        self.maxit = maxit
        self.reltol = reltol
        self.seed = seed
        self.verbose = verbose
        self.opt_method = opt_method
        self.lbin = preprocessing.LabelBinarizer()

    def w_2d(self, w, n_classes):
        return np.reshape(w, (n_classes, -1))

    def softmax(self, W, X):
        a = np.exp(X @ W.T)
        o = a / np.sum(a, axis=1, keepdims=True)
        return o

    def squared_norm(self, x):
        x = np.ravel(x, order='K')
        return np.dot(x, x)

    def cost(self, W, X, T, n_samples, n_classes):
        W = self.w_2d(W, n_classes)
        log_O = np.log(self.softmax(W, X))
        c = -(T * log_O).sum()
        return c / n_samples

    def gradient(self, W, X, T, n_samples, n_classes):
        W = self.w_2d(W, n_classes)
        O = self.softmax(W, X)
        grad = -(T - O).T.dot(X)
        return grad.ravel() / n_samples

    def l1_constraint(self, x, n_classes, n_features):
        x = x.reshape(n_classes, -1)
        b = x.sum(axis=1) - 1
        return np.broadcast_to(b[:, None], (n_classes, n_features)).flatten()

    def fit(self, X, y=None):
        n_classes = len(np.unique(y))
        n_samples, n_features = X.shape

        if n_classes == 2:
            T = np.zeros((n_samples, n_classes), dtype=np.float64)
            for i, cls in enumerate(np.unique(y)):
                T[y == cls, i] = 1
        else:
            T = self.lbin.fit_transform(y)

        np.random.seed(self.seed)
        W_0 = np.random.random((n_classes, n_features))

        const = ({'type': 'eq', 'fun': self.l1_constraint, 'args': (n_classes, n_features,)},)

        options = {'disp': self.verbose, 'maxiter': self.maxit}
        f_min = minimize(fun=self.cost, x0=W_0,
                         args=(X, T, n_samples, n_classes),
                         method=self.opt_method,
                         constraints=const,
                         jac=self.gradient,
                         options=options)

        self.coef_ = self.w_2d(f_min.x, n_classes)
        self.W_ = self.coef_

        return self

    def predict_proba(self, X):
        O = self.softmax(self.W_, X)
        return O

    def predict(self, X):
        sigma = self.predict_proba(X)
        y_pred = np.argmax(sigma, axis=1)
        return y_pred
Main:
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from myLR import myLR

iris = datasets.load_iris()
X = iris.data[:, 0:2]
y = iris.target

par_dict2 = {'reltol': 1e-6,
             'maxit': 20000,
             'verbose': 20,
             'seed': 0}

# Create different classifiers.
classifiers = {
    'myLR': myLR(**par_dict2),
}

n_classifiers = len(classifiers)

plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)

xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]

for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)
    coef_ = classifier.coef_
    print(np.linalg.norm(coef_, axis=1))

    y_pred = classifier.predict(X)
    accuracy = accuracy_score(y, y_pred)
    print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100))

    # View probabilities:
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='w', edgecolor='k')

ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')

plt.show()
EDIT 3: I replaced the constraint with
def l1_constraint(self, x, n_classes, n_features):
    x = x.reshape(n_classes, -1)
    b = x.sum(axis=1) - 1
    return b
It produces better results. However, the computed components x1 and x2 do not sum to 1. Is that fine?
This is with TF 2.1.0.
The following works up until you try to fit the compiled model. Is there something I can do to make the .compile and .fit methods work with multiple tensor inputs?
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras as keras

tf.keras.backend.set_floatx('float64')

m = 250  # samples
n_x = 1  # dim of x
n_tau = 11
x = (2 * np.random.rand(m, n_x).astype(np.float64) - 1) * 2
i = np.argsort(x[:, 0])
x = x[i]  # to make plotting nicer

A = np.random.randn(n_x, 1)
y = x ** 2 + 0.3 * x + 0.4 * np.random.randn(m, 1).astype(np.float64)
y = y.dot(A)  # y is 1d
y = y[:, :, None]

tau = np.linspace(1.0 / n_tau, 1 - 1.0 / n_tau, n_tau).astype(np.float64)
tau = tau[None, :, None]


def loss(tau_y, u):
    tau = tau_y[0]
    y = tau_y[1]
    u = y - u
    res = u ** 2 * (tau - tf.where(u <= np.float64(0.0), np.float64(1.0), np.float64(0.0)))
    return tf.reduce_sum(tf.reduce_mean(res, axis=[1, 2]), axis=0)


tf.keras.backend.set_floatx('float64')


class My(tf.keras.models.Model):
    def __init__(self):
        super().__init__()
        self._my_layer = tf.keras.layers.Dense(1, dtype=tf.float64)

    def call(self, inputs):
        tau = inputs[0]
        y = inputs[1]
        tf.print(tau.shape, y.shape)
        return self._my_layer(tau)


model = My()
u = model((tau, y))                  # calling the model works
l = loss((tau, y), model((tau, y)))  # calling the loss works

opt = tf.keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt, loss=loss)

# this fails with the error below
model.fit((tau, y), (tau, y))
# ValueError: Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 1 array(s), for inputs ['output_1'] but instead got the following list of 2 arrays: [array([[[0.09090909],
# [0.17272727],
# [0.25454545],
# [0.33636364],
# [0.41818182],
# [0.5 ],
# [0.58181818],
# [0.66363636],
# [0.74545455],
# ...
I understand that Code 1 is the code for linear regression using tf.train.GradientDescentOptimizer, which belongs to the TensorFlow library (a black box).
Code 2 is a code example that does the same thing without GradientDescentOptimizer, i.e., without the black box.
I want to add a bias term (hypothesis = X * W + b) to Code 2. In this case, what should the code (gradient, descent, update, etc.) look like?
Code 1
import tensorflow as tf

x_train = [1, 2, 3]
y_train = [1, 2, 3]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(5.)
b = tf.Variable(5.)

hypothesis = X * W + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
learning_rate = 0.1

optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
gvs = optimizer.compute_gradients(cost, [W, b])
apply_gradients = optimizer.apply_gradients(gvs)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(21):
    gradient_val, cost_val, _ = sess.run(
        [gvs, cost, apply_gradients], feed_dict={X: x_train, Y: y_train})
    print("%3d Cost: %10s, W': %10s, W: %10s, b': %10s, b: %10s" %
          (step, round(cost_val, 5),
           round(gradient_val[0][0] * learning_rate, 5), round(gradient_val[0][1], 5),
           round(gradient_val[1][0] * learning_rate, 5), round(gradient_val[1][1], 5)))
Code 2
import tensorflow as tf

x_train = [1, 2, 3]
y_train = [1, 2, 3]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(5.)
# b = tf.Variable(5.) # Bias

hypothesis = X * W
# hypothesis = X * W + b

cost = tf.reduce_mean(tf.square(hypothesis - Y))
learning_rate = 0.1

gradient = tf.reduce_mean((W * X - Y) * X) * 2
descent = W - learning_rate * gradient
update = tf.assign(W, descent)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(W))

for step in range(21):
    gradient_val, update_val, cost_val = sess.run(
        [gradient, update, cost], feed_dict={X: x_train, Y: y_train})
    print(step, gradient_val * learning_rate, update_val, cost_val)
I have referred to An Introduction to Gradient Descent and Linear Regression.
Code 2
import tensorflow as tf

x_train = [1, 2, 3]
y_train = [1, 2, 3]

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

W = tf.Variable(5.)
b = tf.Variable(5.)

hypothesis = X * W + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
learning_rate = 0.1

W_gradient = tf.reduce_mean((W * X + b - Y) * X) * 2
b_gradient = tf.reduce_mean(W * X + b - Y) * 2

W_descent = W - learning_rate * W_gradient
b_descent = b - learning_rate * b_gradient

W_update = tf.assign(W, W_descent)
b_update = tf.assign(b, b_descent)

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for step in range(21):
    cost_val, W_gradient_val, W_update_val, b_gradient_val, b_update_val = sess.run(
        [cost, W_gradient, W_update, b_gradient, b_update],
        feed_dict={X: x_train, Y: y_train})
    print("%3d Cost: %8s, W': %8s, W: %8s, b': %8s, b: %8s" %
          (step, round(cost_val, 5),
           round(W_gradient_val * learning_rate, 5), round(W_update_val, 5),
           round(b_gradient_val * learning_rate, 5), round(b_update_val, 5)))
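For reference, the gradient expressions used above follow from differentiating the cost by hand (a sketch of the reasoning): with cost = mean((W*X + b - Y)^2), the partial derivatives are dcost/dW = 2 * mean((W*X + b - Y) * X) and dcost/db = 2 * mean(W*X + b - Y), and each parameter is then moved one step against its gradient, parameter - learning_rate * gradient, which is exactly what W_update and b_update compute.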