I ran the code from this site in IDLE (Python): https://sites.google.com/view/dailycoding00/main/%EC%95%8C%ED%8C%8C%EC%98%A4-%ED%95%99%EC%8A%B5
but it raised an error:
Traceback (most recent call last):
File "C:\Windows\system32\python", line 343, in <module>
ex = MyApp()
File "C:\Windows\system32\python", line 16, in __init__
self.initUI()
File "C:\Windows\system32\python", line 38, in initUI
self.board_cv2 = cv2.cvtColor(board_cv2, cv2.COLOR_BGR2RGB)
cv2.error: OpenCV(4.4.0) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-xr4y3u3_\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
What does this error mean?
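The assertion !_src.empty() means cv2.cvtColor() was given an empty input: cv2.imread() returned None because 'c:\\omok_v1.0/source/board_1515.png' could not be read (missing file or wrong path). imread() does not raise on failure, so a minimal guard, assuming these same paths, looks like:
import cv2

board_cv2 = cv2.imread('c:\\omok_v1.0/source/board_1515.png')
# imread() silently returns None when the file is missing or unreadable,
# so fail early here instead of crashing later inside cvtColor()
if board_cv2 is None:
    raise FileNotFoundError('could not read board_1515.png; check the path')
board_cv2 = cv2.cvtColor(board_cv2, cv2.COLOR_BGR2RGB)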
The code is:
import sys
import numpy as np
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtCore import Qt
import cv2
import time
import tensorflow as tf
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
load_model_path = 'model/model.ckpt'
#
board_size = 15
self.game_end = 0
self.board_size = board_size
self.board = np.zeros([board_size,board_size])
self.board_history = np.zeros([board_size,board_size])
self.cnt = 1
time_now = time.gmtime(time.time())
self.save_name = str(time_now.tm_year) + '_' + str(time_now.tm_mon) + '_' + str(time_now.tm_mday) + '_' + str(time_now.tm_hour) + '_' + str(time_now.tm_min) + '_' + str(time_now.tm_sec) + '.txt'
self.save_name_png = str(time_now.tm_year) + '_' + str(time_now.tm_mon) + '_' + str(time_now.tm_mday) + '_' + str(time_now.tm_hour) + '_' + str(time_now.tm_min) + '_' + str(time_now.tm_sec) + '.png'
# read image in numpy array (using cv2)
board_cv2 = cv2.imread('c:\\omok_v1.0/source/board_1515.png')
self.board_cv2 = cv2.cvtColor(board_cv2, cv2.COLOR_BGR2RGB)
white_ball = cv2.imread('c:\\omok_v1.0/source/white.png')
self.white_ball = cv2.cvtColor(white_ball, cv2.COLOR_BGR2RGB)
black_ball = cv2.imread('c:\\omok_v1.0/source/black.png')
self.black_ball = cv2.cvtColor(black_ball, cv2.COLOR_BGR2RGB)
# numpy to QImage
height, width, channel = self.board_cv2.shape
bytesPerLine = 3 * width
qImg_board = QImage(self.board_cv2.data, width, height, bytesPerLine, QImage.Format_RGB888)
self.player = 1 # 1: black / 2: white
x = 0
y = 0
self.lbl_img = QLabel()
self.lbl_img.setPixmap(QPixmap(qImg_board))
self.vbox = QVBoxLayout()
self.vbox.addWidget(self.lbl_img)
self.setLayout(self.vbox)
# load AI model
self.X = tf.placeholder(tf.float32, [None, board_size*board_size])
X_img = tf.reshape(self.X, [-1, board_size, board_size, 1])
W1 = tf.Variable(tf.random_normal([7,7,1,64], stddev=0.1))
L1 = tf.nn.conv2d(X_img, W1, strides=[1,1,1,1], padding='SAME')
L1 = tf.nn.relu(L1)
#L1 = tf.nn.dropout(L1, keep_prob=keep_prob)
W2 = tf.Variable(tf.random_normal([5,5,64,32], stddev = 0.1))
L2 = tf.nn.conv2d(L1, W2, strides=[1,1,1,1], padding='SAME')
L2 = tf.nn.relu(L2)
#L2 = tf.nn.dropout(L2, keep_prob=keep_prob)
W3 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L3 = tf.nn.conv2d(L2, W3, strides=[1,1,1,1], padding='SAME')
L3 = tf.nn.relu(L3)
#L3 = tf.nn.dropout(L3, keep_prob=keep_prob)
W4 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L4 = tf.nn.conv2d(L3, W4, strides=[1,1,1,1], padding='SAME')
L4 = tf.nn.relu(L4)
#L4 = tf.nn.dropout(L4, keep_prob=keep_prob)
W5 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L5 = tf.nn.conv2d(L4, W5, strides=[1,1,1,1], padding='SAME')
L5 = tf.nn.relu(L5)
#L5 = tf.nn.dropout(L5, keep_prob=keep_prob)
W6 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L6 = tf.nn.conv2d(L5, W6, strides=[1,1,1,1], padding='SAME')
L6 = tf.nn.relu(L6)
W7 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L7 = tf.nn.conv2d(L6, W7, strides=[1,1,1,1], padding='SAME')
L7 = tf.nn.relu(L7)
W8 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L8 = tf.nn.conv2d(L7, W8, strides=[1,1,1,1], padding='SAME')
L8 = tf.nn.relu(L8)
W9 = tf.Variable(tf.random_normal([3,3,32,32], stddev = 0.1))
L9 = tf.nn.conv2d(L8, W9, strides=[1,1,1,1], padding='SAME')
L9 = tf.nn.relu(L9)
#L6 = tf.nn.dropout(L6, keep_prob=keep_prob)
L9 = tf.reshape(L9, [-1, board_size * board_size * 32])
W10 = tf.get_variable("W10", shape=[board_size * board_size * 32, board_size*board_size],initializer = tf.contrib.layers.xavier_initializer())
b10 = tf.Variable(tf.random_normal([board_size * board_size]))
self.logits =tf.matmul(L9,W10) + b10
self.saver = tf.train.Saver()
self.sess = tf.Session()
self.saver.restore(self.sess, load_model_path)
self.setWindowTitle('Omok simulation')
self.move(100, 100)
self.resize(500,500)
self.show()
def game_play(self, board_img, ball, pos_x, pos_y, turn):
#human
ball_size = ball.shape[0]
step_size = 56
off_set = 10
#
if pos_x < step_size/2+off_set+1 or pos_y < step_size/2+off_set+1:
print('You cannot place a stone there')
elif pos_x > step_size*self.board_size+step_size/2+off_set or pos_y > step_size*self.board_size+step_size/2+off_set:
print('You cannot place a stone there')
else:
step_x = round((pos_x - off_set)/step_size)
step_y = round((pos_y - off_set)/step_size)
if self.board[step_x-1,step_y-1] != 0: # a stone is already there
print('You cannot place a stone there')
else:
self.board[step_x-1,step_y-1] = turn
self.board_history[step_x-1,step_y-1] = self.cnt
self.cnt = self.cnt + 1
x_step = step_size*step_x-round(step_size/2) + off_set
y_step = step_size*step_y-round(step_size/2) + off_set
board_img[x_step:x_step+ball_size,y_step:y_step+ball_size] = ball
#
if self.game_rule(self.board, turn):
self.game_end = 1
print('The game is over.')
board_img = cv2.cvtColor(board_img, cv2.COLOR_RGB2BGR)
print('Congratulations, you won')
return board_img
def sigmoid(self, x):
return 1 / (1 +np.exp(-x))
def find_max(self, result_mat):
max = 0
max_x = 0
max_y = 0
for i in range(self.board_size):
for j in range(self.board_size):
if result_mat[i,j] > max:
max = result_mat[i,j]
max_x = i
max_y = j
result_mat[max_x,max_y] = 0
return result_mat, max_x, max_y
def mousePressEvent(self, e):
x = e.x()
y = e.y()
if self.game_end == 0:
#
self.board_cv2 = self.game_play(self.board_cv2, self.black_ball, y, x, 1)
save_name = 'result/' + str(self.cnt) + "board_black.png"
save_name_w = 'result/' + str(self.cnt) + "board_white.png"
save_name_pred = 'result/' + str(self.cnt) + "board_pred.png"
#
input_X = self.board.flatten()/2
result = self.sess.run(self.logits, feed_dict={self.X: input_X[None,:]})
result_mat = self.sigmoid(result).reshape([self.board_size,self.board_size])
heat_map = cv2.resize(result_mat*255, (500, 500))
result_mat, max_x, max_y = self.find_max(result_mat)
save_image = cv2.resize(self.board_cv2, (500, 500), interpolation=cv2.INTER_CUBIC)
save_image = cv2.cvtColor(save_image, cv2.COLOR_RGB2BGR)
#save_image[:,:,0] = save_image[:,:,0] + heat_map
cv2.imwrite(save_name, save_image)
cv2.imwrite(save_name_pred, heat_map)
#
while self.board[max_x,max_y] !=0:
print('AI cannot place a stone there')
result_mat, max_x, max_y = self.find_max(result_mat)
self.board[max_x,max_y] = 2 # the AI is always 2 / the human is 1
self.board_history[max_x,max_y] = self.cnt
self.cnt = self.cnt + 1
ball_size = self.white_ball.shape[0]
step_size = 56
off_set = 10
x_step = step_size*(max_x+1)-round(step_size/2) + off_set
y_step = step_size*(max_y+1)-round(step_size/2) + off_set
self.board_cv2[x_step:x_step+ball_size,y_step:y_step+ball_size] = self.white_ball
save_image = cv2.resize(self.board_cv2, (500, 500), interpolation=cv2.INTER_CUBIC)
save_image = cv2.cvtColor(save_image, cv2.COLOR_RGB2BGR)
cv2.imwrite(save_name_w, save_image)
#self.board_history[step_x-1,step_y-1] = self.cnt
#self.board_cv2 = self.game_play(self.board_cv2, self.white_ball, y, x, 2)
height, width, channel = self.board_cv2.shape
bytesPerLine = 3 * width
qImg_board = QImage(self.board_cv2.data, width, height, bytesPerLine, QImage.Format_RGB888)
self.lbl_img.setPixmap(QPixmap(qImg_board))
def game_rule(self, board, player): # standard Gomoku (Renju) rules to be added later
game_result = 0
diag_line = np.zeros(5)
# ●●●●●
for i_idx in range(len(board)):
for j_idx in range(len(board)-4):
p1 = (board[i_idx,j_idx:j_idx+5] == player)
if p1.sum() == 5:
#print('player ', player, ' win')
game_result = 1
return game_result
#print(board)
for i_idx in range(len(board)-4):
for j_idx in range(len(board)):
p1 = (board[i_idx:i_idx+5,j_idx] ==player)
if p1.sum() == 5:
#print('player ', player, ' win')
game_result = 1
return game_result
#print(board)
#
for i_idx in range(len(board)-4):
for j_idx in range(len(board)-4):
diag_line[0] = board[i_idx+0,j_idx+0]
diag_line[1] = board[i_idx+1,j_idx+1]
diag_line[2] = board[i_idx+2,j_idx+2]
diag_line[3] = board[i_idx+3,j_idx+3]
diag_line[4] = board[i_idx+4,j_idx+4]
p1 = (diag_line == player)
if p1.sum() == 5:
#print('player ', player, ' win')
game_result = 1
return game_result
#print(board)
#
for i_idx in range(len(board)-4):
for j_idx in range(len(board)-4):
diag_line[0] = board[i_idx+4,j_idx+0]
diag_line[1] = board[i_idx+3,j_idx+1]
diag_line[2] = board[i_idx+2,j_idx+2]
diag_line[3] = board[i_idx+1,j_idx+3]
diag_line[4] = board[i_idx+0,j_idx+4]
p1 = (diag_line == player)
if p1.sum() == 5:
game_result = 1
return game_result
return game_result
def save_history(self):
result=np.array(self.board_history).flatten()
f = open(self.save_name, 'w')
f.write(np.array2string(result))
f.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyApp()
sys.exit(app.exec_())
I was trying to run my YOLOv5 custom model on CPU and I got this error.
This is the GitHub page I used: https://github.com/Amelia0911/onnxruntime-for-yolov5
import onnxruntime
import cv2
import numpy as np
import torch
from models.utils import *
import time
IMAGE_SIZE = (416, 416)
CONF_TH = 0.3
NMS_TH = 0.45
CLASSES = 80
model = onnxruntime.InferenceSession("models_train/bestnone.onnx")
anchor_list = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
stride = [8, 16, 32]
def draw(img, boxinfo, dst, id):
for *xyxy, conf, cls in boxinfo:
label = '{}|{}'.format(int(cls), '%.2f' % conf)
plot_one_box(xyxy, img, label=label, color=[0, 0, 255])
cv2.imencode('.jpg', img)[1].tofile(dst)
def detect(image):
img = cv2.resize(image,IMAGE_SIZE)
img = img.transpose(2, 0, 1)
dataset = (img, image)
img = dataset[0].astype('float32')
img_size = [dataset[0].shape[1], dataset[0].shape[2]]
img /= 255.0
img = img.reshape(1, 3, img_size[0], img_size[1])
inputs = {model.get_inputs()[0].name: img}
pred = torch.tensor(model.run(None, inputs)[0])
anchor = torch.tensor(anchor_list).float().view(3, -1, 2)
area = img_size[0]*img_size[1]
size = [int(area/stride[0]**2), int(area/stride[1]**2), int(area/stride[2]**2)]
feature = [[int(j/stride[i]) for j in img_size] for i in range(3)]
y = []
y.append(pred[:, :size[0]*3, :])
y.append(pred[:, size[0]*3:size[0]*3+size[1]*3, :])
y.append(pred[:, size[0]*3+size[1]*3:, :])
grid = []
for k, f in enumerate(feature):
grid.append([[i, j] for j in range(f[0]) for i in range(f[1])])
z = []
for i in range(3):
src = y[i]
xy = src[..., 0:2] * 2. - 0.5
wh = (src[..., 2:4] * 2) ** 2
dst_xy = []
dst_wh = []
for j in range(3):
dst_xy.append((xy[:, j*size[i]:(j+1)*size[i], :] + torch.tensor(grid[i])) * stride[i])
dst_wh.append(wh[:, j*size[i]:(j+1)*size[i], :] * anchor[i][j])
src[..., 0:2] = torch.from_numpy(np.concatenate((dst_xy[0], dst_xy[1], dst_xy[2]), axis=1))
src[..., 2:4] = torch.from_numpy(np.concatenate((dst_wh[0], dst_wh[1], dst_wh[2]), axis=1))
z.append(src.view(1, -1, CLASSES+5)) #85
pred = torch.cat(z, 1)
pred = nms(pred, CONF_TH, NMS_TH)
for det in pred:
if det is not None and len(det):
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], dataset[1].shape).round()
if det == None:
return np.array([])
return det
if __name__ == '__main__':
import time
src = 'Temp-640x640.jpg'
t1 = time.time()
img = cv2.imdecode(np.fromfile(src, dtype=np.uint8), -1)
print(IMAGE_SIZE)
results = detect(img)
t2 = time.time()
print(results)
print("onnxruntime time = ", t2 - t1)
if results is not None and len(results):
draw(img, results, 'dst3.jpg', str(id))
print('Down!')
When I run this code, I get the following error:
File "C:\Users\acer\.spyder-py3\metallic surface defect detection\3_onnx_cpu_detec.py", line 85, in <module>
results = detect(img)
File "C:\Users\acer\.spyder-py3\metallic surface defect detection\3_onnx_cpu_detec.py", line 30, in detect
img = img.reshape(1, 3, img_size[0], img_size[1])
ValueError: cannot reshape array of size 692224 into shape (1,3,416,416)
I think it is a color-channel issue. I have tried to fix it, but it doesn't work. If someone knows how to fix it, please inform me. Thanks in advance.
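For reference, 692224 = 416 x 416 x 4, so the resized image has four channels rather than three; cv2.imdecode(..., -1) keeps an alpha channel if the file has one. A minimal guard in detect(), assuming the BGRA case, is to drop the extra channel before the transpose:
import cv2

img = cv2.resize(image, IMAGE_SIZE)
# 692224 = 416*416*4: the decode kept an alpha channel, so drop it
# (assumes BGRA input; adjust the conversion flag to how the file was saved)
if img.ndim == 3 and img.shape[2] == 4:
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)
img = img.transpose(2, 0, 1)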
I am trying to implement mAP as the main metric for YOLOv1 training. It ran fine for several epochs and was able to give the mAP value along with the loss for each batch, but after several epochs it crashed, and I can't figure out what went wrong.
This is the error code that I got:
InvalidArgumentError: in user code:
C:\Users\DeepLab\AppData\Local\Temp/ipykernel_11432/1408655327.py:105 mean_average_precision *
if iou > best_iou:
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\autograph\operators\control_flow.py:1172 if_stmt
_tf_if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts)
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\autograph\operators\control_flow.py:1180 _tf_if_stmt
cond = _verify_tf_condition(cond, 'if statement')
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\autograph\operators\control_flow.py:139 _verify_tf_condition
cond = array_ops.reshape(cond, ())
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
return target(*args, **kwargs)
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\ops\array_ops.py:196 reshape
result = gen_array_ops.reshape(tensor, shape, name)
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\ops\gen_array_ops.py:8397 reshape
return reshape_eager_fallback(
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\ops\gen_array_ops.py:8422 reshape_eager_fallback
_result = _execute.execute(b"Reshape", 1, inputs=_inputs_flat, attrs=_attrs,
C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\eager\execute.py:59 quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
InvalidArgumentError: Input to reshape is a tensor with 0 values, but the requested shape has 1 [Op:Reshape]
For calculating mAP, I use these functions:
intersection_over_union is used to return the IoU as a tensor
convert_cellboxes is used to return the label values measured relative to the shape of the image
cellboxes_to_boxes is used to return a list of lists containing 6 values (class_idx, confidence, x, y, w, h)
non_max_suppression is used to return the filtered version of the cellboxes_to_boxes output
get_bboxes is used to return a list containing 7 values (img_idx, class_idx, confidence, x, y, w, h). It is used as the input for calculating mAP.
mean_average_precision is used to calculate mAP.
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import backend as K
from collections import Counter
def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
if box_format == "midpoint":
box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2 ## ==> x - w / 2 for each grid in each image
box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2 ## ==> y - h / 2 for each grid in each image
box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2 ## ==> x + w / 2 for each grid in each image
box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2 ## ==> y + h / 2 for each grid in each image
box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
if box_format == "corners":
box1_x1 = boxes_preds[..., 0:1]
box1_y1 = boxes_preds[..., 1:2]
box1_x2 = boxes_preds[..., 2:3]
box1_y2 = boxes_preds[..., 3:4] # (N, 1)
box2_x1 = boxes_labels[..., 0:1]
box2_y1 = boxes_labels[..., 1:2]
box2_x2 = boxes_labels[..., 2:3]
box2_y2 = boxes_labels[..., 3:4]
x1 = K.max((box1_x1, box2_x1))
y1 = K.max((box1_y1, box2_y1))
x2 = K.min((box1_x2, box2_x2))
y2 = K.min((box1_y2, box2_y2))
intersection = K.clip((x2-x1), min_value=0, max_value=abs(x2-x1)) * K.clip((y2-y1), min_value=0, max_value=abs(y2-y1))
#intersection = 2
box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
return intersection / (box1_area + box2_area - intersection + 1e-6)
def convert_cellboxes(predictions, S=7): #array (n, 7, 7, 30) (n, 7 x 7, 30)
#batch_size = predictions.shape[0]
try:
n = batch_size
predictions = K.reshape(predictions, (n, 7, 7, 30))
except:
n = len(X_val)%batch_size
predictions = K.reshape(predictions, (n, 7, 7, 30))
bboxes1 = predictions[..., 21:25]
bboxes2 = predictions[..., 26:30]
scores = tf.concat(
(tf.expand_dims(predictions[..., 20], 0), tf.expand_dims(predictions[..., 25], 0)), axis=0 #(1, 7, 7, 2)
) ## (n, 7, 7, 2)
best_box = tf.expand_dims(K.argmax(scores, 0), -1)
#print(best_box)
best_boxes = bboxes1 * (1 - best_box) + best_box * bboxes2 ##(7, 7, 4)
cell_indices = tf.expand_dims(tf.tile(tf.range(start=0, limit=7, delta=1), (7,)), -1) # (49, 1) (1, 7, 7, 1)
cell_indices = tf.repeat(tf.reshape(cell_indices, (1, 7, 7, 1)), n, 0) ## reshape from (49, 1) to (n, 7, 7, 1)
best_boxes = tf.cast(best_boxes, tf.float32)
cell_indices = tf.cast(cell_indices, tf.float32)
x = 1 / S * (best_boxes[..., :1] + cell_indices)
y = 1 / S * (best_boxes[..., 1:2] + K.permute_dimensions(cell_indices, (0, 2, 1, 3)))
w_h = 1 / S * best_boxes[..., 2:4]
converted_bboxes = tf.concat((x, y, w_h), axis=-1) # last dimension = 4
predicted_class = tf.expand_dims(K.argmax(predictions[..., :20], -1), -1) #n, 7, 7, 1
best_confidence = tf.expand_dims(K.max((predictions[..., 20], predictions[..., 25]), 0), -1)
predicted_class = tf.cast(predicted_class, tf.float32)
best_confidence = tf.cast(best_confidence, tf.float32)
#print(predicted_class.shape)
#print(best_confidence.shape)
#print(converted_bboxes.shape)
converted_preds = tf.concat(
(predicted_class, best_confidence, converted_bboxes), -1 # n, 7, 7, 6
)
#print(converted_preds.shape)
return converted_preds
def cellboxes_to_boxes(out, S=7):
try:
n = batch_size
converted_pred = K.reshape(convert_cellboxes(out), (n, S * S, -1)) # (n, 49, 6)
except:
n = len(X_val)%batch_size
converted_pred = K.reshape(convert_cellboxes(out), (n, S * S, -1)) # (n, 49, 6)
#print(converted_pred.shape)
converted_pred = converted_pred.numpy() # graph mode
all_bboxes = []
for ex_idx in range(out.shape[0]):
bboxes = []
for bbox_idx in range(S * S):
bboxes.append([x for x in converted_pred[ex_idx, bbox_idx, :]])
all_bboxes.append(bboxes)
return all_bboxes
def non_max_suppression(bboxes, iou_threshold, threshold, box_format="midpoint"):
#bboxes = bboxes[0]
#print(bboxes[:2])
#for i, box in enumerate(bboxes):
# bboxes[i][4:6] = box[4:6] * 7
#print(bboxes[:2])
assert type(bboxes) == list
bboxes = [box for box in bboxes if box[1] > threshold]
bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
bboxes_after_nms = []
while bboxes:
chosen_box = bboxes.pop(0)
bboxes = [
box # (6)
for box in bboxes
if box[0] != chosen_box[0]
or intersection_over_union(
tf.constant(box[2:]),
tf.constant(chosen_box[2:]),
box_format=box_format,
)
< iou_threshold
]
bboxes_after_nms.append(chosen_box)
return bboxes_after_nms
def get_bboxes(gt_labels, pred_labels, iou_threshold, threshold, box_format="midpoint"):
"""
return:
images_pred_boxes = list with each element in this format (image_idx, class_prediction, prob_score, x, y, w, h)
images_gt_boxes = list with each element in this format (image_idx, class, prob_score, x, y, w, h)
"""
images_pred_boxes = []
images_gt_boxes = []
#pred_labels = model.predict(images) # data training, validation, testing
image_idx = 0
gt_boxes = cellboxes_to_boxes(gt_labels)
pred_boxes = cellboxes_to_boxes(pred_labels)
for i in range(len(gt_labels)):
pred_box_nms = non_max_suppression(pred_boxes[i], iou_threshold, threshold, box_format="midpoint")
for nms_box in pred_box_nms:
images_pred_boxes.append([image_idx] + nms_box)
for box in gt_boxes[i]:
if box[1] > threshold:
images_gt_boxes.append([image_idx] + box)
image_idx += 1
#print(images_pred_boxes[:10])
#print(images_gt_boxes[:10])
return images_pred_boxes, images_gt_boxes
def mean_average_precision(
y_true, y_pred, iou_threshold=0.5, box_format="midpoint", num_classes=20
):
pred_boxes, true_boxes = get_bboxes(y_true, y_pred, iou_threshold=0.6, threshold=0.3, box_format="midpoint")
# list storing all AP for respective classes
average_precisions = []
# used for numerical stability later on
epsilon = 1e-6
for c in range(num_classes):
detections = []
ground_truths = []
# Go through all predictions and targets,
# and only add the ones that belong to the
# current class c
for detection in pred_boxes:
if detection[1] == c:
detections.append(detection)
for true_box in true_boxes:
if true_box[1] == c:
ground_truths.append(true_box)
# find the amount of bboxes for each training example
# Counter here finds how many ground truth bboxes we get
# for each training example, so let's say img 0 has 3,
# img 1 has 5 then we will obtain a dictionary with:
# amount_bboxes = {0:3, 1:5, ..., 20: 10}
amount_bboxes = Counter([gt[0] for gt in ground_truths])
# We then go through each key, val in this dictionary
# and convert to the following (w.r.t same example):
# amount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
for key, val in amount_bboxes.items():
amount_bboxes[key] = np.zeros(val)
# sort by box probabilities which is index 2
detections.sort(key=lambda x: x[2], reverse=True)
TP = np.zeros((len(detections)))
FP = np.zeros((len(detections)))
total_true_bboxes = len(ground_truths)
# If none exists for this class then we can safely skip
if total_true_bboxes == 0:
continue
for detection_idx, detection in enumerate(detections):
# Only take out the ground_truths that have the same
# training idx as detection
ground_truth_img = [
bbox for bbox in ground_truths if bbox[0] == detection[0]
]
num_gts = len(ground_truth_img) #
best_iou = 0
best_gt_idx = 0
iou = 0
for idx, gt in enumerate(ground_truth_img):
iou = intersection_over_union(
tf.constant(detection[3:]),
tf.constant(gt[3:]),
box_format=box_format,
)
if iou > best_iou:
best_iou = iou
best_gt_idx = idx
if best_iou > iou_threshold:
# only detect ground truth detection once
if amount_bboxes[detection[0]][best_gt_idx] == 0:
# true positive and add this bounding box to seen
TP[detection_idx] = 1
amount_bboxes[detection[0]][best_gt_idx] = 1
else:
FP[detection_idx] = 1
# if IOU is lower then the detection is a false positive
else:
FP[detection_idx] = 1
TP = tf.constant(TP)
FP = tf.constant(FP)
#print(TP)
#print(FP)
TP_cumsum = tf.cumsum(TP, axis=0)
FP_cumsum = tf.cumsum(FP, axis=0)
recalls = TP_cumsum / (total_true_bboxes + epsilon)
precisions = tf.math.divide(TP_cumsum, (TP_cumsum + FP_cumsum + epsilon))
precisions = tf.concat((tf.cast(tf.constant([1]), precisions.dtype), precisions), axis=0)
recalls = tf.concat((tf.cast(tf.constant([0]), recalls.dtype), recalls), axis=0)
# torch.trapz for numerical integration
average_precisions.append(tfp.math.trapz(precisions, recalls))
return sum(average_precisions) / len(average_precisions)
For training, I used a standard model.fit with PASCAL VOC 2007 as the dataset and a batch size of 4.
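On the InvalidArgumentError itself: autograph reshapes the condition of if iou > best_iou to a scalar, and the message says the condition tensor has 0 elements, so intersection_over_union occasionally returns an empty tensor (for example, when detection[3:] or gt[3:] comes out empty for a degenerate box). Since cellboxes_to_boxes already calls .numpy(), this metric must run eagerly, and a hedged guard before the comparison, assuming an empty slice is the trigger, could look like:
iou = intersection_over_union(tf.constant(detection[3:]),
                              tf.constant(gt[3:]),
                              box_format=box_format)
# skip degenerate boxes: an empty IoU tensor cannot be used as an if-condition
if int(tf.size(iou)) > 0:
    iou_val = float(tf.reshape(iou, [-1])[0])
    if iou_val > best_iou:
        best_iou = iou_val
        best_gt_idx = idx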
I am working on an artificial neural network which I have created via subclassing.
The subclassing looks like this:
import time
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import scipy.stats as si
import sympy as sy
from sympy.stats import Normal, cdf
from sympy import init_printing
class DGMNet(tf.keras.Model):
def __init__(self, n_layers, n_nodes, dimensions=1):
"""
Parameters:
- n_layers: number of layers
- n_nodes: number of nodes in (inner) layers
- dimensions: number of spacial dimensions
"""
super().__init__()
self.n_layers = n_layers
self.initial_layer = DenseLayer(dimensions + 1, n_nodes, activation="relu")
self.lstmlikelist = []
for _ in range(self.n_layers):
self.lstmlikelist.append(LSTMLikeLayer(dimensions + 1, n_nodes, activation="relu"))
self.final_layer = DenseLayer(n_nodes, 1, activation=None)
def call(self, t, x):
X = tf.concat([t,x], 1)
S = self.initial_layer.call(X)
for i in range(self.n_layers):
S = self.lstmlikelist[i].call({'S': S, 'X': X})
result = self.final_layer.call(S)
return result
class DenseLayer(tf.keras.layers.Layer):
def __init__(self, n_inputs, n_outputs, activation):
"""
Parameters:
- n_inputs: number of inputs
- n_outputs: number of outputs
- activation: activation function
"""
super(DenseLayer, self).__init__()
self.n_inputs = n_inputs
self.n_outputs = n_outputs
self.W = self.add_weight(shape=(self.n_inputs, self.n_outputs),
initializer='random_normal',
trainable=True)
self.b = self.add_weight(shape=(1, self.n_outputs),
initializer='random_normal',
trainable=True)
self.activation = _get_function(activation)
def call(self, inputs):
S = tf.add(tf.matmul(inputs, self.W), self.b)
S = self.activation(S)
return S
class LSTMLikeLayer(tf.keras.layers.Layer):
def __init__(self, n_inputs, n_outputs, activation):
"""
Parameters:
- n_inputs: number of inputs
- n_outputs: number of outputs
- activation: activation function
"""
super(LSTMLikeLayer, self).__init__()
self.n_outputs = n_outputs
self.n_inputs = n_inputs
self.Uz = self.add_variable("Uz", shape=[self.n_inputs, self.n_outputs])
self.Ug = self.add_variable("Ug", shape=[self.n_inputs, self.n_outputs])
self.Ur = self.add_variable("Ur", shape=[self.n_inputs, self.n_outputs])
self.Uh = self.add_variable("Uh", shape=[self.n_inputs, self.n_outputs])
self.Wz = self.add_variable("Wz", shape=[self.n_outputs, self.n_outputs])
self.Wg = self.add_variable("Wg", shape=[self.n_outputs, self.n_outputs])
self.Wr = self.add_variable("Wr", shape=[self.n_outputs, self.n_outputs])
self.Wh = self.add_variable("Wh", shape=[self.n_outputs, self.n_outputs])
self.bz = self.add_variable("bz", shape=[1, self.n_outputs])
self.bg = self.add_variable("bg", shape=[1, self.n_outputs])
self.br = self.add_variable("br", shape=[1, self.n_outputs])
self.bh = self.add_variable("bh", shape=[1, self.n_outputs])
self.activation = _get_function(activation)
def call(self, inputs):
S = inputs['S']
X = inputs['X']
Z = self.activation(tf.add(tf.add(tf.matmul(X, self.Uz), tf.matmul(S, self.Wz)), self.bz))
G = self.activation(tf.add(tf.add(tf.matmul(X, self.Ug), tf.matmul(S, self.Wg)), self.bg))
R = self.activation(tf.add(tf.add(tf.matmul(X, self.Ur), tf.matmul(S, self.Wr)), self.br))
H = self.activation(tf.add(tf.add(tf.matmul(X, self.Uh), tf.matmul(tf.multiply(S, R), self.Wh)), self.bh))
Snew = tf.add(tf.multiply(tf.subtract(tf.ones_like(G), G), H), tf.multiply(Z, S))
return Snew
def _get_function(name):
f = None
if name == "tanh":
f = tf.nn.tanh
elif name == "sigmoid":
f = tf.nn.sigmoid
elif name == "relu":
f = tf.nn.relu
elif not name:
f = tf.identity
assert f is not None
return f
# Sampling
def sampler(N1, N2, N3):
np.random.seed(42)
# Sampler #1: PDE domain
t1 = np.random.uniform(low=T0,
high=T,
size=[N1,1])
s1 = np.random.uniform(low=S1,
high=S2,
size=[N1,1])
# Sampler #2: boundary condition
t2 = np.zeros(shape=(1, 1))
s2 = np.zeros(shape=(1, 1))
# Sampler #3: initial/terminal condition
t3 = T * np.ones((N3,1)) #Terminal condition
s3 = np.random.uniform(low=S1,
high=S2,
size=[N3,1])
return (t1, s1, t2, s2, t3, s3)
# Loss function
def loss(model, t1, x1, t2, x2, t3, x3):
# Loss term #1: PDE
V = model(t1, x1)
V_t = tf.gradients(V, t1)[0]
V_x = tf.gradients(V, x1)[0]
V_xx = tf.gradients(V_x, x1)[0]
f = V_t + r*x1*V_x + 0.5*sigma**2*x1**2*V_xx - r*V
L1 = tf.reduce_mean(tf.square(f))
# Loss term #2: boundary condition
#L2 = tf.reduce_mean(tf.square(V))
# Loss term #3: initial/terminal condition
L3 = tf.reduce_mean(tf.square(model(t3, x3) - tf.math.maximum(x3-K,0)))
return (L1, L3)
# B-S's analytical known solution
def analytical_solution(t, x):
# C = S*N(d1) - K*exp(-r*T)*N(d2)
#S: spot price
#K: strike price
#T: time to maturity
#r: interest rate
#sigma: volatility of underlying asset
d1 = (np.log(x / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
d2 = (np.log(x / K) + (r - 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
call = (x * si.norm.cdf(d1, 0.0, 1.0) - K * np.exp(-r * T) * si.norm.cdf(d2, 0.0, 1.0))
return call
# Set random seeds
np.random.seed(42)
tf.random.set_seed(42)
# Strike price
K = 0.5
# PDE parameters
r = 0.05 # Interest rate
sigma = 0.25 # Volatility
# Time limits
T0 = 0.0 + 1e-10 # Initial time
T = 1.0 # Terminal time
# Space limits
S1 = 0.0 + 1e-10 # Low boundary
S2 = 1.0 # High boundary
# Number of samples
NS_1 = 1000
NS_2 = 0
NS_3 = 100
t1, s1, t2, s2, t3, s3 = sampler(NS_1, NS_2, NS_3)
Now what I want to do is iterate over different parameters and create a new ANN for each iteration.
My plan was to do it in this way:
tf.compat.v1.disable_eager_execution()
t1_t = tf.compat.v1.placeholder(tf.float32, [None,1])
x1_t = tf.compat.v1.placeholder(tf.float32, [None,1])
t2_t = tf.compat.v1.placeholder(tf.float32, [None,1])
x2_t = tf.compat.v1.placeholder(tf.float32, [None,1])
t3_t = tf.compat.v1.placeholder(tf.float32, [None,1])
x3_t = tf.compat.v1.placeholder(tf.float32, [None,1])
volatility_list = [0.08]#[0.08, 0.16, 0.18, 0.2, 0.28]
stages_list = [10]#, 50, 100]
layers_list = [3]#, 5, 7]
npl_list = [3]#, 6, 9, 12, 15]
for sigma in volatility_list:
for st in stages_list:
for lay in layers_list:
for npl in npl_list:
# Neural Network definition
num_layers = lay
nodes_per_layer = npl
ann = DGMNet(num_layers, nodes_per_layer)
L1_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
loss_t = L1_t + L3_t
# Optimizer parameters
global_step = tf.Variable(1, trainable=False)
starter_learning_rate = 0.001
learning_rate = tf.compat.v1.train.exponential_decay(starter_learning_rate, global_step,
100000, 0.96, staircase=True)
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss_t)
# Training parameters
steps_per_sample = st
sampling_stages = 100#2000
# Plot tensors
tplot_t = tf.compat.v1.placeholder(tf.float32, [None,1], name="tplot_t") # We name to recover it later
xplot_t = tf.compat.v1.placeholder(tf.float32, [None,1], name="xplot_t")
vplot_t = tf.identity(ann(tplot_t, xplot_t), name="vplot_t") # Trick for naming the trained model
# Training data holders
sampling_stages_list = []
elapsed_time_list = []
loss_list = []
L1_list = []
L3_list = []
# Train network!!
init_op = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init_op)
for i in range(sampling_stages):
t1, x1, t2, x2, t3, x3 = sampler(NS_1, NS_2, NS_3)
start_time = time.clock()
for _ in range(steps_per_sample):
loss, L1, L3, _ = sess.run([loss_t, L1_t, L3_t, optimizer],
feed_dict = {t1_t:t1, x1_t:x1, t2_t:t2, x2_t:x2, t3_t:t3, x3_t:x3})
end_time = time.clock()
elapsed_time = end_time - start_time
sampling_stages_list.append(i)
elapsed_time_list.append(elapsed_time)
loss_list.append(loss)
L1_list.append(L1)
L3_list.append(L3)
text = "Stage: {:04d}, Loss: {:e}, L1: {:e}, L3: {:e}, {:f} seconds".format(i, loss, L1, L3, elapsed_time)
print(text)
#goodness of fit
time_0 = 0
listofzeros = [time_0] * 100
prices_for_goodness = np.linspace(S1,S2, 100)
goodness_list = []
solution_goodness = analytical_solution(listofzeros, prices_for_goodness)
ttt = time_0*np.ones_like(prices_for_goodness.reshape(-1,1))
nn_goodness, = sess.run([vplot_t],
feed_dict={tplot_t:ttt, xplot_t:prices_for_goodness.reshape(-1,1)})
deviation_list = np.abs(solution_goodness - nn_goodness)/(T-T0)
print("{0:.2f}%".format(np.average(deviation_list)*100))
Unfortunately, as soon as the first iteration ends, I get a TypeError saying that a 'numpy.float32' object is not callable.
Error Traceback:
TypeError Traceback (most recent call last)
<ipython-input-14-bb14643d0c42> in <module>()
10
11
---> 12 L1_t, L3_t = loss(ann, t1_t, x1_t, t2_t, x2_t, t3_t, x3_t)
13 loss_t = L1_t + L3_t
14
TypeError: 'numpy.float32' object is not callable
I guess the problem is with the creation of the placeholders; however, I am not sure how to solve it. Maybe one of you can help me.
Thanks in advance!
Chris
Did you create a variable called 'loss'? It seems the loss function is shadowed by a variable with the same name, so Python then tries to call that variable as a function.
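That matches the training loop above: loss, L1, L3, _ = sess.run([loss_t, L1_t, L3_t, optimizer], ...) rebinds the module-level name loss to a numpy.float32, so on the next iteration loss(ann, ...) calls that scalar. A minimal fix, keeping everything else unchanged, is to fetch into different names:
for _ in range(steps_per_sample):
    # use loss_val etc. so the loss() function is not shadowed
    loss_val, L1_val, L3_val, _ = sess.run(
        [loss_t, L1_t, L3_t, optimizer],
        feed_dict={t1_t: t1, x1_t: x1, t2_t: t2, x2_t: x2, t3_t: t3, x3_t: x3})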
Good day,
I'm developing a deep learning model for wireless signal detection. Below is a snippet of the function that computes the model accuracy and bit error rate (BER):
from chainer.datasets import TupleDataset
import numpy as np
import chainer
from chainer import cuda
from chainer import function
def get_idp_acc(model, dataset_tuple, comp_ratio, profile = None, batchsize = 128, gpu = -1):
chainer.config.train = True
xp = np if gpu < 0 else cuda.cupy
x, indices, x_zf, HtH, Hty = dataset_tuple._datasets[0], dataset_tuple._datasets[1], dataset_tuple._datasets[2], dataset_tuple._datasets[3], dataset_tuple._datasets[4]
accs = 0
BERs = 0
model.train = False
for j in range(0, len(x), batchsize):
x_batch = xp.array(x[j:j + batchsize])
indices_batch = xp.array(indices[j:j + batchsize])
x_zf_batch = xp.array(x_zf[j:j + batchsize])
HtH_batch = xp.array(HtH[j:j + batchsize])
Hty_batch = xp.array(Hty[j:j + batchsize])
if profile == None:
acc_data = model(x_batch, indices_batch, x_zf_batch, HtH_batch, Hty_batch, comp_ratio = comp_ratio,
ret_param = 'acc')
else:
acc_data = model(x_batch, indices_batch, x_zf_batch, HtH_batch, Hty_batch, comp_ratio = comp_ratio,
ret_param = 'acc', profile = profile)
acc_data.to_cpu()
acc = acc_data.data
BER = 1.0 - acc
accs += acc * len(x_batch)
BERs += BER * len(x_batch)
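# NOTE: BERs is accumulated but never returned; if the caller needs the
# bit error rate as well, also return BERs / len(x) here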
return (accs / len(x)) * 100.
When the code is run, I get the following error despite having imported all the required chainer modules. I really need your help on this issue, as I've been stuck for nearly two months without making any headway in my project.
Traceback (most recent call last):
File "/Users/mac/Documents/idp_detnet/examples/run_mlp.py", line 14, in <module>
mlp.run(args)
File "/Users/mac/Documents/idp_detnet/examples/mlp.py", line 39, in run
acc_dict[name], BER_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 107, in sweep_idp
batchsize=args.batchsize, profile=profile))
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 83, in get_idp_acc
acc_data.to_cpu()
AttributeError: 'numpy.float32' object has no attribute 'to_cpu'
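The error itself says acc_data is already a numpy.float32: the accuracy() helper in the model code further below returns np.mean(...).astype(np.float32), which is a plain NumPy scalar with no to_cpu() method, not a chainer.Variable. A minimal guard in get_idp_acc(), assuming both return types can occur, is:
# acc_data may be a chainer.Variable or a plain NumPy scalar
if isinstance(acc_data, chainer.Variable):
    acc_data.to_cpu()
    acc = float(acc_data.data)
else:
    acc = float(acc_data)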
Below is additional information providing the code for the model definition:
K = 10
num_layers = 3*K
def lin_soft_sign(x, t):
'''Linear soft sign activation function from the original paper Eq. (11)'''
y = -1 + F.relu(x + t)/ F.absolute(t) - F.relu(- t)/ F.absolute(t)
return y
def accuracy(x, y):
'''Computes the fraction of elements for which x and y are equal'''
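# NOTE: np.mean(...).astype(np.float32) is a plain NumPy scalar, not a
# chainer.Variable, which is why acc_data.to_cpu() fails in get_idp_acc()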
return np.mean(np.equal(x, y)).astype(np.float32)
class MLP(chainer.Chain):
def __init__(self, K, coeff_generator, profiles = None, z_dims = 8*K, v_dims = 2*K):
super(MLP, self).__init__()
if profiles == None:
profiles = [(0, 10)]
self.coeff_generator = coeff_generator
self.z_dims = z_dims
self.v_dims = v_dims
self.K = K
self.profiles = profiles
self.profile = 0
with self.init_scope():
self.p0_l1 = IncompleteLinear(None, self.z_dims)
self.p1_l1 = IncompleteLinear(None, self.z_dims)
self.p2_l1 = IncompleteLinear(None, self.z_dims)
self.p0_lv = IncompleteLinear(None, self.v_dims)
self.p1_lv = IncompleteLinear(None, self.v_dims)
self.p2_lv = IncompleteLinear(None, self.v_dims)
self.p0_l3 = IncompleteLinear(None, self.K)
self.p1_l3 = IncompleteLinear(None, self.K)
self.p2_l3 = IncompleteLinear(None, self.K)
def __call__(self, x, indices, x_zf, HtH, Hty, ret_param = 'loss', profile = None, comp_ratio = None):
if profile == None:
profile = self.profile
# Form Zero-forcing detection
err_rel = F.sum((x - x_zf)**2, axis = 1)
params = layer_profile(self.coeff_generator,
*self.profiles[profile], self.z_dims,
self.v_dims, comp_ratio)
def detnet_layer(x_d, x_logit, v, z_dims, v_dims):
HtH_x = np.matmul(HtH, np.expand_dims(x_d.data, axis = 2).astype(np.float32))
HtH_x = F.squeeze(HtH_x, axis = -1)
#x_concat = np.concatenate([Hty, x, HtH_x, v], axis=1)
x_concat = F.concat([Hty, x_d, HtH_x, v], axis = 1)
if profile == 0:
z = F.relu(self.p0_l1(x_concat))
v += self.p0_lv(z, *params)
x_logit += self.p0_l3(z, *params)
x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
elif profile == 1:
z = F.relu(self.p1_l1(x_concat))
v += self.p1_lv(z, *params)
x_logit += self.p1_l3(z, *params)
x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
elif profile == 2:
z = F.relu(self.p2_l1(x_concat))
v += self.p2_lv(z, *params)
x_logit += self.p2_l3(z, *params)
x = lin_soft_sign(x_logit, F.broadcast_to(np.ones(1).astype(np.float32), x_logit.shape))
return x, x_logit, v
x_k = np.zeros((Hty.shape[0], self.K), dtype = np.float32)
x_k_logit = np.zeros((Hty.shape[0], self.K), dtype = np.float32)
v = np.zeros((Hty.shape[0], self.v_dims), dtype = np.float32)
loss = 0
mod = sg.Modulator('BPSK', K)
for k in range(1, num_layers + 1):
x_k, x_k_logit, v = detnet_layer(x_k, x_k_logit, v, self.z_dims, self.v_dims)
err = F.sum((x - x_k)**2, 1)
loss += (np.log(k)).astype(np.float32) * F.mean(err/err_rel)
report = {'loss': loss, 'acc': accuracy(mod.demodulate(x_k.data), indices)}
reporter.report(report, self)
return report[ret_param]
def report_params(self):
return ['validation/main/acc']
def param_names(self):
if len(self.profiles) > 1:
return 'IDPDETNET_{}_{}_{}_p{}'.format(self.z_dims, self.v_dims, self.coeff_generator.__name__, len(self.profiles))
return 'IDPDETNET_{}_{}_{}'.format(self.z_dims, self.v_dims, self.coeff_generator.__name__)
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import visualize as vz
import idp.coeffs_generator as cg
from net import MLP
import util
K = 10
N = 4
v_dims = 2*K
z_dims = 8*K
SNR_dB_tmin = -4
SNR_dB_tmax = 24
SNR_dB_test = np.linspace(SNR_dB_tmin, SNR_dB_tmax, 8)
num_snr_test = len(SNR_dB_test)
def run(args):
train, test = util.get_dataset(args.modeltype)
names = ['all-one (standard)', 'linear']
colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
models = [
MLP.MLP(K, cg.uniform, z_dims = 8*K, v_dims = 2*K),
MLP.MLP(K, cg.linear, z_dims = 8*K, v_dims = 2*K)
]
comp_ratios = np.linspace(0.1, 1.0, 20)
acc_dict = {}
BER_dict = {}
ratios_dict = {}
for i in range(num_snr_test):
for name, model in zip(names, models):
util.load_or_train_model(model, train, test, args)
acc_dict[name], BER_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
ratios_dict[name] = [100. * cr for cr in comp_ratios]
filename = "IDPDETNET1_{}".format(args.modeltype)
vz.plot(ratios_dict, acc_dict, names, filename, colors = colors,
folder = args.figure_path, ext=args.ext,
title = 'IDPDETNET (BPSK)',
xlabel = 'IDP (%)',
ylabel = 'Test Accuracy (%)', ylim = (0, 100))
filename = "IDPDETNET2_{}".format(args.modeltype)
vz.plot(ratios_dict, BER_dict, names, filename, colors = colors,
folder=args.figure_path, ext=args.ext,
title='IDPDETNET (BPSK)',
xlabel='IDP (%)',
ylabel='BER (bits/sec)')
filename = "IDPDETNET3_{}".format(args.modeltype)
vz.plot(num_snr_test, BER_dict, names, filename, colors = colors,
folder = args.figure_path, ext = args.ext,
title = 'IDPDETNET (BPSK)',
xlabel = 'SNR (dB)',
ylabel = ' BER (bits/sec)')
if __name__ == '__main__':
args = util.default_parser('IDPDETNET Example').parse_args()
run(args)
Hi Seiya Tokui. Thank you for your kind input. Here is the model definition based on the above code:
model = MLP.MLP(K, cg.uniform, z_dims = 8*K, v_dims = 2*K)
OR
model = MLP.MLP(K, cg.linear, z_dims = 8*K, v_dims = 2*K)
Hi #BloodyD. Thanks for your brilliant contributions. The model started training, but then later returned the following error:
1 nan nan 0.50108 5.85448
Traceback (most recent call last):
File "run_mlp.py", line 14, in <module>
mlp.run(args)
File "/Users/mac/Documents/idp_detnet/examples/mlp.py", line 38, in run
util.load_or_train_model(model, train, test, args)
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 204, in load_or_train_model
train_model(model, train, test, args)
File "/Users/mac/Documents/idp_detnet/examples/util.py", line 184, in train_model
return eval(fp.read().replace('\n', ''))
File "<string>", line 1, in <module>
NameError: name 'NaN' is not defined
The error occurs in the last line of the code snippet below:
name = model.param_names()
save_model(model, os.path.join(args.model_path, name))
chainer.config.train = False
with open(os.path.join(args.out, 'log'), 'r') as fp:
return eval(fp.read().replace('\n', ''))
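The first report line already shows nan losses, so the log file that gets eval()'d back contains nan/NaN literals, and eval() has no such names in scope, hence the NameError. A hedged workaround is to supply those names to eval (the underlying problem, though, is the loss diverging to nan in the first place):
with open(os.path.join(args.out, 'log'), 'r') as fp:
    # define the NaN/inf literals that a diverged training run writes out
    return eval(fp.read().replace('\n', ''),
                {'nan': float('nan'), 'NaN': float('nan'), 'inf': float('inf')})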
I am a beginner in TensorFlow and I am trying to train a model using mini-batches. To do that, I created a generator and iterate over it. The problem I encounter is that training seems fast at the beginning of the epoch (many batches per second) and then slows down (to about 1 batch per second), so I am wondering where I went wrong in my code, but I cannot find the problem.
import math
import random
from collections import Counter
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from scipy.io import wavfile
from sklearn.metrics import precision_recall_fscore_support
from IPython.display import clear_output
def prepare_data(filename):
'''load file which give path and label for the data'''
f = open(filename, 'r')
data = [line.split() for line in f]
feat =[]
label=[]
for l in data:
feat.append(l[0])
label.append(l[1])
n_samples = len(feat)
shuf = list(range(n_samples))
random.shuffle(shuf)
count = Counter(label)
print(count)
feature = [feat[i] for i in shuf]
label = np.array(label, dtype=np.int)
return feature, label[shuf]
def get_specgrams(paths, nsamples=16000):
'''
Given list of paths, return specgrams.
'''
# read the wav files
wavs = [wavfile.read(x)[1] for x in paths]
# zero pad the shorter samples and cut off the long ones.
data = []
for wav in wavs:
if wav.size < 16000:
d = np.pad(wav, (nsamples - wav.size, 0), mode='constant')
else:
d = wav[0:nsamples]
data.append(d)
# get the specgram
#specgram = [signal.spectrogram(d, nperseg=256, noverlap=128)[2] for d in data]
#specgram = [s.reshape(129, 124, -1) for s in specgram]
return np.asarray(data)
def get_specgram(path, nsamples=16000):
'''
Given path, return specgrams.
'''
# read the wav files
wav = wavfile.read(path)[1]
# zero pad the shorter samples and cut off the long ones.
if wav.size < 16000:
d = np.pad(wav, (nsamples - wav.size, 0), mode='constant')
else:
d = wav[0:nsamples]
# get the specgram
#specgram = [signal.spectrogram(d, nperseg=256, noverlap=128)[2] for d in data]
#specgram = [s.reshape(129, 124, -1) for s in specgram]
return d
# multci classification binary labels
def one_hot_encode(labels, n_unique_labels=31):
n_labels = len(labels)
#print('number of unique labels:', n_unique_labels)
one_hot_encode = np.zeros((n_labels,n_unique_labels))
one_hot_encode[np.arange(n_labels), labels] = 1
return np.array(one_hot_encode, dtype=np.int)
#create_path_file('train/audio/')
def model(tr_features, tr_labels, ts_features, ts_labels):
# remove gpu device error
config = tf.ConfigProto(allow_soft_placement = True)
# parameters
BATCH_SIZE = 4
number_loop = math.ceil(len(tr_features)/BATCH_SIZE)
training_epochs = 10
n_dim = 16000
n_classes = 31 #len(np.unique(ts_labels))
n_hidden_units_one = 280
n_hidden_units_two = 300
sd = 1 / np.sqrt(n_dim)
learning_rate = 0.1
# get test data
ts_features, ts_labels = get_data(ts_features, ts_labels)
# Model
X = tf.placeholder(tf.float32,[None,n_dim])
Y = tf.placeholder(tf.float32,[None,n_classes])
W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)
W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))
y_ = tf.nn.softmax(tf.matmul(h_2,W) + b)
init = tf.initialize_all_variables()
# function and optimizers
cost_function = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# train loop
cost_history = np.empty(shape=[1],dtype=float)
y_true, y_pred = None, None
with tf.Session(config = config) as sess:
sess.run(init)
for epoch in range(training_epochs):
print(' ## Epoch n°', epoch+1 )
batch = batch_generator(BATCH_SIZE, tr_features, tr_labels)
acc_total = 0.0
for cpt, (train_features_batch, train_labels_batch) in enumerate(batch):
_,cost = sess.run([optimizer,cost_function],feed_dict={X:train_features_batch,Y:train_labels_batch})
cost_history = np.append(cost_history,cost)
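# NOTE: the two lines below re-create tf.equal/tf.reduce_mean ops on every
# batch, so the graph grows each iteration and sess.run gets slower;
# the accuracy op already defined above the session can be reused instead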
correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
acc = accuracy.eval(feed_dict={X:train_features_batch,Y:train_labels_batch})
acc_total = (acc_total*cpt + acc)/(cpt+1)
print('Train accuracy : ', acc_total, '[',str(cpt+1), '/',str(number_loop), ']' ,flush=True, end='\r')
clear_output()
print('Train accuracy : ', acc_total)
y_pred = sess.run(tf.argmax(y_,1),feed_dict={X: ts_features})
y_true = sess.run(tf.argmax(ts_labels,1))
print('Test accuracy: ', round(sess.run(accuracy, feed_dict={X: ts_features, Y: ts_labels}) , 3))
fig = plt.figure(figsize=(10,8))
plt.plot(cost_history)
plt.axis([0,training_epochs,0,np.max(cost_history)])
plt.show()
p,r,f,s = precision_recall_fscore_support(y_true, y_pred, average='micro')
print("F-Score:", round(f,3))
def batch_generator(batch_size, feat_path, labels):
n_sample = len(feat_path)
ite = math.ceil(n_sample/batch_size)
for i in range(0, ite):
if i == ite-1:
label = one_hot_encode(labels[-batch_size:])
feat = get_specgrams(feat_path[-batch_size:])
yield (feat, label)
else:
label = one_hot_encode(labels[i*batch_size:i*batch_size+batch_size])
feat = get_specgrams(feat_path[i*batch_size:i*batch_size+batch_size])
yield (feat, label)
def get_data(feat_path, labels):
feat = get_specgrams(feat_path)
label = one_hot_encode(labels)
return feat, label
def __main__():
print('## Load data and shuffle')
feat_path, labels = prepare_data('data_labelised2.txt')
idx = int(len(labels)*0.8)
print("## Create Model")
model(feat_path[0:idx], labels[0:idx], feat_path[idx+1:], labels[idx+1:])
with tf.device('/gpu:0'):
__main__()
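A likely cause of the slowdown, flagged in the training loop above: new accuracy ops are added to the graph on every batch, so each successive sess.run has a bigger graph to handle. A hedged restructuring of the inner loop that reuses the ops built before the session looks like:
with tf.Session(config=config) as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        batch = batch_generator(BATCH_SIZE, tr_features, tr_labels)
        acc_total = 0.0
        for cpt, (train_features_batch, train_labels_batch) in enumerate(batch):
            # run the pre-built ops; no new tf.* calls inside the loop
            _, cost, acc = sess.run([optimizer, cost_function, accuracy],
                                    feed_dict={X: train_features_batch,
                                               Y: train_labels_batch})
            cost_history = np.append(cost_history, cost)
            acc_total = (acc_total * cpt + acc) / (cpt + 1)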