How to slice a TensorFlow tensor into multiple slices - tensorflow

The tensor has shape batch(3) * length(5) * dim(2):
tensor = tf.constant([[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]]])
I want to get all slices by length index [0,0],[0,1] ... [3,4],[4,4] according to the length-axis indices [0,1,2,3,4]; the operation looks like:
spans_length = 0
with tf.variable_scope("loss_span"):
    output = []
    for i in range(0, 1 + n_spans):
        for j in range(1, seq_length):
            if j + i < seq_length:
                res = tf.slice(output_layer_sequence, [0, j, 0], [-1, j+i-j+1, -1])
                res = tf.reduce_sum(res, axis=1)
                output.append(res)
                # output = tf.convert_to_tensor(output)
                spans_length += 1
    output = tf.convert_to_tensor(output)
    vsp = tf.transpose(output, [1, 0, 2])  # batch, spans_length, hidden_size
    vsp = tf.reshape(vsp, [-1, hidden_size])  # batch * spans_length, hidden_size
    span_logits = tf.matmul(vsp, output_span_weight, transpose_b=True)  # output: [batch * spans_length, class_labels]
    span_logits = tf.nn.bias_add(span_logits, output_span_bias)  # output: [batch * spans_length, class_labels]
    span_matrix = tf.reshape(span_logits, [-1, spans_length, class_labels], name="span_matrix_val")  # [batch, spans_length, class_labels]
    label_span_logists = tf.one_hot(indices=label_span, depth=class_labels, on_value=1, off_value=0, axis=-1, dtype=tf.int32)
    label_span_logists = tf.cast(label_span_logists, tf.int64)
    span_loss = tf.nn.softmax_cross_entropy_with_logits(logits=span_matrix, labels=label_span_logists)
    span_loss = tf.reduce_mean(span_loss, name='loss_span')
When I run this operation, training the model takes a very long time. How can I speed it up? Thanks.

This code works:
# tensor = tf.constant([[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]],[[1,1],[2,2],[3,3],[4,4],[5,5]]])
tensor = tf.random.uniform((3, 2000, 2))
length = tf.shape(tensor)[1].numpy()
output = []
for begins in range(length):
    for size in range(length - begins):
        res = tf.slice(tensor, [0, begins, 0], [-1, size + 1, -1])
        res = tf.reduce_sum(res)
        output.append(res)
output = tf.convert_to_tensor(output)
I tried to use tf.scan(), but I don't see any benefits:
output = tf.constant([], tf.int32)
for begins in range(length):
    t = tensor[:, begins:, :]
    t = tf.transpose(t, (1, 0, 2))
    t = tf.scan(lambda a, x: a + x, t)
    t = tf.transpose(t, (1, 0, 2))
    t = tf.reduce_sum(t, [0, 2])
    output = tf.concat([output, t], 0)
Edits:
Tried to apply reduce_sum() over the unused dimensions [0, 2] as a preprocessing step:
tensor = tf.reduce_sum(tensor, [0, 2])
output = tf.constant([])
for begins in range(length):
    t = tensor[begins:]
    t = tf.scan(lambda a, x: a + x, t)
    output = tf.concat([output, t], 0)
Still don't see performance benefits.

for i in range(0, 50):
    for j in range(1, 200):
        if j + i < 200:
            res = tf.slice(output_layer_sequence, [0, j, 0], [-1, j+i-j+1, -1])
            res = tf.reduce_sum(res, axis=1)
            output.append(res)
output = tf.convert_to_tensor(output)
When I run this operation, training takes a very long time. How can I speed it up? Thanks.
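For what it's worth, the span sums themselves can be computed with no Python-level loop at all. Below is a minimal sketch (an illustration, not the original code) that assumes the goal is the sum over every contiguous span along the length axis; note that it materializes a (batch, length, length, dim) intermediate, so for length=2000 you would want to restrict the span width before masking:

import tensorflow as tf

tensor = tf.random.uniform((3, 2000, 2))
length = tensor.shape[1]

# prefix sums with a leading zero row: csum[:, k, :] = sum of tensor[:, :k, :]
csum = tf.concat([tf.zeros_like(tensor[:, :1, :]), tf.cumsum(tensor, axis=1)], axis=1)

# the span (i, j) sums to csum[:, j + 1, :] - csum[:, i, :];
# broadcasting builds all (i, j) pairs at once: (batch, length, length, dim)
spans = csum[:, tf.newaxis, 1:, :] - csum[:, :-1, tf.newaxis, :]

# keep only the valid spans with i <= j
idx = tf.range(length)
mask = idx[:, tf.newaxis] <= idx[tf.newaxis, :]
valid = tf.boolean_mask(spans, mask, axis=1)  # (batch, n_spans, dim)

Here valid already matches the stacked per-span tensors the question's loop builds with tf.reduce_sum(res, axis=1); reducing further over the batch and dim axes reproduces the fully reduced variant above, in one fused graph instead of thousands of small slice ops.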

Related

takes 1 positional argument but 2 were given in @tf.function

I have a network written with the TensorFlow Keras API, and in part of my code I need to use scipy.spatial.cKDTree, so I decorated my function with @tf.function. When I try to build the tree I receive the following error. (Let me know if more details are required.)
The error happens when it tries to build the cKDTree. The size of pc2e is shape=(46080, 3).
In similar questions I found that it could be caused by the Pillow version; I changed the version, but that didn't solve the error.
Also, is there a better way to have a KDTree in TensorFlow?
TypeError: in user code:

    /home/***/My_Models.py:731 var_layer  *
        tree2 = cKDTree(pc2e, leafsize=500, balanced_tree=False)
    ckdtree.pyx:522 scipy.spatial.ckdtree.cKDTree.__init__  **
        TypeError: __array__() takes 1 positional argument but 2 were given

Process finished with exit code 1
The function:
@tf.function
def var_layer(self, inputs, output):  # output: x y z i j k w
    inputs_v = tf.Variable(inputs)
    pc1_raw, pc2_raw = tf.split(inputs_v, num_or_size_splits=2, axis=4)
    # B x T x W x H x Channels
    s0, s1, s2, s3, s4 = pc1_raw.shape[0], pc1_raw.shape[1], pc1_raw.shape[2], pc1_raw.shape[3], pc1_raw.shape[4]
    pc1 = tf.reshape(pc1_raw[:, -1, :, :, 0:3], shape=[-1, s2 * s3, 3])
    pc2 = tf.reshape(pc2_raw[:, -1, :, :, 0:3], shape=[-1, s2 * s3, 3])
    # normal2 = tf.reshape(pc2_raw[:, -1, :, :, 3:6], [-1, s2 * s3, 3])
    # normal1 = tf.reshape(pc1_raw[:, -1, :, :, 3:6], [-1, s2 * s3, 3])
    Rq, Tr3 = tfg.dual_quaternion.to_rotation_translation(output)
    R33 = tfg.rotation_matrix_3d.from_quaternion(Rq)
    RT = tf.concat([R33, tf.expand_dims(Tr3, axis=2)], -1)
    RT = tf.pad(RT, [[0, 0], [0, 1], [0, 0]], constant_values=[0.0, 0.0, 0.0, 1.0])
    pc1 = tf.pad(pc1, [[0, 0], [0, 0], [0, 1]], constant_values=1)
    pc1 = tf.transpose(pc1, perm=[0, 2, 1])
    pc1_tr = tf.linalg.matmul(RT, pc1)
    pc1_tr = pc1_tr[:, 0:3]
    pc1_tr = tf.transpose(pc1_tr, perm=[0, 2, 1])  # B x WH x 3
    # remove zero values
    for epoch in range(self.Epochs):
        pc2e = pc2[epoch]
        print(pc2e)
        tree2 = cKDTree(pc2e, leafsize=500, balanced_tree=False)
        dist_in, ind = tree2.query(pc1_tr[epoch], k=1)
        nonempty = np.count_nonzero(dist_in)
        dist_in = np.sum(np.abs(dist_in))
        if nonempty != 0:
            dist_in = np.divide(dist_in, nonempty)
        dist_p2p = dist_in
        print(dist_p2p)
    return dist_p2p
versions:
Tensorflow 2.3.0
Scipy 1.4.1
pillow==8.2.0
Input of the function is a point cloud with this shape: Batch x Time x W x H x Channels
and the size of pc2e is shape=(46080, 3)
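Two observations, offered as a sketch rather than a definitive fix. First, this particular TypeError is widely reported as a NumPy/TensorFlow version mismatch (newer NumPy calls __array__(dtype), which older TF tensor wrappers reject), so pinning a NumPy version matching TF 2.3 is worth trying before changing Pillow. Second, and more fundamentally, inside @tf.function the tensors are symbolic while cKDTree needs concrete arrays; one workaround is to move the SciPy code into a helper called through tf.py_function, which hands it eager tensors that support .numpy(). The helper names below are hypothetical:

import numpy as np
import tensorflow as tf
from scipy.spatial import cKDTree

def _kdtree_mean_dist(pc2e, pc1e):
    # runs eagerly: .numpy() yields plain arrays the SciPy code can use
    tree = cKDTree(pc2e.numpy(), leafsize=500, balanced_tree=False)
    dist, _ = tree.query(pc1e.numpy(), k=1)
    nonempty = np.count_nonzero(dist)
    total = np.sum(np.abs(dist))
    return np.float32(total / nonempty if nonempty else total)

@tf.function
def mean_point_distance(pc2e, pc1e):
    return tf.py_function(_kdtree_mean_dist, [pc2e, pc1e], tf.float32)

Note that tf.py_function keeps the KDTree on the Python side: it will not run on accelerators and is not serializable, so a fully graph-native alternative would be a brute-force nearest neighbour (pairwise distances plus tf.reduce_min), which is feasible for moderate point counts.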

Set timer on detected object

I'm using YOLO to detect objects, but I want to set a timer for each detected object. Can anyone help me?
I want the object detection to run with a time limit for my project.
I've tried my best, but I have no idea how to do it.
here is my code:
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
whT = 320
confThreshold = 0.1
nmsThreshold = 0.4

classesFile = "coco.names"
classNames = []
with open(classesFile, 'rt') as f:
    classNames = [line.strip() for line in f.readlines()]

modelConfiguration = "yolov4.cfg"
modelWeights = "yolov4.weights"
net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

def findObjects(outputs, img):
    hT, wT, cT = img.shape
    bbox = []
    classIds = []
    confs = []
    for output in outputs:
        for det in output:
            scores = det[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > confThreshold:
                w, h = int(det[2]*wT), int(det[3]*hT)
                x, y = int((det[0]*wT)-w/2), int((det[1]*hT)-h/2)
                bbox.append([x, y, w, h])
                classIds.append(classId)
                confs.append(float(confidence))
    indices = cv.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)
    font = cv.FONT_HERSHEY_PLAIN
    for i in indices:
        label = str(classNames[classIds[i]])
        x, y, w, h = bbox[i]
        # print(x, y, w, h)
        cv.rectangle(img, (x, y), (x+w, y+h), (255, 0, 255), 2)
        cv.putText(img, label, (x, y + 30), font, 3, (0, 0, 0), 3)
        print("Jenis Mobil: " + label)  # "Car type: ..."
        # cv.putText(img, f'{classNames[classIds[i]].upper()} {int(confs[i]*100)}%', (x, y-10), cv.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)

while True:
    success, img = cap.read()
    blob = cv.dnn.blobFromImage(img, 1 / 255, (whT, whT), [0, 0, 0], 1, crop=False)
    net.setInput(blob)
    layersNames = net.getLayerNames()
    outputNames = [(layersNames[i - 1]) for i in net.getUnconnectedOutLayers()]
    outputs = net.forward(outputNames)
    findObjects(outputs, img)
    cv.imshow('Image', img)
    key = cv.waitKey(1)
    if key == 27:
        break

cap.release()
cv.destroyAllWindows()

ValueError: cannot reshape array of size 692224 into shape (1,3,416,416)-yolov5 cpu error

I was trying to run my custom YOLOv5 model on CPU and I got this error.
This is the GitHub page I used: https://github.com/Amelia0911/onnxruntime-for-yolov5
import onnxruntime
from models.utils import *
import time

IMAGE_SIZE = (416, 416)
CONF_TH = 0.3
NMS_TH = 0.45
CLASSES = 80

model = onnxruntime.InferenceSession("models_train/bestnone.onnx")
anchor_list = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
stride = [8, 16, 32]

def draw(img, boxinfo, dst, id):
    for *xyxy, conf, cls in boxinfo:
        label = '{}|{}'.format(int(cls), '%.2f' % conf)
        plot_one_box(xyxy, img, label=label, color=[0, 0, 255])
    cv2.imencode('.jpg', img)[1].tofile(dst)

def detect(image):
    img = cv2.resize(image, IMAGE_SIZE)
    img = img.transpose(2, 0, 1)
    dataset = (img, image)
    img = dataset[0].astype('float32')
    img_size = [dataset[0].shape[1], dataset[0].shape[2]]
    img /= 255.0
    img = img.reshape(1, 3, img_size[0], img_size[1])
    inputs = {model.get_inputs()[0].name: img}
    pred = torch.tensor(model.run(None, inputs)[0])
    anchor = torch.tensor(anchor_list).float().view(3, -1, 2)
    area = img_size[0] * img_size[1]
    size = [int(area / stride[0] ** 2), int(area / stride[1] ** 2), int(area / stride[2] ** 2)]
    feature = [[int(j / stride[i]) for j in img_size] for i in range(3)]
    y = []
    y.append(pred[:, :size[0] * 3, :])
    y.append(pred[:, size[0] * 3:size[0] * 3 + size[1] * 3, :])
    y.append(pred[:, size[0] * 3 + size[1] * 3:, :])
    grid = []
    for k, f in enumerate(feature):
        grid.append([[i, j] for j in range(f[0]) for i in range(f[1])])
    z = []
    for i in range(3):
        src = y[i]
        xy = src[..., 0:2] * 2. - 0.5
        wh = (src[..., 2:4] * 2) ** 2
        dst_xy = []
        dst_wh = []
        for j in range(3):
            dst_xy.append((xy[:, j * size[i]:(j + 1) * size[i], :] + torch.tensor(grid[i])) * stride[i])
            dst_wh.append(wh[:, j * size[i]:(j + 1) * size[i], :] * anchor[i][j])
        src[..., 0:2] = torch.from_numpy(np.concatenate((dst_xy[0], dst_xy[1], dst_xy[2]), axis=1))
        src[..., 2:4] = torch.from_numpy(np.concatenate((dst_wh[0], dst_wh[1], dst_wh[2]), axis=1))
        z.append(src.view(1, -1, CLASSES + 5))  # 85
    pred = torch.cat(z, 1)
    pred = nms(pred, CONF_TH, NMS_TH)
    for det in pred:
        if det is not None and len(det):
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], dataset[1].shape).round()
    if det == None:
        return np.array([])
    return det

if __name__ == '__main__':
    import time
    src = 'Temp-640x640.jpg'
    t1 = time.time()
    img = cv2.imdecode(np.fromfile(src, dtype=np.uint8), -1)
    print(IMAGE_SIZE)
    results = detect(img)
    t2 = time.time()
    print(results)
    print("onnxruntime time = ", t2 - t1)
    if results is not None and len(results):
        draw(img, results, 'dst3.jpg', str(id))
    print('Down!')
When I run this code, I get the following error:
File "C:\Users\acer\.spyder-py3\metallic surface defect detection\3_onnx_cpu_detec.py", line 85, in <module>
results = detect(img)
File "C:\Users\acer\.spyder-py3\metallic surface defect detection\3_onnx_cpu_detec.py", line 30, in detect
img = img.reshape(1, 3, img_size[0], img_size[1])
ValueError: cannot reshape array of size 692224 into shape (1,3,416,416)
I think it is a color-channel issue and I have tried to fix it, but it doesn't work. If someone knows how to fix it, please let me know. Thanks in advance.
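For what it's worth, 692224 = 416 * 416 * 4, so the decoded array has four channels rather than three: cv2.imdecode(..., -1) keeps an alpha channel when the source image has one. A minimal sketch of a fix (an assumption, not tested against this repo) is to force three channels before calling detect():

import cv2
import numpy as np

img = cv2.imdecode(np.fromfile(src, dtype=np.uint8), -1)
# drop a possible alpha channel so the array is H x W x 3
# before the transpose/reshape inside detect()
if img.ndim == 3 and img.shape[2] == 4:
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)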

Error when trying to implement mAP as metrics in yolov1 training using tensorflow

I am trying to implement mAP as the main metric for YOLOv1 training. It ran fine for several epochs and reported the mAP value along with the loss for each batch, but after several epochs it crashes, and I can't figure out what is wrong.
This is the error code that I got:
InvalidArgumentError: in user code:

    C:\Users\DeepLab\AppData\Local\Temp/ipykernel_11432/1408655327.py:105 mean_average_precision  *
        if iou > best_iou:
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\autograph\operators\control_flow.py:1172 if_stmt
        _tf_if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts)
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\autograph\operators\control_flow.py:1180 _tf_if_stmt
        cond = _verify_tf_condition(cond, 'if statement')
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\autograph\operators\control_flow.py:139 _verify_tf_condition
        cond = array_ops.reshape(cond, ())
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\util\dispatch.py:206 wrapper
        return target(*args, **kwargs)
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\ops\array_ops.py:196 reshape
        result = gen_array_ops.reshape(tensor, shape, name)
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\ops\gen_array_ops.py:8397 reshape
        return reshape_eager_fallback(
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\ops\gen_array_ops.py:8422 reshape_eager_fallback
        _result = _execute.execute(b"Reshape", 1, inputs=_inputs_flat, attrs=_attrs,
    C:\Users\DeepLab\anaconda3\envs\GPU\lib\site-packages\tensorflow\python\eager\execute.py:59 quick_execute
        tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,

    InvalidArgumentError: Input to reshape is a tensor with 0 values, but the requested shape has 1 [Op:Reshape]
For calculating mAP, I use these functions:
intersection_over_union: returns the IoU as a tensor
convert_cellboxes: returns the label values measured relative to the image shape
cellboxes_to_boxes: returns a list of lists containing 6 values (class_idx, confidence, x, y, w, h)
non_max_suppression: returns the filtered version of the cellboxes_to_boxes output
get_bboxes: returns a list containing 7 values (img_idx, class_idx, confidence, x, y, w, h); it is used as the input for calculating mAP
mean_average_precision: calculates the mAP
def intersection_over_union(boxes_preds, boxes_labels, box_format="midpoint"):
    if box_format == "midpoint":
        box1_x1 = boxes_preds[..., 0:1] - boxes_preds[..., 2:3] / 2  # x - w / 2 for each grid in each image
        box1_y1 = boxes_preds[..., 1:2] - boxes_preds[..., 3:4] / 2  # y - h / 2 for each grid in each image
        box1_x2 = boxes_preds[..., 0:1] + boxes_preds[..., 2:3] / 2  # x + w / 2 for each grid in each image
        box1_y2 = boxes_preds[..., 1:2] + boxes_preds[..., 3:4] / 2  # y + h / 2 for each grid in each image
        box2_x1 = boxes_labels[..., 0:1] - boxes_labels[..., 2:3] / 2
        box2_y1 = boxes_labels[..., 1:2] - boxes_labels[..., 3:4] / 2
        box2_x2 = boxes_labels[..., 0:1] + boxes_labels[..., 2:3] / 2
        box2_y2 = boxes_labels[..., 1:2] + boxes_labels[..., 3:4] / 2
    if box_format == "corners":
        box1_x1 = boxes_preds[..., 0:1]
        box1_y1 = boxes_preds[..., 1:2]
        box1_x2 = boxes_preds[..., 2:3]
        box1_y2 = boxes_preds[..., 3:4]  # (N, 1)
        box2_x1 = boxes_labels[..., 0:1]
        box2_y1 = boxes_labels[..., 1:2]
        box2_x2 = boxes_labels[..., 2:3]
        box2_y2 = boxes_labels[..., 3:4]
    x1 = K.max((box1_x1, box2_x1))
    y1 = K.max((box1_y1, box2_y1))
    x2 = K.min((box1_x2, box2_x2))
    y2 = K.min((box1_y2, box2_y2))
    intersection = K.clip((x2 - x1), min_value=0, max_value=abs(x2 - x1)) * K.clip((y2 - y1), min_value=0, max_value=abs(y2 - y1))
    # intersection = 2
    box1_area = abs((box1_x2 - box1_x1) * (box1_y2 - box1_y1))
    box2_area = abs((box2_x2 - box2_x1) * (box2_y2 - box2_y1))
    return intersection / (box1_area + box2_area - intersection + 1e-6)
def convert_cellboxes(predictions, S=7):  # array (n, 7, 7, 30) -> (n, 7 x 7, 30)
    # batch_size = predictions.shape[0]
    try:
        n = batch_size
        predictions = K.reshape(predictions, (n, 7, 7, 30))
    except:
        n = len(X_val) % batch_size
        predictions = K.reshape(predictions, (n, 7, 7, 30))
    bboxes1 = predictions[..., 21:25]
    bboxes2 = predictions[..., 26:30]
    scores = tf.concat(
        (tf.expand_dims(predictions[..., 20], 0), tf.expand_dims(predictions[..., 25], 0)), axis=0  # (1, 7, 7, 2)
    )  # (n, 7, 7, 2)
    best_box = tf.expand_dims(K.argmax(scores, 0), -1)
    # print(best_box)
    best_boxes = bboxes1 * (1 - best_box) + best_box * bboxes2  # (7, 7, 4)
    cell_indices = tf.expand_dims(tf.tile(tf.range(start=0, limit=7, delta=1), (7,)), -1)  # (49, 1) -> (1, 7, 7, 1)
    cell_indices = tf.repeat(tf.reshape(cell_indices, (1, 7, 7, 1)), n, 0)  # reshape from (49, 1) to (n, 7, 7, 1)
    best_boxes = tf.cast(best_boxes, tf.float32)
    cell_indices = tf.cast(cell_indices, tf.float32)
    x = 1 / S * (best_boxes[..., :1] + cell_indices)
    y = 1 / S * (best_boxes[..., 1:2] + K.permute_dimensions(cell_indices, (0, 2, 1, 3)))
    w_h = 1 / S * best_boxes[..., 2:4]
    converted_bboxes = tf.concat((x, y, w_h), axis=-1)  # last dimension = 4
    predicted_class = tf.expand_dims(K.argmax(predictions[..., :20], -1), -1)  # (n, 7, 7, 1)
    best_confidence = tf.expand_dims(K.max((predictions[..., 20], predictions[..., 25]), 0), -1)
    predicted_class = tf.cast(predicted_class, tf.float32)
    best_confidence = tf.cast(best_confidence, tf.float32)
    # print(predicted_class.shape)
    # print(best_confidence.shape)
    # print(converted_bboxes.shape)
    converted_preds = tf.concat(
        (predicted_class, best_confidence, converted_bboxes), -1  # (n, 7, 7, 6)
    )
    # print(converted_preds.shape)
    return converted_preds
def cellboxes_to_boxes(out, S=7):
    try:
        n = batch_size
        converted_pred = K.reshape(convert_cellboxes(out), (n, S * S, -1))  # (n, 49, 6)
    except:
        n = len(X_val) % batch_size
        converted_pred = K.reshape(convert_cellboxes(out), (n, S * S, -1))  # (n, 49, 6)
    # print(converted_pred.shape)
    converted_pred = converted_pred.numpy()  # graph mode
    all_bboxes = []
    for ex_idx in range(out.shape[0]):
        bboxes = []
        for bbox_idx in range(S * S):
            bboxes.append([x for x in converted_pred[ex_idx, bbox_idx, :]])
        all_bboxes.append(bboxes)
    return all_bboxes
def non_max_suppression(bboxes, iou_threshold, threshold, box_format="midpoint"):
    # bboxes = bboxes[0]
    # print(bboxes[:2])
    # for i, box in enumerate(bboxes):
    #     bboxes[i][4:6] = box[4:6] * 7
    # print(bboxes[:2])
    assert type(bboxes) == list
    bboxes = [box for box in bboxes if box[1] > threshold]
    bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
    bboxes_after_nms = []
    while bboxes:
        chosen_box = bboxes.pop(0)
        bboxes = [
            box  # (6)
            for box in bboxes
            if box[0] != chosen_box[0]
            or intersection_over_union(
                tf.constant(box[2:]),
                tf.constant(chosen_box[2:]),
                box_format=box_format,
            )
            < iou_threshold
        ]
        bboxes_after_nms.append(chosen_box)
    return bboxes_after_nms
def get_bboxes(gt_labels, pred_labels, iou_threshold, threshold, box_format="midpoint"):
    """
    return:
        images_pred_boxes = list with each element in this format (image_idx, class_prediction, prob_score, x, y, w, h)
        images_gt_boxes = list with each element in this format (image_idx, class, prob_score, x, y, w, h)
    """
    images_pred_boxes = []
    images_gt_boxes = []
    # pred_labels = model.predict(images)  # training, validation, testing data
    image_idx = 0
    gt_boxes = cellboxes_to_boxes(gt_labels)
    pred_boxes = cellboxes_to_boxes(pred_labels)
    for i in range(len(gt_labels)):
        pred_box_nms = non_max_suppression(pred_boxes[i], iou_threshold, threshold, box_format="midpoint")
        for nms_box in pred_box_nms:
            images_pred_boxes.append([image_idx] + nms_box)
        for box in gt_boxes[i]:
            if box[1] > threshold:
                images_gt_boxes.append([image_idx] + box)
        image_idx += 1
    # print(images_pred_boxes[:10])
    # print(images_gt_boxes[:10])
    return images_pred_boxes, images_gt_boxes
def mean_average_precision(
    y_true, y_pred, iou_threshold=0.5, box_format="midpoint", num_classes=20
):
    pred_boxes, true_boxes = get_bboxes(y_true, y_pred, iou_threshold=0.6, threshold=0.3, box_format="midpoint")
    # list storing all AP for respective classes
    average_precisions = []
    # used for numerical stability later on
    epsilon = 1e-6
    for c in range(num_classes):
        detections = []
        ground_truths = []
        # Go through all predictions and targets,
        # and only add the ones that belong to the
        # current class c
        for detection in pred_boxes:
            if detection[1] == c:
                detections.append(detection)
        for true_box in true_boxes:
            if true_box[1] == c:
                ground_truths.append(true_box)
        # find the amount of bboxes for each training example
        # Counter here finds how many ground truth bboxes we get
        # for each training example, so let's say img 0 has 3,
        # img 1 has 5 then we will obtain a dictionary with:
        # amount_bboxes = {0:3, 1:5, ..., 20: 10}
        amount_bboxes = Counter([gt[0] for gt in ground_truths])
        # We then go through each key, val in this dictionary
        # and convert to the following (w.r.t same example):
        # amount_bboxes = {0:torch.tensor[0,0,0], 1:torch.tensor[0,0,0,0,0]}
        for key, val in amount_bboxes.items():
            amount_bboxes[key] = np.zeros(val)
        # sort by box probabilities which is index 2
        detections.sort(key=lambda x: x[2], reverse=True)
        TP = np.zeros((len(detections)))
        FP = np.zeros((len(detections)))
        total_true_bboxes = len(ground_truths)
        # If none exists for this class then we can safely skip
        if total_true_bboxes == 0:
            continue
        for detection_idx, detection in enumerate(detections):
            # Only take out the ground_truths that have the same
            # training idx as detection
            ground_truth_img = [
                bbox for bbox in ground_truths if bbox[0] == detection[0]
            ]
            num_gts = len(ground_truth_img)
            best_iou = 0
            best_gt_idx = 0
            iou = 0
            for idx, gt in enumerate(ground_truth_img):
                iou = intersection_over_union(
                    tf.constant(detection[3:]),
                    tf.constant(gt[3:]),
                    box_format=box_format,
                )
                if iou > best_iou:
                    best_iou = iou
                    best_gt_idx = idx
            if best_iou > iou_threshold:
                # only detect ground truth detection once
                if amount_bboxes[detection[0]][best_gt_idx] == 0:
                    # true positive and add this bounding box to seen
                    TP[detection_idx] = 1
                    amount_bboxes[detection[0]][best_gt_idx] = 1
                else:
                    FP[detection_idx] = 1
            # if IOU is lower then the detection is a false positive
            else:
                FP[detection_idx] = 1
        TP = tf.constant(TP)
        FP = tf.constant(FP)
        # print(TP)
        # print(FP)
        TP_cumsum = tf.cumsum(TP, axis=0)
        FP_cumsum = tf.cumsum(FP, axis=0)
        recalls = TP_cumsum / (total_true_bboxes + epsilon)
        precisions = tf.math.divide(TP_cumsum, (TP_cumsum + FP_cumsum + epsilon))
        precisions = tf.concat((tf.cast(tf.constant([1]), precisions.dtype), precisions), axis=0)
        recalls = tf.concat((tf.cast(tf.constant([0]), recalls.dtype), recalls), axis=0)
        # torch.trapz for numerical integration
        average_precisions.append(tfp.math.trapz(precisions, recalls))
    return sum(average_precisions) / len(average_precisions)
For training, I used a standard model.fit with PASCAL VOC 2007 as the dataset and a batch size of 4.
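A guess at the crash, offered as a sketch rather than a confirmed fix: the trace points at if iou > best_iou, which autograph verifies by reshaping the condition to a scalar, and "a tensor with 0 values" means the comparison produced an empty tensor, i.e. intersection_over_union returned an empty result for some detection/ground-truth pair (for example when a box slice like detection[3:] comes back empty). A hypothetical guard is to skip degenerate results and compare a concrete scalar:

# hypothetical guard inside the detection loop of mean_average_precision
iou_t = intersection_over_union(
    tf.constant(detection[3:]),
    tf.constant(gt[3:]),
    box_format=box_format,
)
iou = float(iou_t) if tf.size(iou_t) == 1 else 0.0
if iou > best_iou:
    best_iou = iou
    best_gt_idx = idx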

Convert a function from Python to TensorFlow

I am trying to convert the R3Det model, which outputs rotated bounding boxes, to a TensorFlow Lite model for on-device inference on mobile devices. The problem I am facing is that part of the inference model uses Python code wrapped by tf.py_func, which is not serializable. I am trying to convert the function to TensorFlow, but it contains a for loop and some OpenCV function calls, and I have no idea how to convert these into TensorFlow code. I would appreciate it if anybody could help me out with this. The Python function is given below.
def nms_rotate_cpu(boxes, scores, iou_threshold, max_output_size):
    """
    :param boxes: format [x_c, y_c, w, h, theta]
    :param scores: scores of boxes
    :param threshold: iou threshold (0.7 or 0.5)
    :param max_output_size: max number of output
    :return: the remaining index of boxes
    """
    keep = []
    order = scores.argsort()[::-1]
    num = boxes.shape[0]
    suppressed = np.zeros((num), dtype=np.int)
    for _i in range(num):
        if len(keep) >= max_output_size:
            break
        i = order[_i]
        if suppressed[i] == 1:
            continue
        keep.append(i)
        r1 = ((boxes[i, 0], boxes[i, 1]), (boxes[i, 2], boxes[i, 3]), boxes[i, 4])
        area_r1 = boxes[i, 2] * boxes[i, 3]
        for _j in range(_i + 1, num):
            j = order[_j]
            if suppressed[i] == 1:
                continue
            if np.sqrt((boxes[i, 0] - boxes[j, 0])**2 + (boxes[i, 1] - boxes[j, 1])**2) > (boxes[i, 2] + boxes[j, 2] + boxes[i, 3] + boxes[j, 3]):
                inter = 0.0
            else:
                r2 = ((boxes[j, 0], boxes[j, 1]), (boxes[j, 2], boxes[j, 3]), boxes[j, 4])
                area_r2 = boxes[j, 2] * boxes[j, 3]
                inter = 0.0
                try:
                    int_pts = cv2.rotatedRectangleIntersection(r1, r2)[1]
                    if int_pts is not None:
                        order_pts = cv2.convexHull(int_pts, returnPoints=True)
                        int_area = cv2.contourArea(order_pts)
                        inter = int_area * 1.0 / (area_r1 + area_r2 - int_area + cfgs.EPSILON)
                except:
                    """
                    cv2.error: /io/opencv/modules/imgproc/src/intersection.cpp:247:
                    error: (-215) intersection.size() <= 8 in function rotatedRectangleIntersection
                    """
                    # print(r1)
                    # print(r2)
                    inter = 0.9999
            if inter >= iou_threshold:
                suppressed[j] = 1
    return np.array(keep, np.int64)
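There is no TensorFlow built-in for rotated-rectangle intersection, so a faithful port has two parts: the greedy suppression loop, which translates mechanically to tf.while_loop, and the rotated IoU itself, which is the hard part. Below is a partial sketch of the loop with a hypothetical rotated_iou stand-in that ignores theta (plain axis-aligned IoU); replacing that stand-in with a tensor implementation of cv2.rotatedRectangleIntersection / cv2.convexHull is the remaining work:

import tensorflow as tf

def rotated_iou(box, boxes):
    # hypothetical stand-in: axis-aligned IoU that ignores the angle.
    # A faithful port would compute the true rotated intersection here.
    x1 = tf.maximum(box[0] - box[2] / 2, boxes[:, 0] - boxes[:, 2] / 2)
    y1 = tf.maximum(box[1] - box[3] / 2, boxes[:, 1] - boxes[:, 3] / 2)
    x2 = tf.minimum(box[0] + box[2] / 2, boxes[:, 0] + boxes[:, 2] / 2)
    y2 = tf.minimum(box[1] + box[3] / 2, boxes[:, 1] + boxes[:, 3] / 2)
    inter = tf.maximum(x2 - x1, 0.0) * tf.maximum(y2 - y1, 0.0)
    union = box[2] * box[3] + boxes[:, 2] * boxes[:, 3] - inter
    return inter / (union + 1e-6)

def nms_rotate_tf(boxes, scores, iou_threshold, max_output_size):
    num = tf.shape(boxes)[0]
    order = tf.argsort(scores, direction='DESCENDING')

    def cond(i, suppressed, keep):
        return tf.logical_and(
            i < num,
            tf.reduce_sum(tf.cast(keep, tf.int32)) < max_output_size)

    def body(i, suppressed, keep):
        idx = order[i]

        def select():
            # keep this box, then suppress everything overlapping it too much
            new_keep = tf.tensor_scatter_nd_update(
                keep, tf.reshape(idx, [1, 1]), tf.constant([True]))
            ious = rotated_iou(boxes[idx], boxes)
            return tf.logical_or(suppressed, ious >= iou_threshold), new_keep

        suppressed, keep = tf.cond(
            suppressed[idx], lambda: (suppressed, keep), select)
        return i + 1, suppressed, keep

    init = (tf.constant(0), tf.zeros([num], tf.bool), tf.zeros([num], tf.bool))
    _, _, keep = tf.while_loop(cond, body, init)
    return tf.boolean_mask(tf.range(num), keep)

A kept box suppresses itself on the same step (its IoU with itself is 1), which is harmless because it is already recorded in keep and never revisited; the distance gate from the NumPy version can be folded into rotated_iou as an early mask.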