I have used the TensorFlow Object Detection API 2 for a model. Now I want to loop through the multiple bounding boxes in a single image and create a separate image for each bounding box. With the code below I am able to get an image of one bounding box, but I am unable to loop through all of them — I think I have misunderstood the contents of the detections dictionary.
# Build a {class_id: class_info} lookup from the label map so drawn boxes can be labeled.
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS)
# Load the test image as an HxWxC uint8 numpy array.
image_np = np.array(Image.open(test_image_path))
print('Running inference for {}... '.format(test_image_path), end='')
# The saved model expects a batched input: add a leading batch dimension of 1.
input_tensor = tf.convert_to_tensor(image_np)
input_tensor = input_tensor[tf.newaxis, ...]
#input_tensor = input_tensor[:, :, :, :3]
#input_tensor = tf.convert_to_tensor(np.expand_dims(image_np, 0), dtype=tf.float32)
detections = detect_fn(input_tensor)
# num_detections is a scalar; pop it so the comprehension below only sees
# the per-detection arrays (boxes, scores, classes, ...).
num_detections = int(detections.pop('num_detections'))
# Strip the batch dimension and keep only the first num_detections entries,
# converting each tensor to a numpy array.
detections = {key: value[0, :num_detections].numpy()
for key, value in detections.items()}
detections['num_detections'] = num_detections
# detection_classes should be ints.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)
# Draw on a copy so image_np keeps the original pixels for later cropping.
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
image_np_with_detections,
detections['detection_boxes'],  # normalized [ymin, xmin, ymax, xmax] per box
detections['detection_classes'],
detections['detection_scores'],
category_index,
use_normalized_coordinates=True,
max_boxes_to_draw=500,
min_score_thresh=.2,
agnostic_mode=False)
plt.figure()
plt.imshow(image_np_with_detections)
print('Done')
plt.show()
def crop_objects(image, image_np_with_detections, detections,
                 detection_threshold=0.5,
                 output_prefix='D:\\pcb_project\\test_images'):
    """Crop every detected bounding box out of the image and save each crop.

    Fixes over the original single-box version: loops over ALL boxes instead
    of only index 0, numbers the output files so they no longer overwrite a
    single ``test_images.png``, copies the crop before blanking it so the
    source image is not zeroed through the numpy view, and drops the
    unnecessary ``global`` statement.

    Args:
        image: PIL image; only ``image.size`` is read for width/height.
        image_np_with_detections: numpy array the crops are sliced from.
        detections: dict whose ``'detection_boxes'`` (normalized
            [ymin, xmin, ymax, xmax]) and ``'detection_scores'`` arrays are
            aligned by position.
        detection_threshold: crops scoring below this are saved blacked out,
            matching the original hard-coded 0.5 cutoff.
        output_prefix: path prefix for the saved ``<prefix><index>.png`` files.

    Returns:
        (ymin, ymax, xmin, xmax) pixel coordinates of the FIRST box — the
        same value the original returned — or None if there are no boxes.
    """
    width, height = image.size
    first_coords = None
    for index, box in enumerate(detections['detection_boxes']):
        # Boxes are normalized to [0, 1]; scale to pixel coordinates.
        ymin = int(box[0] * height)
        xmin = int(box[1] * width)
        ymax = int(box[2] * height)
        xmax = int(box[3] * width)
        if first_coords is None:
            first_coords = (ymin, ymax, xmin, xmax)
        crop_img = image_np_with_detections[ymin:ymax, xmin:xmax]
        if detections['detection_scores'][index] < detection_threshold:
            # Copy first: fill(0) on the slice would zero the source image too.
            crop_img = crop_img.copy()
            crop_img.fill(0)
        # Save cropped object into its own numbered image file.
        cv2.imwrite(output_prefix + str(index) + '.png', crop_img)
    return first_coords
I just looked into the detections dictionary, and from there it was simple. For anyone else who needs it, the following is working for me.
def crop_objects(image, image_np_with_detections, detections, i,
                 detection_threshold=0.5):
    """Crop each detected box out of the image and save it as a numbered .png.

    Fixes over the posted version: ``detection_threshold`` was read from an
    undefined global (NameError at call time) and is now a keyword parameter;
    the score was indexed by the filename counter ``i``, which misaligns
    scores with boxes whenever ``i != 0`` — scores are now looked up by the
    box's own index; the no-op ``i=i`` is removed; the crop is copied before
    blanking so the source array is not zeroed through the view.

    Args:
        image: PIL image; only ``image.size`` is read.
        image_np_with_detections: numpy array the crops are sliced from.
        detections: dict whose ``'detection_boxes'`` (normalized
            [ymin, xmin, ymax, xmax]) and ``'detection_scores'`` arrays are
            aligned by position.
        i: starting number for the output filenames.
        detection_threshold: crops scoring below this are saved blacked out.
    """
    width, height = image.size
    for box_index, box in enumerate(detections['detection_boxes']):
        # Boxes are normalized to [0, 1]; scale to pixel coordinates.
        ymin = int(box[0] * height)
        xmin = int(box[1] * width)
        ymax = int(box[2] * height)
        xmax = int(box[3] * width)
        crop_img = image_np_with_detections[ymin:ymax, xmin:xmax]
        # Score must use the box's own index, not the filename counter.
        if detections['detection_scores'][box_index] < detection_threshold:
            # Copy first: fill(0) on the slice would zero the source image too.
            crop_img = crop_img.copy()
            crop_img.fill(0)
        # Save cropped object into image
        cv2.imwrite('D:\\pcb_project\\test_images' + str(i) + '.png', crop_img)
        i += 1
The function above doesn't work for me, but this code does the job:
# Export cropped objects/boxes from the current image.
# (Restored the loop indentation lost in the paste — the snippet as posted
# was not valid Python — and replaced the manual box_index counter with
# enumerate.)
detection_threshold = .3  # minimum confidence required to keep a crop

# NOTE(review): `image` is treated as a numpy array here (shape[0]=rows,
# shape[1]=cols), unlike the PIL image used by crop_objects above — confirm
# which object is actually in scope before running.
height = image.shape[0]
width = image.shape[1]

for box_index, detection in enumerate(detections['detection_boxes']):
    # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
    ymin = int(detection[0] * height)
    xmin = int(detection[1] * width)
    ymax = int(detection[2] * height)
    xmax = int(detection[3] * width)
    crop_img = image_np[ymin:ymax, xmin:xmax]
    # Only save crops whose score clears the threshold.
    if detections['detection_scores'][box_index] > detection_threshold:
        label = category_index[detections['detection_classes'][box_index]]['name']
        print(str(label))
        print(str(detections['detection_scores'][box_index]) + '\n-------------')
        # Save cropped object into image
        # NOTE(review): your_file_name / your_path_to_saved_images are
        # placeholders from the original answer — define them before use.
        file_name = your_file_name
        cv2.imwrite(your_path_to_saved_images + str(file_name) + 'object-' + str(box_index + 1) + '.png', crop_img)
Why is the comparison less-than in the line `if detections['detection_scores'][i] < detection_threshold:`? If I understand correctly, it should be greater-than-or-equal, i.e.
if detections['detection_scores'][i] >= detection_threshold:
# crop your image
The technical post webpages of this site follow the CC BY-SA 4.0 protocol. If you need to reprint, please indicate the site URL or the original address. For any question please contact: yoyou2525@163.com.