# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with COCO metrics."""
import numpy as np
import tensorflow as tf

from object_detection.core import standard_fields
from object_detection.metrics import coco_tools
from object_detection.utils import json_utils
from object_detection.utils import object_detection_evaluation


class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
  """Class to evaluate COCO detection metrics."""

  def __init__(self,
               categories,
               include_metrics_per_category=False,
               all_metrics_per_category=False):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      include_metrics_per_category: If True, include metrics for each category.
      all_metrics_per_category: Whether to include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to true if
        you have more than a handful of categories, because it will pollute
        your mldash.
    """
    super(CocoDetectionEvaluator, self).__init__(categories)
    # _image_ids is a dictionary that maps unique image ids to Booleans which
    # indicate whether a corresponding detection has been added.
    self._image_ids = {}
    self._groundtruth_list = []
    self._detection_boxes_list = []
    self._category_id_set = set([cat['id'] for cat in self._categories])
    self._annotation_id = 1
    self._metrics = None
    self._include_metrics_per_category = include_metrics_per_category
    self._all_metrics_per_category = all_metrics_per_category

  def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    self._image_ids.clear()
    self._groundtruth_list = []
    self._detection_boxes_list = []

  def add_single_ground_truth_image_info(self,
                                         image_id,
                                         groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    If the image has already been added, a warning is logged, and the
    groundtruth is ignored.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        InputDataFields.groundtruth_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        InputDataFields.groundtruth_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed groundtruth classes for the boxes.
        InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
          shape [num_boxes] containing iscrowd flag for groundtruth boxes.
    """
    if image_id in self._image_ids:
      tf.logging.warning('Ignoring ground truth with image id %s since it was '
                         'previously added', image_id)
      return

    groundtruth_is_crowd = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_is_crowd)
    # Drop groundtruth_is_crowd if empty tensor.
    if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:
      groundtruth_is_crowd = None

    self._groundtruth_list.extend(
        coco_tools.ExportSingleImageGroundtruthToCoco(
            image_id=image_id,
            next_annotation_id=self._annotation_id,
            category_id_set=self._category_id_set,
            groundtruth_boxes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_boxes],
            groundtruth_classes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_classes],
            groundtruth_is_crowd=groundtruth_is_crowd))
    self._annotation_id += groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_boxes].shape[0]
    # Boolean to indicate whether a detection has been added for this image.
    self._image_ids[image_id] = False
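
  # A minimal usage sketch (editor's illustration, not part of the original
  # file; the category list and field values are assumed): the expected
  # groundtruth_dict is plain numpy data keyed by standard_fields, e.g. for
  # one image with a single box of class 1:
  #
  #   evaluator = CocoDetectionEvaluator(
  #       categories=[{'id': 1, 'name': 'cat'}])
  #   evaluator.add_single_ground_truth_image_info(
  #       image_id='image_0',
  #       groundtruth_dict={
  #           standard_fields.InputDataFields.groundtruth_boxes:
  #               np.array([[10., 10., 50., 50.]], dtype=np.float32),
  #           standard_fields.InputDataFields.groundtruth_classes:
  #               np.array([1], dtype=np.int32)
  #       })
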
  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image to be used for evaluation.

    If a detection has already been added for this image id, a warning is
    logged, and the detection is skipped.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        DetectionResultFields.detection_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` detection boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.

    Raises:
      ValueError: If groundtruth for the image_id is not available.
    """
    if image_id not in self._image_ids:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    if self._image_ids[image_id]:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return

    self._detection_boxes_list.extend(
        coco_tools.ExportSingleImageDetectionBoxesToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_boxes=detections_dict[
                standard_fields.DetectionResultFields.detection_boxes],
            detection_scores=detections_dict[
                standard_fields.DetectionResultFields.detection_scores],
            detection_classes=detections_dict[
                standard_fields.DetectionResultFields.detection_classes]))
    self._image_ids[image_id] = True
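
  # A matching sketch for the detection side (editor's illustration; values
  # assumed): scores are in [0, 1] and classes are 1-indexed, mirroring the
  # groundtruth call above:
  #
  #   evaluator.add_single_detected_image_info(
  #       image_id='image_0',
  #       detections_dict={
  #           standard_fields.DetectionResultFields.detection_boxes:
  #               np.array([[10., 10., 50., 50.]], dtype=np.float32),
  #           standard_fields.DetectionResultFields.detection_scores:
  #               np.array([0.9], dtype=np.float32),
  #           standard_fields.DetectionResultFields.detection_classes:
  #               np.array([1], dtype=np.int32)
  #       })
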
  def dump_detections_to_json_file(self, json_output_path):
    """Saves the detections into json_output_path in the format used by MS COCO.

    Args:
      json_output_path: String containing the output file's path. It can also
        be None, in which case nothing is written to the output file.
    """
    if json_output_path:
      with tf.gfile.GFile(json_output_path, 'w') as fid:
        tf.logging.info('Dumping detections to output json file.')
        json_utils.Dump(
            obj=self._detection_boxes_list, fid=fid, float_digits=4, indent=2)

  def evaluate(self):
    """Evaluates the detection boxes and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'DetectionBoxes_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05
        increments.
      'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU.
      'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU.
      'DetectionBoxes_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionBoxes_Precision/mAP (medium)': mean average precision for
        medium sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionBoxes_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionBoxes_Recall/AR@1': average recall with 1 detection.
      'DetectionBoxes_Recall/AR@10': average recall with 10 detections.
      'DetectionBoxes_Recall/AR@100': average recall with 100 detections.
      'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects
        with 100 detections.
      'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium
        objects with 100 detections.
      'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.

      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        'images': [{'id': image_id} for image_id in self._image_ids],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_boxes_list)
    box_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)
    box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
        include_metrics_per_category=self._include_metrics_per_category,
        all_metrics_per_category=self._all_metrics_per_category)
    box_metrics.update(box_per_category_ap)
    box_metrics = {'DetectionBoxes_' + key: value
                   for key, value in iter(box_metrics.items())}
    return box_metrics
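
  # Sketch of reading the result (editor's illustration): once all images
  # have been added, evaluate() returns a flat dict keyed by the metric
  # names documented above, e.g.:
  #
  #   metrics = evaluator.evaluate()
  #   print(metrics['DetectionBoxes_Precision/mAP'])
  #   print(metrics['DetectionBoxes_Recall/AR@100'])
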
  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops.

    Note that once value_op is called, the detections and groundtruth added via
    update_op are cleared.

    This function can take in groundtruth and detections for a batch of images,
    or for a single image. For the latter case, the batch dimension for input
    tensors need not be present.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating object
        detection performance. For single-image evaluation, this dictionary may
        be produced from eval_util.result_dict_for_single_example(). For
        multi-image evaluation, `eval_dict` should contain the fields
        'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
        properly unpad the tensors from the batch.

    Returns:
      a dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
      update ops must be run together and similarly all value ops must be run
      together to guarantee correct behaviour.
    """
    def update_op(
        image_id_batched,
        groundtruth_boxes_batched,
        groundtruth_classes_batched,
        groundtruth_is_crowd_batched,
        num_gt_boxes_per_image,
        detection_boxes_batched,
        detection_scores_batched,
        detection_classes_batched,
        num_det_boxes_per_image,
        is_annotated_batched):
      """Update operation for adding batch of images to Coco evaluator."""
      for (image_id, gt_box, gt_class, gt_is_crowd, num_gt_box, det_box,
           det_score, det_class, num_det_box, is_annotated) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_is_crowd_batched,
               num_gt_boxes_per_image,
               detection_boxes_batched, detection_scores_batched,
               detection_classes_batched, num_det_boxes_per_image,
               is_annotated_batched):
        if is_annotated:
          self.add_single_ground_truth_image_info(
              image_id, {
                  'groundtruth_boxes': gt_box[:num_gt_box],
                  'groundtruth_classes': gt_class[:num_gt_box],
                  'groundtruth_is_crowd': gt_is_crowd[:num_gt_box]
              })
          self.add_single_detected_image_info(
              image_id,
              {'detection_boxes': det_box[:num_det_box],
               'detection_scores': det_score[:num_det_box],
               'detection_classes': det_class[:num_det_box]})

    # Unpack items from the evaluation dictionary.
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_is_crowd = eval_dict.get(
        input_data_fields.groundtruth_is_crowd, None)
    detection_boxes = eval_dict[detection_fields.detection_boxes]
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    num_gt_boxes_per_image = eval_dict.get(
        'num_groundtruth_boxes_per_image', None)
    num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
    is_annotated = eval_dict.get('is_annotated', None)

    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)

    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_boxes = tf.expand_dims(detection_boxes, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)

      if is_annotated is None:
        is_annotated = tf.constant([True])
      else:
        is_annotated = tf.expand_dims(is_annotated, 0)
    else:
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_boxes)[1:2],
            multiples=tf.shape(detection_boxes)[0:1])
      if is_annotated is None:
        is_annotated = tf.ones_like(image_id, dtype=tf.bool)

    update_op = tf.py_func(update_op, [image_id,
                                       groundtruth_boxes,
                                       groundtruth_classes,
                                       groundtruth_is_crowd,
                                       num_gt_boxes_per_image,
                                       detection_boxes,
                                       detection_scores,
                                       detection_classes,
                                       num_det_boxes_per_image,
                                       is_annotated], [])
    metric_names = ['DetectionBoxes_Precision/mAP',
                    'DetectionBoxes_Precision/mAP@.50IOU',
                    'DetectionBoxes_Precision/mAP@.75IOU',
                    'DetectionBoxes_Precision/mAP (large)',
                    'DetectionBoxes_Precision/mAP (medium)',
                    'DetectionBoxes_Precision/mAP (small)',
                    'DetectionBoxes_Recall/AR@1',
                    'DetectionBoxes_Recall/AR@10',
                    'DetectionBoxes_Recall/AR@100',
                    'DetectionBoxes_Recall/AR@100 (large)',
                    'DetectionBoxes_Recall/AR@100 (medium)',
                    'DetectionBoxes_Recall/AR@100 (small)']
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +
                            category_dict['name'])

    def first_value_func():
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):
      def value_func():
        return np.float32(self._metrics[metric_name])
      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
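

# Illustrative wiring sketch (editor's addition; the surrounding model_fn,
# `loss`, and `eval_dict` construction are assumed): per the docstring above,
# the returned dict plugs directly into tf.estimator.EstimatorSpec during
# evaluation, e.g.:
#
#   eval_metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
#   spec = tf.estimator.EstimatorSpec(
#       mode=tf.estimator.ModeKeys.EVAL,
#       loss=loss,
#       eval_metric_ops=eval_metric_ops)

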
def _check_mask_type_and_value(array_name, masks):
  """Checks whether mask dtype is uint8 and the values are either 0 or 1."""
  if masks.dtype != np.uint8:
    raise ValueError('{} must be of type np.uint8. Found {}.'.format(
        array_name, masks.dtype))
  if np.any(np.logical_and(masks != 0, masks != 1)):
    raise ValueError('{} elements can only be either 0 or 1.'.format(
        array_name))
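

# Quick sketch of the contract above (editor's illustration; array contents
# assumed): binary uint8 masks pass, anything else raises, e.g.:
#
#   _check_mask_type_and_value(
#       'masks', np.zeros((2, 4, 4), dtype=np.uint8))      # OK: all zeros
#   _check_mask_type_and_value(
#       'masks', np.full((1, 4, 4), 255, dtype=np.uint8))  # ValueError: 255
#   _check_mask_type_and_value(
#       'masks', np.zeros((1, 4, 4), dtype=np.float32))    # ValueError: dtype

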
class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator):
  """Class to evaluate COCO mask metrics."""

  def __init__(self, categories, include_metrics_per_category=False):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      include_metrics_per_category: If True, include metrics for each category.
    """
    super(CocoMaskEvaluator, self).__init__(categories)
    self._image_id_to_mask_shape_map = {}
    self._image_ids_with_detections = set([])
    self._groundtruth_list = []
    self._detection_masks_list = []
    self._category_id_set = set([cat['id'] for cat in self._categories])
    self._annotation_id = 1
    self._include_metrics_per_category = include_metrics_per_category

  def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    self._image_id_to_mask_shape_map.clear()
    self._image_ids_with_detections.clear()
    self._groundtruth_list = []
    self._detection_masks_list = []

  def add_single_ground_truth_image_info(self,
                                         image_id,
                                         groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    If the image has already been added, a warning is logged, and the
    groundtruth is ignored.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        InputDataFields.groundtruth_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        InputDataFields.groundtruth_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed groundtruth classes for the boxes.
        InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
          [num_boxes, image_height, image_width] containing groundtruth masks
          corresponding to the boxes. The elements of the array must be
          in {0, 1}.
    """
    if image_id in self._image_id_to_mask_shape_map:
      tf.logging.warning('Ignoring ground truth with image id %s since it was '
                         'previously added', image_id)
      return

    groundtruth_instance_masks = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks]
    _check_mask_type_and_value(
        standard_fields.InputDataFields.groundtruth_instance_masks,
        groundtruth_instance_masks)
    self._groundtruth_list.extend(
        coco_tools.ExportSingleImageGroundtruthToCoco(
            image_id=image_id,
            next_annotation_id=self._annotation_id,
            category_id_set=self._category_id_set,
            groundtruth_boxes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_boxes],
            groundtruth_classes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_classes],
            groundtruth_masks=groundtruth_instance_masks))
    self._annotation_id += groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_boxes].shape[0]
    self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks].shape
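
  # A minimal mask-groundtruth sketch (editor's illustration; shapes and
  # values assumed): masks are uint8, one [image_height, image_width] slice
  # per box, with values restricted to {0, 1}:
  #
  #   masks = np.zeros((1, 100, 100), dtype=np.uint8)
  #   masks[0, 10:50, 10:50] = 1  # filled over the box region
  #   evaluator.add_single_ground_truth_image_info(
  #       image_id='image_0',
  #       groundtruth_dict={
  #           standard_fields.InputDataFields.groundtruth_boxes:
  #               np.array([[10., 10., 50., 50.]], dtype=np.float32),
  #           standard_fields.InputDataFields.groundtruth_classes:
  #               np.array([1], dtype=np.int32),
  #           standard_fields.InputDataFields.groundtruth_instance_masks: masks
  #       })
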
  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image to be used for evaluation.

    If a detection has already been added for this image id, a warning is
    logged, and the detection is skipped.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.
        DetectionResultFields.detection_masks: optional uint8 numpy array of
          shape [num_boxes, image_height, image_width] containing instance
          masks corresponding to the boxes. The elements of the array must be
          in {0, 1}.

    Raises:
      ValueError: If groundtruth for the image_id is not available or if
        spatial shapes of groundtruth_instance_masks and detection_masks are
        incompatible.
    """
    if image_id not in self._image_id_to_mask_shape_map:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    if image_id in self._image_ids_with_detections:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return

    groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]
    detection_masks = detections_dict[
        standard_fields.DetectionResultFields.detection_masks]
    if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:
      raise ValueError('Spatial shapes of groundtruth masks and detection '
                       'masks are incompatible: {} vs {}'.format(
                           groundtruth_masks_shape,
                           detection_masks.shape))
    _check_mask_type_and_value(
        standard_fields.DetectionResultFields.detection_masks,
        detection_masks)
    self._detection_masks_list.extend(
        coco_tools.ExportSingleImageDetectionMasksToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_masks=detection_masks,
            detection_scores=detections_dict[
                standard_fields.DetectionResultFields.detection_scores],
            detection_classes=detections_dict[
                standard_fields.DetectionResultFields.detection_classes]))
    self._image_ids_with_detections.update([image_id])

  def dump_detections_to_json_file(self, json_output_path):
    """Saves the detections into json_output_path in the format used by MS COCO.

    Args:
      json_output_path: String containing the output file's path. It can also
        be None, in which case nothing is written to the output file.
    """
    if json_output_path:
      tf.logging.info('Dumping detections to output json file.')
      with tf.gfile.GFile(json_output_path, 'w') as fid:
        json_utils.Dump(
            obj=self._detection_masks_list, fid=fid, float_digits=4, indent=2)

  def evaluate(self):
    """Evaluates the detection masks and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'DetectionMasks_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05
        increments.
      'DetectionMasks_Precision/mAP@.50IOU': mean average precision at 50% IOU.
      'DetectionMasks_Precision/mAP@.75IOU': mean average precision at 75% IOU.
      'DetectionMasks_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionMasks_Precision/mAP (medium)': mean average precision for
        medium sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionMasks_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionMasks_Recall/AR@1': average recall with 1 detection.
      'DetectionMasks_Recall/AR@10': average recall with 10 detections.
      'DetectionMasks_Recall/AR@100': average recall with 100 detections.
      'DetectionMasks_Recall/AR@100 (small)': average recall for small objects
        with 100 detections.
      'DetectionMasks_Recall/AR@100 (medium)': average recall for medium
        objects with 100 detections.
      'DetectionMasks_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.

      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}
                   for image_id, shape
                   in self._image_id_to_mask_shape_map.items()],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(
        groundtruth_dict, detection_type='segmentation')
    coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_masks_list)
    mask_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth, coco_wrapped_detection_masks,
        agnostic_mode=False, iou_type='segm')
    mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(
        include_metrics_per_category=self._include_metrics_per_category)
    mask_metrics.update(mask_per_category_ap)
    mask_metrics = {'DetectionMasks_' + key: value
                    for key, value in mask_metrics.items()}
    return mask_metrics
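
  # Note (editor's illustration): unlike the box evaluator, the 'images' list
  # here carries height/width recovered from the stored mask shapes, since
  # COCO needs image sizes to decode segmentations. For the sketch above,
  # _image_id_to_mask_shape_map would hold {'image_0': (1, 100, 100)}, which
  # becomes {'id': 'image_0', 'height': 100, 'width': 100}.
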
  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops.

    Note that once value_op is called, the detections and groundtruth added via
    update_op are cleared.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating object
        detection performance. For single-image evaluation, this dictionary may
        be produced from eval_util.result_dict_for_single_example(). For
        multi-image evaluation, `eval_dict` should contain the fields
        'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
        properly unpad the tensors from the batch.

    Returns:
      a dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
      update ops must be run together and similarly all value ops must be run
      together to guarantee correct behaviour.
    """
    def update_op(image_id_batched, groundtruth_boxes_batched,
                  groundtruth_classes_batched,
                  groundtruth_instance_masks_batched,
                  groundtruth_is_crowd_batched, num_gt_boxes_per_image,
                  detection_scores_batched, detection_classes_batched,
                  detection_masks_batched, num_det_boxes_per_image):
      """Update op for metrics."""
      for (image_id, groundtruth_boxes, groundtruth_classes,
           groundtruth_instance_masks, groundtruth_is_crowd, num_gt_box,
           detection_scores, detection_classes,
           detection_masks, num_det_box) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_instance_masks_batched,
               groundtruth_is_crowd_batched, num_gt_boxes_per_image,
               detection_scores_batched, detection_classes_batched,
               detection_masks_batched, num_det_boxes_per_image):
        self.add_single_ground_truth_image_info(
            image_id, {
                'groundtruth_boxes':
                    groundtruth_boxes[:num_gt_box],
                'groundtruth_classes':
                    groundtruth_classes[:num_gt_box],
                'groundtruth_instance_masks':
                    groundtruth_instance_masks[:num_gt_box],
                'groundtruth_is_crowd':
                    groundtruth_is_crowd[:num_gt_box]
            })
        self.add_single_detected_image_info(
            image_id, {
                'detection_scores': detection_scores[:num_det_box],
                'detection_classes': detection_classes[:num_det_box],
                'detection_masks': detection_masks[:num_det_box]
            })

    # Unpack items from the evaluation dictionary.
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_instance_masks = eval_dict[
        input_data_fields.groundtruth_instance_masks]
    groundtruth_is_crowd = eval_dict.get(
        input_data_fields.groundtruth_is_crowd, None)
    num_gt_boxes_per_image = eval_dict.get(
        input_data_fields.num_groundtruth_boxes, None)
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    detection_masks = eval_dict[detection_fields.detection_masks]
    num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
                                            None)

    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)

    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_instance_masks = tf.expand_dims(
          groundtruth_instance_masks, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      detection_masks = tf.expand_dims(detection_masks, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_scores)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
    else:
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_scores)[1:2],
            multiples=tf.shape(detection_scores)[0:1])

    update_op = tf.py_func(update_op, [
        image_id, groundtruth_boxes, groundtruth_classes,
        groundtruth_instance_masks, groundtruth_is_crowd,
        num_gt_boxes_per_image, detection_scores, detection_classes,
        detection_masks, num_det_boxes_per_image
    ], [])

    metric_names = ['DetectionMasks_Precision/mAP',
                    'DetectionMasks_Precision/mAP@.50IOU',
                    'DetectionMasks_Precision/mAP@.75IOU',
                    'DetectionMasks_Precision/mAP (large)',
                    'DetectionMasks_Precision/mAP (medium)',
                    'DetectionMasks_Precision/mAP (small)',
                    'DetectionMasks_Recall/AR@1',
                    'DetectionMasks_Recall/AR@10',
                    'DetectionMasks_Recall/AR@100',
                    'DetectionMasks_Recall/AR@100 (large)',
                    'DetectionMasks_Recall/AR@100 (medium)',
                    'DetectionMasks_Recall/AR@100 (small)']
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' +
                            category_dict['name'])

    def first_value_func():
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):
      def value_func():
        return np.float32(self._metrics[metric_name])
      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
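

# End-to-end sketch (editor's illustration; category list and data assumed):
# a perfect match between groundtruth and detection masks should score
# mAP ~= 1.0:
#
#   evaluator = CocoMaskEvaluator(categories=[{'id': 1, 'name': 'cat'}])
#   masks = np.zeros((1, 100, 100), dtype=np.uint8)
#   masks[0, 10:50, 10:50] = 1
#   evaluator.add_single_ground_truth_image_info(
#       'image_0',
#       {standard_fields.InputDataFields.groundtruth_boxes:
#            np.array([[10., 10., 50., 50.]], dtype=np.float32),
#        standard_fields.InputDataFields.groundtruth_classes:
#            np.array([1], dtype=np.int32),
#        standard_fields.InputDataFields.groundtruth_instance_masks: masks})
#   evaluator.add_single_detected_image_info(
#       'image_0',
#       {standard_fields.DetectionResultFields.detection_scores:
#            np.array([0.9], dtype=np.float32),
#        standard_fields.DetectionResultFields.detection_classes:
#            np.array([1], dtype=np.int32),
#        standard_fields.DetectionResultFields.detection_masks: masks})
#   metrics = evaluator.evaluate()
#   print(metrics['DetectionMasks_Precision/mAP'])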