# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""object_detection_evaluation module.

ObjectDetectionEvaluation is a class which manages ground truth information of
an object detection dataset, and computes frequently used detection metrics
such as Precision, Recall, CorLoc of the provided detection results.
It supports the following operations:
1) Add ground truth information of images sequentially.
2) Add detection results of images sequentially.
3) Evaluate detection metrics on already inserted detection results.
4) Write evaluation result into a pickle file for future processing or
   visualization.

Note: This module operates on numpy boxes and box lists.
"""

from abc import ABCMeta
from abc import abstractmethod
import collections
import logging
import unicodedata

import numpy as np
import tensorflow as tf

from object_detection.core import standard_fields
from object_detection.utils import label_map_util
from object_detection.utils import metrics
from object_detection.utils import per_image_evaluation

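# Minimal end-to-end usage sketch (the two-category `categories` list and the
# numpy inputs are hypothetical):
#
#   evaluator = PascalDetectionEvaluator(
#       categories=[{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}])
#   evaluator.add_single_ground_truth_image_info(
#       image_id='image_1',
#       groundtruth_dict={
#           standard_fields.InputDataFields.groundtruth_boxes:
#               np.array([[10., 10., 50., 50.]], dtype=np.float32),
#           standard_fields.InputDataFields.groundtruth_classes:
#               np.array([1], dtype=int),
#       })
#   evaluator.add_single_detected_image_info(
#       image_id='image_1',
#       detections_dict={
#           standard_fields.DetectionResultFields.detection_boxes:
#               np.array([[12., 11., 48., 49.]], dtype=np.float32),
#           standard_fields.DetectionResultFields.detection_scores:
#               np.array([0.9], dtype=np.float32),
#           standard_fields.DetectionResultFields.detection_classes:
#               np.array([1], dtype=int),
#       })
#   metrics_dict = evaluator.evaluate()

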
class DetectionEvaluator(object):
  """Interface for object detection evaluation classes.

  Example usage of the Evaluator:
  ------------------------------
  evaluator = DetectionEvaluator(categories)

  # Detections and groundtruth for image 1.
  evaluator.add_single_groundtruth_image_info(...)
  evaluator.add_single_detected_image_info(...)

  # Detections and groundtruth for image 2.
  evaluator.add_single_groundtruth_image_info(...)
  evaluator.add_single_detected_image_info(...)

  metrics_dict = evaluator.evaluate()
  """
  __metaclass__ = ABCMeta

  def __init__(self, categories):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
    """
    self._categories = categories

  @abstractmethod
  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary of groundtruth numpy arrays required
        for evaluations.
    """
    pass

  @abstractmethod
  def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary of detection numpy arrays required
        for evaluation.
    """
    pass

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.

    Note that this must only be implemented if performing evaluation with a
    `tf.estimator.Estimator`.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating an object
        detection model, returned from
        eval_util.result_dict_for_single_example().

    Returns:
      A dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in `tf.estimator.EstimatorSpec`.
    """
    pass

  @abstractmethod
  def evaluate(self):
    """Evaluates detections and returns a dictionary of metrics."""
    pass

  @abstractmethod
  def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    pass


class ObjectDetectionEvaluator(DetectionEvaluator):
  """A class to evaluate detections."""

  def __init__(self,
               categories,
               matching_iou_threshold=0.5,
               evaluate_corlocs=False,
               evaluate_precision_recall=False,
               metric_prefix=None,
               use_weighted_mean_ap=False,
               evaluate_masks=False,
               group_of_weight=0.0):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      matching_iou_threshold: IOU threshold to use for matching groundtruth
        boxes to detection boxes.
      evaluate_corlocs: (optional) boolean which determines whether CorLoc
        scores are returned.
      evaluate_precision_recall: (optional) boolean which determines whether
        precision and recall values are returned.
      metric_prefix: (optional) string prefix for metric names; if None, no
        prefix is used.
      use_weighted_mean_ap: (optional) boolean which determines if the mean
        average precision is computed directly from the scores and tp_fp_labels
        of all classes.
      evaluate_masks: If False, evaluation will be performed based on boxes.
        If True, mask evaluation will be performed instead.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
        correct class within a group-of box are ignored. If weight is > 0, then
        if at least one detection falls within a group-of box with
        matching_iou_threshold, weight group_of_weight is added to true
        positives. Consequently, if no detection falls within a group-of box,
        weight group_of_weight is added to false negatives.

    Raises:
      ValueError: If the category ids are not 1-indexed.
    """
    super(ObjectDetectionEvaluator, self).__init__(categories)
    self._num_classes = max([cat['id'] for cat in categories])
    if min(cat['id'] for cat in categories) < 1:
      raise ValueError('Classes should be 1-indexed.')
    self._matching_iou_threshold = matching_iou_threshold
    self._use_weighted_mean_ap = use_weighted_mean_ap
    self._label_id_offset = 1
    self._evaluate_masks = evaluate_masks
    self._group_of_weight = group_of_weight
    self._evaluation = ObjectDetectionEvaluation(
        num_groundtruth_classes=self._num_classes,
        matching_iou_threshold=self._matching_iou_threshold,
        use_weighted_mean_ap=self._use_weighted_mean_ap,
        label_id_offset=self._label_id_offset,
        group_of_weight=self._group_of_weight)
    self._image_ids = set([])
    self._evaluate_corlocs = evaluate_corlocs
    self._evaluate_precision_recall = evaluate_precision_recall
    self._metric_prefix = (metric_prefix + '_') if metric_prefix else ''
    self._expected_keys = set([
        standard_fields.InputDataFields.key,
        standard_fields.InputDataFields.groundtruth_boxes,
        standard_fields.InputDataFields.groundtruth_classes,
        standard_fields.InputDataFields.groundtruth_difficult,
        standard_fields.InputDataFields.groundtruth_instance_masks,
        standard_fields.DetectionResultFields.detection_boxes,
        standard_fields.DetectionResultFields.detection_scores,
        standard_fields.DetectionResultFields.detection_classes,
        standard_fields.DetectionResultFields.detection_masks
    ])
    self._build_metric_names()

  def _build_metric_names(self):
    """Builds a list with metric names."""
    self._metric_names = [
        self._metric_prefix + 'Precision/mAP@{}IOU'.format(
            self._matching_iou_threshold)
    ]
    if self._evaluate_corlocs:
      self._metric_names.append(
          self._metric_prefix +
          'Precision/meanCorLoc@{}IOU'.format(self._matching_iou_threshold))

    category_index = label_map_util.create_category_index(self._categories)
    for idx in range(self._num_classes):
      if idx + self._label_id_offset in category_index:
        category_name = category_index[idx + self._label_id_offset]['name']
        try:
          # Python 2: decode byte strings to unicode before normalization.
          # `unicode` does not exist in Python 3, hence NameError is caught
          # as well and the str is used as-is.
          category_name = unicode(category_name, 'utf-8')
        except (TypeError, NameError):
          pass
        category_name = unicodedata.normalize('NFKD', category_name).encode(
            'ascii', 'ignore')
        self._metric_names.append(
            self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
                self._matching_iou_threshold, category_name))
        if self._evaluate_corlocs:
          self._metric_names.append(
              self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'
              .format(self._matching_iou_threshold, category_name))
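  # For example, a PascalDetectionEvaluator (metric_prefix='PascalBoxes',
  # matching_iou_threshold=0.5) evaluated over a category named 'cat'
  # builds metric names such as:
  #   'PascalBoxes_Precision/mAP@0.5IOU'
  #   'PascalBoxes_PerformanceByCategory/AP@0.5IOU/cat'
  # (In Python 3, the .encode() call above yields bytes, so the category
  # suffix renders as "b'cat'".)
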
  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
          of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
          the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.InputDataFields.groundtruth_classes: integer numpy array
          of shape [num_boxes] containing 1-indexed groundtruth classes for the
          boxes.
        standard_fields.InputDataFields.groundtruth_difficult: Optional length
          M numpy boolean array denoting whether a ground truth box is a
          difficult instance or not. This field is optional to support the case
          that no boxes are difficult.
        standard_fields.InputDataFields.groundtruth_instance_masks: Optional
          numpy array of shape [num_boxes, height, width] with values in {0, 1}.

    Raises:
      ValueError: On adding groundtruth for an image more than once. Will also
        raise error if instance masks are not in groundtruth dictionary.
    """
    if image_id in self._image_ids:
      raise ValueError('Image with id {} already added.'.format(image_id))
    groundtruth_classes = (
        groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
        self._label_id_offset)
    # Use the difficult flags from the dictionary if the key is present and the
    # array is non-empty (or there are no groundtruth boxes at all on this
    # image); otherwise fall back to None.
    if (standard_fields.InputDataFields.groundtruth_difficult in
        groundtruth_dict.keys() and
        (groundtruth_dict[standard_fields.InputDataFields.groundtruth_difficult]
         .size or not groundtruth_classes.size)):
      groundtruth_difficult = groundtruth_dict[
          standard_fields.InputDataFields.groundtruth_difficult]
    else:
      groundtruth_difficult = None
      if not len(self._image_ids) % 1000:
        logging.warn(
            'image %s does not have groundtruth difficult flag specified',
            image_id)
    groundtruth_masks = None
    if self._evaluate_masks:
      if (standard_fields.InputDataFields.groundtruth_instance_masks not in
          groundtruth_dict):
        raise ValueError('Instance masks not in groundtruth dictionary.')
      groundtruth_masks = groundtruth_dict[
          standard_fields.InputDataFields.groundtruth_instance_masks]
    self._evaluation.add_single_ground_truth_image_info(
        image_key=image_id,
        groundtruth_boxes=groundtruth_dict[
            standard_fields.InputDataFields.groundtruth_boxes],
        groundtruth_class_labels=groundtruth_classes,
        groundtruth_is_difficult_list=groundtruth_difficult,
        groundtruth_masks=groundtruth_masks)
    self._image_ids.update([image_id])
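  # Example groundtruth_dict including the optional difficult flags (a sketch;
  # the boxes and labels are hypothetical):
  #
  #   groundtruth_dict = {
  #       standard_fields.InputDataFields.groundtruth_boxes:
  #           np.array([[10., 10., 50., 50.], [20., 30., 60., 70.]],
  #                    dtype=np.float32),
  #       standard_fields.InputDataFields.groundtruth_classes:
  #           np.array([1, 2], dtype=int),
  #       standard_fields.InputDataFields.groundtruth_difficult:
  #           np.array([False, True], dtype=bool),
  #   }
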
  def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        standard_fields.DetectionResultFields.detection_boxes: float32 numpy
          array of shape [num_boxes, 4] containing `num_boxes` detection boxes
          of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.DetectionResultFields.detection_scores: float32 numpy
          array of shape [num_boxes] containing detection scores for the boxes.
        standard_fields.DetectionResultFields.detection_classes: integer numpy
          array of shape [num_boxes] containing 1-indexed detection classes for
          the boxes.
        standard_fields.DetectionResultFields.detection_masks: uint8 numpy
          array of shape [num_boxes, height, width] containing `num_boxes` masks
          of values ranging between 0 and 1.

    Raises:
      ValueError: If detection masks are not in detections dictionary.
    """
    detection_classes = (
        detections_dict[standard_fields.DetectionResultFields.detection_classes]
        - self._label_id_offset)
    detection_masks = None
    if self._evaluate_masks:
      if (standard_fields.DetectionResultFields.detection_masks not in
          detections_dict):
        raise ValueError('Detection masks not in detections dictionary.')
      detection_masks = detections_dict[
          standard_fields.DetectionResultFields.detection_masks]
    self._evaluation.add_single_detected_image_info(
        image_key=image_id,
        detected_boxes=detections_dict[
            standard_fields.DetectionResultFields.detection_boxes],
        detected_scores=detections_dict[
            standard_fields.DetectionResultFields.detection_scores],
        detected_class_labels=detection_classes,
        detected_masks=detection_masks)
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      A dictionary of metrics with the following fields -

      1. summary_metrics:
        '<prefix if not empty>_Precision/mAP@<matching_iou_threshold>IOU': mean
        average precision at the specified IOU threshold.

      2. per_category_ap: category specific results with keys of the form
        '<prefix if not empty>_PerformanceByCategory/
        mAP@<matching_iou_threshold>IOU/category'.
    """
    (per_class_ap, mean_ap, per_class_precision, per_class_recall,
     per_class_corloc, mean_corloc) = (
         self._evaluation.evaluate())
    pascal_metrics = {self._metric_names[0]: mean_ap}
    if self._evaluate_corlocs:
      pascal_metrics[self._metric_names[1]] = mean_corloc
    category_index = label_map_util.create_category_index(self._categories)
    for idx in range(per_class_ap.size):
      if idx + self._label_id_offset in category_index:
        category_name = category_index[idx + self._label_id_offset]['name']
        try:
          # See the Python 2/3 note in _build_metric_names.
          category_name = unicode(category_name, 'utf-8')
        except (TypeError, NameError):
          pass
        category_name = unicodedata.normalize(
            'NFKD', category_name).encode('ascii', 'ignore')
        display_name = (
            self._metric_prefix + 'PerformanceByCategory/AP@{}IOU/{}'.format(
                self._matching_iou_threshold, category_name))
        pascal_metrics[display_name] = per_class_ap[idx]

        # Optionally add precision and recall values.
        if self._evaluate_precision_recall:
          display_name = (
              self._metric_prefix +
              'PerformanceByCategory/Precision@{}IOU/{}'.format(
                  self._matching_iou_threshold, category_name))
          pascal_metrics[display_name] = per_class_precision[idx]
          display_name = (
              self._metric_prefix +
              'PerformanceByCategory/Recall@{}IOU/{}'.format(
                  self._matching_iou_threshold, category_name))
          pascal_metrics[display_name] = per_class_recall[idx]

        # Optionally add CorLoc metrics.
        if self._evaluate_corlocs:
          display_name = (
              self._metric_prefix + 'PerformanceByCategory/CorLoc@{}IOU/{}'
              .format(self._matching_iou_threshold, category_name))
          pascal_metrics[display_name] = per_class_corloc[idx]
    return pascal_metrics
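  # A returned dictionary might look like this (hypothetical values, for
  # metric_prefix='PascalBoxes' and a single category named 'cat'):
  #
  #   {'PascalBoxes_Precision/mAP@0.5IOU': 0.73,
  #    'PascalBoxes_PerformanceByCategory/AP@0.5IOU/cat': 0.73}
  #
  # With evaluate_precision_recall=True, per-category
  # 'PerformanceByCategory/Precision@...' and '.../Recall@...' entries holding
  # float numpy arrays are added as well.
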
  def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    self._evaluation = ObjectDetectionEvaluation(
        num_groundtruth_classes=self._num_classes,
        matching_iou_threshold=self._matching_iou_threshold,
        use_weighted_mean_ap=self._use_weighted_mean_ap,
        label_id_offset=self._label_id_offset)
    self._image_ids.clear()

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns dict of metrics to use with `tf.estimator.EstimatorSpec`.

    Note that this must only be implemented if performing evaluation with a
    `tf.estimator.Estimator`.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating an object
        detection model, returned from
        eval_util.result_dict_for_single_example(). It must contain
        standard_fields.InputDataFields.key.

    Returns:
      A dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in `tf.estimator.EstimatorSpec`.
    """
    # Remove unexpected fields.
    eval_dict_filtered = dict()
    for key, value in eval_dict.items():
      if key in self._expected_keys:
        eval_dict_filtered[key] = value

    eval_dict_keys = eval_dict_filtered.keys()

    def update_op(image_id, *eval_dict_batched_as_list):
      """Update operation that adds batch of images to ObjectDetectionEvaluator.

      Args:
        image_id: image id (single id or an array)
        *eval_dict_batched_as_list: the values of the dictionary of tensors.
      """
      if np.isscalar(image_id):
        single_example_dict = dict(
            zip(eval_dict_keys, eval_dict_batched_as_list))
        self.add_single_ground_truth_image_info(image_id, single_example_dict)
        self.add_single_detected_image_info(image_id, single_example_dict)
      else:
        for unzipped_tuple in zip(*eval_dict_batched_as_list):
          single_example_dict = dict(zip(eval_dict_keys, unzipped_tuple))
          image_id = single_example_dict[standard_fields.InputDataFields.key]
          self.add_single_ground_truth_image_info(image_id, single_example_dict)
          self.add_single_detected_image_info(image_id, single_example_dict)

    args = [eval_dict_filtered[standard_fields.InputDataFields.key]]
    args.extend(eval_dict_filtered.values())
    update_op = tf.py_func(update_op, args, [])

    def first_value_func():
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[self._metric_names[0]])

    def value_func_factory(metric_name):

      def value_func():
        return np.float32(self._metrics[metric_name])

      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {self._metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in self._metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
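  # Sketch of how these ops plug into a `tf.estimator.Estimator` model_fn
  # during EVAL (the `eval_util` wiring and the `loss` tensor are assumed,
  # not shown here):
  #
  #   eval_dict = eval_util.result_dict_for_single_example(...)
  #   evaluator = PascalDetectionEvaluator(categories)
  #   eval_metric_ops = evaluator.get_estimator_eval_metric_ops(eval_dict)
  #   return tf.estimator.EstimatorSpec(
  #       mode=tf.estimator.ModeKeys.EVAL,
  #       loss=loss,
  #       eval_metric_ops=eval_metric_ops)

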
class PascalDetectionEvaluator(ObjectDetectionEvaluator):
  """A class to evaluate detections using PASCAL metrics."""

  def __init__(self, categories, matching_iou_threshold=0.5):
    super(PascalDetectionEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='PascalBoxes',
        use_weighted_mean_ap=False)


class WeightedPascalDetectionEvaluator(ObjectDetectionEvaluator):
  """A class to evaluate detections using weighted PASCAL metrics.

  Weighted PASCAL metrics computes the mean average precision as the average
  precision given the scores and tp_fp_labels of all classes. In comparison,
  PASCAL metrics computes the mean average precision as the mean of the
  per-class average precisions.

  This definition is very similar to the mean of the per-class average
  precisions weighted by class frequency. However, they are typically not the
  same as the average precision is not a linear function of the scores and
  tp_fp_labels.
  """

  def __init__(self, categories, matching_iou_threshold=0.5):
    super(WeightedPascalDetectionEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='WeightedPascalBoxes',
        use_weighted_mean_ap=True)


class PascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
  """A class to evaluate instance masks using PASCAL metrics."""

  def __init__(self, categories, matching_iou_threshold=0.5):
    super(PascalInstanceSegmentationEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='PascalMasks',
        use_weighted_mean_ap=False,
        evaluate_masks=True)


class WeightedPascalInstanceSegmentationEvaluator(ObjectDetectionEvaluator):
  """A class to evaluate instance masks using weighted PASCAL metrics.

  Weighted PASCAL metrics computes the mean average precision as the average
  precision given the scores and tp_fp_labels of all classes. In comparison,
  PASCAL metrics computes the mean average precision as the mean of the
  per-class average precisions.

  This definition is very similar to the mean of the per-class average
  precisions weighted by class frequency. However, they are typically not the
  same as the average precision is not a linear function of the scores and
  tp_fp_labels.
  """

  def __init__(self, categories, matching_iou_threshold=0.5):
    super(WeightedPascalInstanceSegmentationEvaluator, self).__init__(
        categories,
        matching_iou_threshold=matching_iou_threshold,
        evaluate_corlocs=False,
        metric_prefix='WeightedPascalMasks',
        use_weighted_mean_ap=True,
        evaluate_masks=True)
class OpenImagesDetectionEvaluator(ObjectDetectionEvaluator):
  """A class to evaluate detections using Open Images V2 metrics.

  Open Images V2 introduces a group-of type of bounding box, and this metric
  handles those boxes appropriately.
  """

  def __init__(self,
               categories,
               matching_iou_threshold=0.5,
               evaluate_corlocs=False,
               metric_prefix='OpenImagesV2',
               group_of_weight=0.0):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      matching_iou_threshold: IOU threshold to use for matching groundtruth
        boxes to detection boxes.
      evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
      metric_prefix: Prefix name of the metric.
      group_of_weight: Weight of the group-of bounding box. If set to 0 (default
        for Open Images V2 detection protocol), detections of the correct class
        within a group-of box are ignored. If weight is > 0, then if at least
        one detection falls within a group-of box with matching_iou_threshold,
        weight group_of_weight is added to true positives. Consequently, if no
        detection falls within a group-of box, weight group_of_weight is added
        to false negatives.
    """
    super(OpenImagesDetectionEvaluator, self).__init__(
        categories,
        matching_iou_threshold,
        evaluate_corlocs,
        metric_prefix=metric_prefix,
        group_of_weight=group_of_weight)
    self._expected_keys = set([
        standard_fields.InputDataFields.key,
        standard_fields.InputDataFields.groundtruth_boxes,
        standard_fields.InputDataFields.groundtruth_classes,
        standard_fields.InputDataFields.groundtruth_group_of,
        standard_fields.DetectionResultFields.detection_boxes,
        standard_fields.DetectionResultFields.detection_scores,
        standard_fields.DetectionResultFields.detection_classes,
    ])

  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
          of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
          the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.InputDataFields.groundtruth_classes: integer numpy array
          of shape [num_boxes] containing 1-indexed groundtruth classes for the
          boxes.
        standard_fields.InputDataFields.groundtruth_group_of: Optional length
          M numpy boolean array denoting whether a groundtruth box contains a
          group of instances.

    Raises:
      ValueError: On adding groundtruth for an image more than once.
    """
    if image_id in self._image_ids:
      raise ValueError('Image with id {} already added.'.format(image_id))
    groundtruth_classes = (
        groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
        self._label_id_offset)
    # Use the group_of flags from the dictionary if the key is present and the
    # array is non-empty (or there are no groundtruth boxes at all on this
    # image); otherwise fall back to None.
    if (standard_fields.InputDataFields.groundtruth_group_of in
        groundtruth_dict.keys() and
        (groundtruth_dict[standard_fields.InputDataFields.groundtruth_group_of]
         .size or not groundtruth_classes.size)):
      groundtruth_group_of = groundtruth_dict[
          standard_fields.InputDataFields.groundtruth_group_of]
    else:
      groundtruth_group_of = None
      if not len(self._image_ids) % 1000:
        logging.warn(
            'image %s does not have groundtruth group_of flag specified',
            image_id)
    self._evaluation.add_single_ground_truth_image_info(
        image_id,
        groundtruth_dict[standard_fields.InputDataFields.groundtruth_boxes],
        groundtruth_classes,
        groundtruth_is_difficult_list=None,
        groundtruth_is_group_of_list=groundtruth_group_of)
    self._image_ids.update([image_id])
class OpenImagesDetectionChallengeEvaluator(OpenImagesDetectionEvaluator):
  """A class that implements Open Images Challenge Detection metrics.

  The Open Images Challenge Detection metric has two major changes in
  comparison with the Open Images V2 detection metric:
  - a custom weight might be specified for detecting an object contained in
    a group-of box.
  - verified image-level labels should be explicitly provided for
    evaluation: if an image has neither a positive nor a negative image-level
    label of class c, all detections of this class on this image will be
    ignored.
  """

  def __init__(self,
               categories,
               matching_iou_threshold=0.5,
               evaluate_corlocs=False,
               group_of_weight=1.0):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      matching_iou_threshold: IOU threshold to use for matching groundtruth
        boxes to detection boxes.
      evaluate_corlocs: if True, additionally evaluates and returns CorLoc.
      group_of_weight: weight of a group-of box. If set to 0, detections of the
        correct class within a group-of box are ignored. If weight is > 0
        (default for Open Images Detection Challenge 2018), then if at least one
        detection falls within a group-of box with matching_iou_threshold,
        weight group_of_weight is added to true positives. Consequently, if no
        detection falls within a group-of box, weight group_of_weight is added
        to false negatives.
    """
    super(OpenImagesDetectionChallengeEvaluator, self).__init__(
        categories,
        matching_iou_threshold,
        evaluate_corlocs,
        metric_prefix='OpenImagesChallenge2018',
        group_of_weight=group_of_weight)
    self._evaluatable_labels = {}
    self._expected_keys = set([
        standard_fields.InputDataFields.key,
        standard_fields.InputDataFields.groundtruth_boxes,
        standard_fields.InputDataFields.groundtruth_classes,
        standard_fields.InputDataFields.groundtruth_group_of,
        standard_fields.InputDataFields.groundtruth_image_classes,
        standard_fields.DetectionResultFields.detection_boxes,
        standard_fields.DetectionResultFields.detection_scores,
        standard_fields.DetectionResultFields.detection_classes,
    ])

  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        standard_fields.InputDataFields.groundtruth_boxes: float32 numpy array
          of shape [num_boxes, 4] containing `num_boxes` groundtruth boxes of
          the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.InputDataFields.groundtruth_classes: integer numpy array
          of shape [num_boxes] containing 1-indexed groundtruth classes for the
          boxes.
        standard_fields.InputDataFields.groundtruth_image_classes: integer 1D
          numpy array containing all classes for which labels are verified.
        standard_fields.InputDataFields.groundtruth_group_of: Optional length
          M numpy boolean array denoting whether a groundtruth box contains a
          group of instances.

    Raises:
      ValueError: On adding groundtruth for an image more than once.
    """
    super(OpenImagesDetectionChallengeEvaluator,
          self).add_single_ground_truth_image_info(image_id, groundtruth_dict)
    groundtruth_classes = (
        groundtruth_dict[standard_fields.InputDataFields.groundtruth_classes] -
        self._label_id_offset)
    self._evaluatable_labels[image_id] = np.unique(
        np.concatenate(((groundtruth_dict.get(
            standard_fields.InputDataFields.groundtruth_image_classes,
            np.array([], dtype=int)) - self._label_id_offset),
                        groundtruth_classes)))
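  # A worked example of the label merge above (hypothetical values, after the
  # label id offset is subtracted): with box classes [3, 3, 7] and verified
  # image-level classes [3, 5], the evaluatable labels for the image become
  # np.unique(np.concatenate(([3, 5], [3, 3, 7]))) -> [3, 5, 7]. Detections of
  # any other class on this image are dropped in
  # add_single_detected_image_info below.
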
  def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        standard_fields.DetectionResultFields.detection_boxes: float32 numpy
          array of shape [num_boxes, 4] containing `num_boxes` detection boxes
          of the format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        standard_fields.DetectionResultFields.detection_scores: float32 numpy
          array of shape [num_boxes] containing detection scores for the boxes.
        standard_fields.DetectionResultFields.detection_classes: integer numpy
          array of shape [num_boxes] containing 1-indexed detection classes for
          the boxes.

    Raises:
      ValueError: If detection masks are not in detections dictionary.
    """
    if image_id not in self._image_ids:
      # The evaluator assumes that groundtruth is inserted before detections.
      # Register the image with an empty set of evaluatable labels: all of its
      # detections are then ignored, and a later attempt to add groundtruth
      # for it raises an error.
      self._image_ids.update([image_id])
      self._evaluatable_labels[image_id] = np.array([])

    detection_classes = (
        detections_dict[standard_fields.DetectionResultFields.detection_classes]
        - self._label_id_offset)
    allowed_classes = np.where(
        np.isin(detection_classes, self._evaluatable_labels[image_id]))
    detection_classes = detection_classes[allowed_classes]
    detected_boxes = detections_dict[
        standard_fields.DetectionResultFields.detection_boxes][allowed_classes]
    detected_scores = detections_dict[
        standard_fields.DetectionResultFields.detection_scores][allowed_classes]
    self._evaluation.add_single_detected_image_info(
        image_key=image_id,
        detected_boxes=detected_boxes,
        detected_scores=detected_scores,
        detected_class_labels=detection_classes)

  def clear(self):
    """Clears stored data."""
    super(OpenImagesDetectionChallengeEvaluator, self).clear()
    self._evaluatable_labels.clear()
ObjectDetectionEvalMetrics = collections.namedtuple(
    'ObjectDetectionEvalMetrics', [
        'average_precisions', 'mean_ap', 'precisions', 'recalls', 'corlocs',
        'mean_corloc'
    ])


class ObjectDetectionEvaluation(object):
  """Internal implementation of Pascal object detection metrics."""

  def __init__(self,
               num_groundtruth_classes,
               matching_iou_threshold=0.5,
               nms_iou_threshold=1.0,
               nms_max_output_boxes=10000,
               use_weighted_mean_ap=False,
               label_id_offset=0,
               group_of_weight=0.0,
               per_image_eval_class=per_image_evaluation.PerImageEvaluation):
    """Constructor.

    Args:
      num_groundtruth_classes: Number of ground-truth classes.
      matching_iou_threshold: IOU threshold used for matching detected boxes
        to ground-truth boxes.
      nms_iou_threshold: IOU threshold used for non-maximum suppression.
      nms_max_output_boxes: Maximum number of boxes returned by non-maximum
        suppression.
      use_weighted_mean_ap: (optional) boolean which determines if the mean
        average precision is computed directly from the scores and tp_fp_labels
        of all classes.
      label_id_offset: The label id offset.
      group_of_weight: Weight of group-of boxes. If set to 0, detections of the
        correct class within a group-of box are ignored. If weight is > 0, then
        if at least one detection falls within a group-of box with
        matching_iou_threshold, weight group_of_weight is added to true
        positives. Consequently, if no detection falls within a group-of box,
        weight group_of_weight is added to false negatives.
      per_image_eval_class: The class that contains functions for computing
        per image metrics.

    Raises:
      ValueError: if num_groundtruth_classes is smaller than 1.
    """
    if num_groundtruth_classes < 1:
      raise ValueError('Need at least 1 groundtruth class for evaluation.')

    self.per_image_eval = per_image_eval_class(
        num_groundtruth_classes=num_groundtruth_classes,
        matching_iou_threshold=matching_iou_threshold,
        nms_iou_threshold=nms_iou_threshold,
        nms_max_output_boxes=nms_max_output_boxes,
        group_of_weight=group_of_weight)
    self.group_of_weight = group_of_weight
    self.num_class = num_groundtruth_classes
    self.use_weighted_mean_ap = use_weighted_mean_ap
    self.label_id_offset = label_id_offset

    self.groundtruth_boxes = {}
    self.groundtruth_class_labels = {}
    self.groundtruth_masks = {}
    self.groundtruth_is_difficult_list = {}
    self.groundtruth_is_group_of_list = {}
    self.num_gt_instances_per_class = np.zeros(self.num_class, dtype=float)
    self.num_gt_imgs_per_class = np.zeros(self.num_class, dtype=int)

    self._initialize_detections()

  def _initialize_detections(self):
    """Initializes internal data structures."""
    self.detection_keys = set()
    self.scores_per_class = [[] for _ in range(self.num_class)]
    self.tp_fp_labels_per_class = [[] for _ in range(self.num_class)]
    self.num_images_correctly_detected_per_class = np.zeros(self.num_class)
    self.average_precision_per_class = np.empty(self.num_class, dtype=float)
    self.average_precision_per_class.fill(np.nan)
    self.precisions_per_class = [np.nan] * self.num_class
    self.recalls_per_class = [np.nan] * self.num_class
    self.corloc_per_class = np.ones(self.num_class, dtype=float)

  def clear_detections(self):
    self._initialize_detections()
  def add_single_ground_truth_image_info(self,
                                         image_key,
                                         groundtruth_boxes,
                                         groundtruth_class_labels,
                                         groundtruth_is_difficult_list=None,
                                         groundtruth_is_group_of_list=None,
                                         groundtruth_masks=None):
    """Adds groundtruth for a single image to be used for evaluation.

    Args:
      image_key: A unique string/integer identifier for the image.
      groundtruth_boxes: float32 numpy array of shape [num_boxes, 4]
        containing `num_boxes` groundtruth boxes of the format
        [ymin, xmin, ymax, xmax] in absolute image coordinates.
      groundtruth_class_labels: integer numpy array of shape [num_boxes]
        containing 0-indexed groundtruth classes for the boxes.
      groundtruth_is_difficult_list: A length M numpy boolean array denoting
        whether a ground truth box is a difficult instance or not. To support
        the case that no boxes are difficult, it is by default set as None.
      groundtruth_is_group_of_list: A length M numpy boolean array denoting
        whether a ground truth box is a group-of box or not. To support
        the case that no boxes are groups-of, it is by default set as None.
      groundtruth_masks: uint8 numpy array of shape
        [num_boxes, height, width] containing `num_boxes` groundtruth masks.
        The mask values range from 0 to 1.
    """
    if image_key in self.groundtruth_boxes:
      logging.warn(
          'image %s has already been added to the ground truth database.',
          image_key)
      return

    self.groundtruth_boxes[image_key] = groundtruth_boxes
    self.groundtruth_class_labels[image_key] = groundtruth_class_labels
    self.groundtruth_masks[image_key] = groundtruth_masks
    if groundtruth_is_difficult_list is None:
      num_boxes = groundtruth_boxes.shape[0]
      groundtruth_is_difficult_list = np.zeros(num_boxes, dtype=bool)
    self.groundtruth_is_difficult_list[
        image_key] = groundtruth_is_difficult_list.astype(dtype=bool)
    if groundtruth_is_group_of_list is None:
      num_boxes = groundtruth_boxes.shape[0]
      groundtruth_is_group_of_list = np.zeros(num_boxes, dtype=bool)
    self.groundtruth_is_group_of_list[
        image_key] = groundtruth_is_group_of_list.astype(dtype=bool)

    self._update_ground_truth_statistics(
        groundtruth_class_labels,
        groundtruth_is_difficult_list.astype(dtype=bool),
        groundtruth_is_group_of_list.astype(dtype=bool))
  def add_single_detected_image_info(self, image_key, detected_boxes,
                                     detected_scores, detected_class_labels,
                                     detected_masks=None):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_key: A unique string/integer identifier for the image.
      detected_boxes: float32 numpy array of shape [num_boxes, 4]
        containing `num_boxes` detection boxes of the format
        [ymin, xmin, ymax, xmax] in absolute image coordinates.
      detected_scores: float32 numpy array of shape [num_boxes] containing
        detection scores for the boxes.
      detected_class_labels: integer numpy array of shape [num_boxes] containing
        0-indexed detection classes for the boxes.
      detected_masks: np.uint8 numpy array of shape [num_boxes, height, width]
        containing `num_boxes` detection masks with values ranging
        between 0 and 1.

    Raises:
      ValueError: if the number of boxes, scores and class labels differ in
        length.
    """
    if (len(detected_boxes) != len(detected_scores) or
        len(detected_boxes) != len(detected_class_labels)):
      raise ValueError(
          'detected_boxes, detected_scores and detected_class_labels should '
          'all have same lengths. Got [%d, %d, %d]' %
          (len(detected_boxes), len(detected_scores),
           len(detected_class_labels)))

    if image_key in self.detection_keys:
      logging.warn(
          'image %s has already been added to the detection result database',
          image_key)
      return

    self.detection_keys.add(image_key)
    if image_key in self.groundtruth_boxes:
      groundtruth_boxes = self.groundtruth_boxes[image_key]
      groundtruth_class_labels = self.groundtruth_class_labels[image_key]
      # Masks are popped instead of looked up because we do not want to keep
      # all masks in memory, which can cause memory overflow.
      groundtruth_masks = self.groundtruth_masks.pop(image_key)
      groundtruth_is_difficult_list = self.groundtruth_is_difficult_list[
          image_key]
      groundtruth_is_group_of_list = self.groundtruth_is_group_of_list[
          image_key]
    else:
      groundtruth_boxes = np.empty(shape=[0, 4], dtype=float)
      groundtruth_class_labels = np.array([], dtype=int)
      if detected_masks is None:
        groundtruth_masks = None
      else:
        groundtruth_masks = np.empty(shape=[0, 1, 1], dtype=float)
      groundtruth_is_difficult_list = np.array([], dtype=bool)
      groundtruth_is_group_of_list = np.array([], dtype=bool)
    scores, tp_fp_labels, is_class_correctly_detected_in_image = (
        self.per_image_eval.compute_object_detection_metrics(
            detected_boxes=detected_boxes,
            detected_scores=detected_scores,
            detected_class_labels=detected_class_labels,
            groundtruth_boxes=groundtruth_boxes,
            groundtruth_class_labels=groundtruth_class_labels,
            groundtruth_is_difficult_list=groundtruth_is_difficult_list,
            groundtruth_is_group_of_list=groundtruth_is_group_of_list,
            detected_masks=detected_masks,
            groundtruth_masks=groundtruth_masks))

    for i in range(self.num_class):
      if scores[i].shape[0] > 0:
        self.scores_per_class[i].append(scores[i])
        self.tp_fp_labels_per_class[i].append(tp_fp_labels[i])
    (self.num_images_correctly_detected_per_class
    ) += is_class_correctly_detected_in_image
  def _update_ground_truth_statistics(self, groundtruth_class_labels,
                                      groundtruth_is_difficult_list,
                                      groundtruth_is_group_of_list):
    """Updates ground truth statistics.

    1. Difficult boxes are ignored when counting the number of ground truth
    instances as done in Pascal VOC devkit.
    2. Difficult boxes are treated as normal boxes when computing CorLoc
    related statistics.

    Args:
      groundtruth_class_labels: An integer numpy array of length M,
        representing M class labels of object instances in ground truth
      groundtruth_is_difficult_list: A boolean numpy array of length M denoting
        whether a ground truth box is a difficult instance or not
      groundtruth_is_group_of_list: A boolean numpy array of length M denoting
        whether a ground truth box is a group-of box or not
    """
    for class_index in range(self.num_class):
      num_gt_instances = np.sum(groundtruth_class_labels[
          ~groundtruth_is_difficult_list
          & ~groundtruth_is_group_of_list] == class_index)
      num_groupof_gt_instances = self.group_of_weight * np.sum(
          groundtruth_class_labels[groundtruth_is_group_of_list] == class_index)
      self.num_gt_instances_per_class[
          class_index] += num_gt_instances + num_groupof_gt_instances
      if np.any(groundtruth_class_labels == class_index):
        self.num_gt_imgs_per_class[class_index] += 1
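  # For example (hypothetical counts): with group_of_weight=0.5, an image with
  # two ordinary (non-difficult, non-group-of) boxes of class c and one
  # group-of box of class c contributes 2 + 0.5 * 1 = 2.5 to
  # num_gt_instances_per_class[c].
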
  def evaluate(self):
    """Compute evaluation result.

    Returns:
      A named tuple with the following fields -
        average_precision: float numpy array of average precision for
            each class.
        mean_ap: mean average precision of all classes, float scalar
        precisions: List of precisions, each precision is a float numpy
            array
        recalls: List of recalls, each recall is a float numpy array
        corloc: float numpy array of CorLoc scores for each class
        mean_corloc: mean CorLoc score over all classes, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
          self.label_id_offset)

    if self.use_weighted_mean_ap:
      all_scores = np.array([], dtype=float)
      all_tp_fp_labels = np.array([], dtype=bool)
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      if not self.scores_per_class[class_index]:
        scores = np.array([], dtype=float)
        tp_fp_labels = np.array([], dtype=float)
      else:
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      if self.use_weighted_mean_ap:
        all_scores = np.append(all_scores, scores)
        all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class[class_index] = precision
      self.recalls_per_class[class_index] = recall
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision
      logging.info('average_precision: %f', average_precision)

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    if self.use_weighted_mean_ap:
      num_gt_instances = np.sum(self.num_gt_instances_per_class)
      precision, recall = metrics.compute_precision_recall(
          all_scores, all_tp_fp_labels, num_gt_instances)
      mean_ap = metrics.compute_average_precision(precision, recall)
    else:
      mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return ObjectDetectionEvalMetrics(
        self.average_precision_per_class, mean_ap, self.precisions_per_class,
        self.recalls_per_class, self.corloc_per_class, mean_corloc)
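
# Direct use of the internal ObjectDetectionEvaluation class (a sketch with
# hypothetical boxes; class labels here are 0-indexed since the default
# label_id_offset is 0):
#
#   evaluation = ObjectDetectionEvaluation(num_groundtruth_classes=2)
#   evaluation.add_single_ground_truth_image_info(
#       image_key='image_1',
#       groundtruth_boxes=np.array([[10., 10., 50., 50.]], dtype=np.float32),
#       groundtruth_class_labels=np.array([0], dtype=int))
#   evaluation.add_single_detected_image_info(
#       image_key='image_1',
#       detected_boxes=np.array([[12., 11., 48., 49.]], dtype=np.float32),
#       detected_scores=np.array([0.9], dtype=np.float32),
#       detected_class_labels=np.array([0], dtype=int))
#   eval_metrics = evaluation.evaluate()  # ObjectDetectionEvalMetrics tuple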