# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. """Tests for eval_util."""
  16. from __future__ import absolute_import
  17. from __future__ import division
  18. from __future__ import print_function
  19. from absl.testing import parameterized
  20. import tensorflow as tf
  21. from object_detection import eval_util
  22. from object_detection.core import standard_fields as fields
  23. from object_detection.protos import eval_pb2
  24. from object_detection.utils import test_case


class EvalUtilTest(test_case.TestCase, parameterized.TestCase):

  def _get_categories_list(self):
    return [{'id': 0, 'name': 'person'},
            {'id': 1, 'name': 'dog'},
            {'id': 2, 'name': 'cat'}]

  def _make_evaluation_dict(self,
                            resized_groundtruth_masks=False,
                            batch_size=1,
                            max_gt_boxes=None,
                            scale_to_absolute=False):
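    """Builds a synthetic evaluation dict with one perfect detection.

    Every image gets a single detection that exactly matches the single
    groundtruth box, so the COCO mAP computed from this dict is expected
    to be 1.0.
    """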
    input_data_fields = fields.InputDataFields
    detection_fields = fields.DetectionResultFields

    image = tf.zeros(shape=[batch_size, 20, 20, 3], dtype=tf.uint8)
    if batch_size == 1:
      key = tf.constant('image1')
    else:
      key = tf.constant([str(i) for i in range(batch_size)])
    detection_boxes = tf.tile(tf.constant([[[0., 0., 1., 1.]]]),
                              multiples=[batch_size, 1, 1])
    detection_scores = tf.tile(tf.constant([[0.8]]), multiples=[batch_size, 1])
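    # Note: eval_util's result-dict helpers add a label id offset of 1 to
    # detection classes, so detection class 0 here corresponds to groundtruth
    # class 1 below.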
    detection_classes = tf.tile(tf.constant([[0]]), multiples=[batch_size, 1])
    detection_masks = tf.tile(tf.ones(shape=[1, 1, 20, 20], dtype=tf.float32),
                              multiples=[batch_size, 1, 1, 1])
    num_detections = tf.ones([batch_size])
    groundtruth_boxes = tf.constant([[0., 0., 1., 1.]])
    groundtruth_classes = tf.constant([1])
    groundtruth_instance_masks = tf.ones(shape=[1, 20, 20], dtype=tf.uint8)
    if resized_groundtruth_masks:
      groundtruth_instance_masks = tf.ones(shape=[1, 10, 10], dtype=tf.uint8)
    if batch_size > 1:
      groundtruth_boxes = tf.tile(tf.expand_dims(groundtruth_boxes, 0),
                                  multiples=[batch_size, 1, 1])
      groundtruth_classes = tf.tile(tf.expand_dims(groundtruth_classes, 0),
                                    multiples=[batch_size, 1])
      groundtruth_instance_masks = tf.tile(
          tf.expand_dims(groundtruth_instance_masks, 0),
          multiples=[batch_size, 1, 1, 1])

    detections = {
        detection_fields.detection_boxes: detection_boxes,
        detection_fields.detection_scores: detection_scores,
        detection_fields.detection_classes: detection_classes,
        detection_fields.detection_masks: detection_masks,
        detection_fields.num_detections: num_detections
    }
    groundtruth = {
        input_data_fields.groundtruth_boxes: groundtruth_boxes,
        input_data_fields.groundtruth_classes: groundtruth_classes,
        input_data_fields.groundtruth_instance_masks: groundtruth_instance_masks
    }
    if batch_size > 1:
      return eval_util.result_dict_for_batched_example(
          image, key, detections, groundtruth,
          scale_to_absolute=scale_to_absolute,
          max_gt_boxes=max_gt_boxes)
    else:
      return eval_util.result_dict_for_single_example(
          image, key, detections, groundtruth,
          scale_to_absolute=scale_to_absolute)

  @parameterized.parameters(
      {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True},
      {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True},
      {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False},
      {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False}
  )
  def test_get_eval_metric_ops_for_coco_detections(self, batch_size=1,
                                                   max_gt_boxes=None,
                                                   scale_to_absolute=False):
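    """Checks that box metrics are produced and mask metrics are excluded."""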
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                           max_gt_boxes=max_gt_boxes,
                                           scale_to_absolute=scale_to_absolute)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op = metric_ops['DetectionBoxes_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.items():
        metrics[key] = value_op
      sess.run(update_op)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertNotIn('DetectionMasks_Precision/mAP', metrics)

  @parameterized.parameters(
      {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True},
      {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True},
      {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False},
      {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False}
  )
  def test_get_eval_metric_ops_for_coco_detections_and_masks(
      self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
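    """Checks that box and mask metrics are produced side by side."""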
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                           max_gt_boxes=max_gt_boxes,
                                           scale_to_absolute=scale_to_absolute)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
    _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.items():
        metrics[key] = value_op
      sess.run(update_op_boxes)
      sess.run(update_op_masks)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])

  @parameterized.parameters(
      {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': True},
      {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': True},
      {'batch_size': 1, 'max_gt_boxes': None, 'scale_to_absolute': False},
      {'batch_size': 8, 'max_gt_boxes': [1], 'scale_to_absolute': False}
  )
  def test_get_eval_metric_ops_for_coco_detections_and_resized_masks(
      self, batch_size=1, max_gt_boxes=None, scale_to_absolute=False):
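    """Same as above, but with groundtruth masks at a different resolution."""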
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict(batch_size=batch_size,
                                           max_gt_boxes=max_gt_boxes,
                                           scale_to_absolute=scale_to_absolute,
                                           resized_groundtruth_masks=True)
    metric_ops = eval_util.get_eval_metric_ops_for_evaluators(
        eval_config, categories, eval_dict)
    _, update_op_boxes = metric_ops['DetectionBoxes_Precision/mAP']
    _, update_op_masks = metric_ops['DetectionMasks_Precision/mAP']

    with self.test_session() as sess:
      metrics = {}
      for key, (value_op, _) in metric_ops.items():
        metrics[key] = value_op
      sess.run(update_op_boxes)
      sess.run(update_op_masks)
      metrics = sess.run(metrics)
      self.assertAlmostEqual(1.0, metrics['DetectionBoxes_Precision/mAP'])
      self.assertAlmostEqual(1.0, metrics['DetectionMasks_Precision/mAP'])

  def test_get_eval_metric_ops_raises_error_with_unsupported_metric(self):
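    """An unknown metrics_set entry should raise a ValueError."""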
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['unsupported_metric'])
    categories = self._get_categories_list()
    eval_dict = self._make_evaluation_dict()
    with self.assertRaises(ValueError):
      eval_util.get_eval_metric_ops_for_evaluators(
          eval_config, categories, eval_dict)

  def test_get_eval_metric_ops_for_evaluators(self):
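    """evaluator_options_from_eval_config should pick up per-category flags."""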
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(
        ['coco_detection_metrics', 'coco_mask_metrics'])
    eval_config.include_metrics_per_category = True

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    self.assertTrue(evaluator_options['coco_detection_metrics'][
        'include_metrics_per_category'])
    self.assertTrue(evaluator_options['coco_mask_metrics'][
        'include_metrics_per_category'])

  def test_get_evaluator_with_evaluator_options(self):
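    """Options built from the config should reach the evaluator itself."""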
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator_options = eval_util.evaluator_options_from_eval_config(
        eval_config)
    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options)

    self.assertTrue(evaluator[0]._include_metrics_per_category)

  def test_get_evaluator_with_no_evaluator_options(self):
    eval_config = eval_pb2.EvalConfig()
    eval_config.metrics_set.extend(['coco_detection_metrics'])
    eval_config.include_metrics_per_category = True
    categories = self._get_categories_list()

    evaluator = eval_util.get_evaluators(
        eval_config, categories, evaluator_options=None)

    # Even though we are setting eval_config.include_metrics_per_category
    # to True, this option is never passed into the DetectionEvaluator
    # constructor (via `evaluator_options`).
    self.assertFalse(evaluator[0]._include_metrics_per_category)


if __name__ == '__main__':
  tf.test.main()