# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python library for ssd model, tailored for TPU inference."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

# pylint: disable=g-import-not-at-top
# Checking TF version, because this module relies on TPUPartitionedCall
# in tensorflow.python.tpu, which is not available until TF r1.14.
major, minor, _ = tf.__version__.split('.')  # pylint: disable=protected-access
if int(major) < 1 or (int(major) == 1 and int(minor) < 14):
  raise RuntimeError(
      'TensorFlow version >= 1.14 is required. Found ({}).'.format(
          tf.__version__))  # pylint: disable=protected-access
from tensorflow.python.framework import function
from tensorflow.python.tpu import functional as tpu_functional
from tensorflow.python.tpu.ops import tpu_ops
from object_detection import exporter
from object_detection.builders import model_builder
from object_detection.tpu_exporters import utils

ANCHORS = 'anchors'
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'


def get_prediction_tensor_shapes(pipeline_config):
  """Gets static shapes of tensors by building the graph on CPU.

  This function builds the graph on CPU and obtains the static shapes of the
  output tensors of TPUPartitionedCall. The shape information is later used to
  set the shapes of tensors when the TPU graph is built; this is necessary
  because tensors coming out of TPUPartitionedCall lose their shape
  information, which many downstream CPU operations need.

  Args:
    pipeline_config: A TrainEvalPipelineConfig proto.

  Returns:
    A python dict of tensors' names and their shapes.
  """
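  # Illustrative only: with a typical SSD config the returned dict has the form
  # {BOX_ENCODINGS: [batch, num_anchors, code_size],
  #  CLASS_PREDICTIONS_WITH_BACKGROUND: [batch, num_anchors, num_classes + 1],
  #  ANCHORS: [num_anchors, 4]}; the concrete values depend on the config.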
  detection_model = model_builder.build(
      pipeline_config.model, is_training=False)
  _, input_tensors = exporter.input_placeholder_fn_map['image_tensor']()
  inputs = tf.cast(input_tensors, dtype=tf.float32)
  preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
  prediction_dict = detection_model.predict(preprocessed_inputs,
                                            true_image_shapes)

  return {
      BOX_ENCODINGS:
          prediction_dict[BOX_ENCODINGS].shape.as_list(),
      CLASS_PREDICTIONS_WITH_BACKGROUND:
          prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND].shape.as_list(),
      ANCHORS:
          prediction_dict[ANCHORS].shape.as_list(),
  }


def recover_shape(preprocessed_inputs, prediction_outputs, shapes_info):
  """Recovers shape from TPUPartitionedCall.

  Args:
    preprocessed_inputs: 4D tensor, shaped (batch, channels, height, width).
    prediction_outputs: Python list of tensors, in the following order:
      box_encodings - 3D tensor, shaped (code_size, batch, num_anchors);
      class_predictions_with_background - 3D tensor,
      shaped (num_classes + 1, batch, num_anchors);
      anchors - 2D tensor, shaped (4, num_anchors).
    shapes_info: Python dict of tensor shapes as lists.

  Returns:
    preprocessed_inputs: 4D tensor, shaped (batch, height, width, channels).
    box_encodings: 3D tensor, shaped (batch, num_anchors, code_size).
    class_predictions_with_background: 3D tensor,
      shaped (batch, num_anchors, num_classes + 1).
    anchors: 2D tensor, shaped (num_anchors, 4).
  """
  # Dimshuffle: (b, c, h, w) -> (b, h, w, c)
  preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1])

  box_encodings = tf.transpose(prediction_outputs[0], perm=[1, 2, 0])
  # [None, None, detection_model._box_coder.code_size]
  box_encodings.set_shape(shapes_info[BOX_ENCODINGS])

  class_predictions_with_background = tf.transpose(
      prediction_outputs[1], perm=[1, 2, 0])
  # [None, None, num_classes + 1]
  class_predictions_with_background.set_shape(
      shapes_info[CLASS_PREDICTIONS_WITH_BACKGROUND])

  anchors = tf.transpose(prediction_outputs[2], perm=[1, 0])
  # [None, 4]
  anchors.set_shape(shapes_info[ANCHORS])

  return (preprocessed_inputs, box_encodings, class_predictions_with_background,
          anchors)


def build_graph(pipeline_config,
                shapes_info,
                input_type='encoded_image_string_tensor',
                use_bfloat16=False):
  """Builds TPU serving graph of ssd to be exported.

  Args:
    pipeline_config: A TrainEvalPipelineConfig proto.
    shapes_info: A python dict of tensors' names and their shapes, returned by
      `get_prediction_tensor_shapes()`.
    input_type: One of
      'encoded_image_string_tensor': a 1d tensor with dtype=tf.string
      'image_tensor': a 4d tensor with dtype=tf.uint8
      'tf_example': a 1d tensor with dtype=tf.string
    use_bfloat16: If true, use tf.bfloat16 on TPU.

  Returns:
    placeholder_tensor: A placeholder tensor, type determined by `input_type`.
    result_tensor_dict: A python dict of tensors' names and tensors.
  """
  detection_model = model_builder.build(
      pipeline_config.model, is_training=False)

  placeholder_tensor, input_tensors = \
      exporter.input_placeholder_fn_map[input_type]()
  inputs = tf.cast(input_tensors, dtype=tf.float32)
  preprocessed_inputs, true_image_shapes = detection_model.preprocess(inputs)
  # Dimshuffle: (b, h, w, c) -> (b, c, h, w)
  # This is to avoid extra padding due to TPU memory layout:
  # we move the larger dimensions inward and the smaller dimensions outward,
  # so that small dimensions don't get padded to tens or hundreds of times
  # their own size. The same trick is applied to other similar tensors below.
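  # (Roughly speaking, the TPU pads the minor-most dimensions of a tensor up to
  # its native tile size, so keeping a size-3 channel dimension out of the last
  # positions avoids a large blowup; the exact padding behavior depends on the
  # TPU generation and dtype.)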
  preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 3, 1, 2])
  if use_bfloat16:
    preprocessed_inputs = tf.cast(preprocessed_inputs, dtype=tf.bfloat16)

  def predict_tpu_subgraph(preprocessed_inputs, true_image_shapes):
    """Wraps over the CPU version of `predict()`.

    This builds the same graph as the original `predict()`, manipulates the
    result tensors' dimensions to be memory efficient on TPU, and returns
    them as a list of tensors.

    Args:
      preprocessed_inputs: A 4D tensor of shape (batch, channels, height,
        width).
      true_image_shapes: True image shapes tensor.

    Returns:
      A Python list of tensors:
        box_encodings: 3D tensor of shape (code_size, batch_size, num_anchors)
        class_predictions_with_background: 3D tensor,
          shape (num_classes + 1, batch_size, num_anchors)
        anchors: 2D tensor of shape (4, num_anchors)
    """
    # Dimshuffle: (b, c, h, w) -> (b, h, w, c)
    preprocessed_inputs = tf.transpose(preprocessed_inputs, perm=[0, 2, 3, 1])
    if use_bfloat16:
      with tf.contrib.tpu.bfloat16_scope():
        prediction_dict = detection_model.predict(preprocessed_inputs,
                                                  true_image_shapes)
    else:
      prediction_dict = detection_model.predict(preprocessed_inputs,
                                                true_image_shapes)

    # Dimshuffle: (batch, anchors, depth) -> (depth, batch, anchors)
    return [
        tf.transpose(prediction_dict[BOX_ENCODINGS], perm=[2, 0, 1]),
        tf.transpose(
            prediction_dict[CLASS_PREDICTIONS_WITH_BACKGROUND], perm=[2, 0, 1]),
        tf.transpose(prediction_dict[ANCHORS], perm=[1, 0]),
    ]
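
  # The TPU computation is wrapped in a Defun so it can be invoked through
  # TPUPartitionedCall below; tf.contrib.tpu.rewrite rewrites
  # `predict_tpu_subgraph` to run on the TPU, fed with the captured inputs.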
  @function.Defun(capture_resource_var_by_value=False)
  def predict_tpu():
    return tf.contrib.tpu.rewrite(predict_tpu_subgraph,
                                  [preprocessed_inputs, true_image_shapes])

  prediction_outputs = tpu_functional.TPUPartitionedCall(
      args=predict_tpu.captured_inputs,
      device_ordinal=tpu_ops.tpu_ordinal_selector(),
      Tout=[o.type for o in predict_tpu.definition.signature.output_arg],
      f=predict_tpu)

  (preprocessed_inputs, box_encodings, class_predictions_with_background,
   anchors) = recover_shape(preprocessed_inputs, prediction_outputs,
                            shapes_info)

  output_tensors = {
      'preprocessed_inputs': preprocessed_inputs,
      BOX_ENCODINGS: box_encodings,
      CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_with_background,
      ANCHORS: anchors,
  }

  if use_bfloat16:
    output_tensors = utils.bfloat16_to_float32_nested(output_tensors)

  postprocessed_tensors = detection_model.postprocess(output_tensors,
                                                      true_image_shapes)
  result_tensor_dict = exporter.add_output_tensor_nodes(postprocessed_tensors,
                                                        'inference_op')

  return placeholder_tensor, result_tensor_dict
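

# A minimal usage sketch (illustrative only; the real entry point is the
# exporter in object_detection/tpu_exporters, and running the graph requires a
# TPU-enabled session):
#
#   shapes_info = get_prediction_tensor_shapes(pipeline_config)
#   with tf.Graph().as_default():
#     placeholder_tensor, result_tensor_dict = build_graph(
#         pipeline_config, shapes_info,
#         input_type='image_tensor', use_bfloat16=False)
#     # `placeholder_tensor` is fed with input images and `result_tensor_dict`
#     # is fetched to obtain detections, typically via a SavedModel signature.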