# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. """tf.data.Dataset builder.
  16. Creates data sources for DetectionModels from an InputReader config. See
  17. input_reader.proto for options.
  18. Note: If users wishes to also use their own InputReaders with the Object
  19. Detection configuration framework, they should define their own builder function
  20. that wraps the build function.
  21. """
import functools

import tensorflow as tf

from object_detection.data_decoders import tf_example_decoder
from object_detection.protos import input_reader_pb2

def make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  This is useful in cases where make_one_shot_iterator wouldn't work because
  the graph contains a hash table that needs to be initialized.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = dataset.make_initializable_iterator()
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator
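
# Usage sketch (illustrative, not part of the original module): because the
# iterator's initializer is registered under TABLE_INITIALIZERS above, running
# tf.tables_initializer() in a TF 1.x session initializes both the label-map
# hash tables and the iterator before elements are pulled, e.g.:
#
#   iterator = make_initializable_iterator(dataset)
#   next_batch = iterator.get_next()
#   with tf.Session() as sess:
#     sess.run(tf.tables_initializer())
#     features = sess.run(next_batch)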

def read_dataset(file_read_func, input_files, config):
  """Reads a dataset, and handles repetition and shuffling.

  Args:
    file_read_func: Function to use in tf.contrib.data.parallel_interleave, to
      read every individual file into a tf.data.Dataset.
    input_files: A list of file paths to read.
    config: An input_reader_builder.InputReader object.

  Returns:
    A tf.data.Dataset of (undecoded) tf-records based on config.
  """
  # Shard, shuffle, and read files.
  filenames = tf.gfile.Glob(input_files)
  num_readers = config.num_readers
  if num_readers > len(filenames):
    num_readers = len(filenames)
    tf.logging.warning('num_readers has been reduced to %d to match input file '
                       'shards.' % num_readers)
  filename_dataset = tf.data.Dataset.from_tensor_slices(filenames)
  if config.shuffle:
    filename_dataset = filename_dataset.shuffle(
        config.filenames_shuffle_buffer_size)
  elif num_readers > 1:
    tf.logging.warning('`shuffle` is false, but the input data stream is '
                       'still slightly shuffled since `num_readers` > 1.')
  filename_dataset = filename_dataset.repeat(config.num_epochs or None)
  records_dataset = filename_dataset.apply(
      tf.contrib.data.parallel_interleave(
          file_read_func,
          cycle_length=num_readers,
          block_length=config.read_block_length,
          sloppy=config.shuffle))
  if config.shuffle:
    records_dataset = records_dataset.shuffle(config.shuffle_buffer_size)
  return records_dataset
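
# Illustrative note (not part of the original module): file_read_func is
# expected to map a single filename to a tf.data.Dataset of serialized records;
# build() below supplies a functools.partial of tf.data.TFRecordDataset for
# this. A hedged call might look like (the glob pattern is hypothetical):
#
#   records = read_dataset(
#       functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
#       ['/data/train-*.record'],
#       input_reader_config)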

def build(input_reader_config, batch_size=None, transform_input_data_fn=None):
  """Builds a tf.data.Dataset.

  Builds a tf.data.Dataset by applying the `transform_input_data_fn` on all
  records. Applies a padded batch to the resulting dataset.

  Args:
    input_reader_config: An input_reader_pb2.InputReader object.
    batch_size: Batch size. If batch size is None, no batching is performed.
    transform_input_data_fn: Function to apply transformation to all records,
      or None if no extra decoding is required.

  Returns:
    A tf.data.Dataset based on the input_reader_config.

  Raises:
    ValueError: On invalid input reader proto.
    ValueError: If no input paths are specified.
  """
  if not isinstance(input_reader_config, input_reader_pb2.InputReader):
    raise ValueError('input_reader_config not of type '
                     'input_reader_pb2.InputReader.')

  if input_reader_config.WhichOneof('input_reader') == 'tf_record_input_reader':
    config = input_reader_config.tf_record_input_reader
    if not config.input_path:
      raise ValueError('At least one input path must be specified in '
                       '`input_reader_config`.')

    label_map_proto_file = None
    if input_reader_config.HasField('label_map_path'):
      label_map_proto_file = input_reader_config.label_map_path
    decoder = tf_example_decoder.TfExampleDecoder(
        load_instance_masks=input_reader_config.load_instance_masks,
        load_multiclass_scores=input_reader_config.load_multiclass_scores,
        instance_mask_type=input_reader_config.mask_type,
        label_map_proto_file=label_map_proto_file,
        use_display_name=input_reader_config.use_display_name,
        num_additional_channels=input_reader_config.num_additional_channels)

    def process_fn(value):
      """Sets up tf graph that decodes, transforms and pads input data."""
      processed_tensors = decoder.decode(value)
      if transform_input_data_fn is not None:
        processed_tensors = transform_input_data_fn(processed_tensors)
      return processed_tensors

    dataset = read_dataset(
        functools.partial(tf.data.TFRecordDataset, buffer_size=8 * 1000 * 1000),
        config.input_path[:], input_reader_config)
    if input_reader_config.sample_1_of_n_examples > 1:
      dataset = dataset.shard(input_reader_config.sample_1_of_n_examples, 0)
    # TODO(rathodv): make batch size a required argument once the old binaries
    # are deleted.
    if batch_size:
      num_parallel_calls = batch_size * input_reader_config.num_parallel_batches
    else:
      num_parallel_calls = input_reader_config.num_parallel_map_calls
    # TODO(b/123952794): Migrate to V2 function.
    if hasattr(dataset, 'map_with_legacy_function'):
      data_map_fn = dataset.map_with_legacy_function
    else:
      data_map_fn = dataset.map
    dataset = data_map_fn(process_fn, num_parallel_calls=num_parallel_calls)
    if batch_size:
      dataset = dataset.apply(
          tf.contrib.data.batch_and_drop_remainder(batch_size))
    dataset = dataset.prefetch(input_reader_config.num_prefetch_batches)
    return dataset

  raise ValueError('Unsupported input_reader_config.')
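
# The sketch below is illustrative only and not part of the original module: it
# shows how build() and make_initializable_iterator() are typically combined in
# a TF 1.x input pipeline. The helper name is hypothetical, and the proto is
# assumed to be parsed elsewhere (e.g. from a pipeline config via
# google.protobuf.text_format).
def _example_input_pipeline(input_reader_config, batch_size=32):
  """Hedged example: builds a batched dataset and fetches a single batch.

  Args:
    input_reader_config: An input_reader_pb2.InputReader proto.
    batch_size: Number of examples per batch.

  Returns:
    A dictionary of numpy arrays holding one decoded, batched set of examples.
  """
  dataset = build(input_reader_config, batch_size=batch_size)
  iterator = make_initializable_iterator(dataset)
  next_batch = iterator.get_next()
  with tf.Session() as sess:
    # tf.tables_initializer() also runs the iterator initializer, because
    # make_initializable_iterator() registered it under TABLE_INITIALIZERS.
    sess.run(tf.tables_initializer())
    return sess.run(next_batch)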