# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. """Functions to generate a list of feature maps based on image features.
  16. Provides several feature map generators that can be used to build object
  17. detection feature extractors.
  18. Object detection feature extractors usually are built by stacking two components
  19. - A base feature extractor such as Inception V3 and a feature map generator.
  20. Feature map generators build on the base feature extractors and produce a list
  21. of final feature maps.
  22. """
import collections
import functools

import tensorflow as tf

from object_detection.utils import ops

slim = tf.contrib.slim

# Activation bound used for TPU v1. Activations will be clipped to
# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with
# use_bounded_activations enabled.
ACTIVATION_BOUND = 6.0


def get_depth_fn(depth_multiplier, min_depth):
  """Builds a callable to compute depth (output channels) of conv filters.

  Args:
    depth_multiplier: a multiplier for the nominal depth.
    min_depth: a lower bound on the depth of filters.

  Returns:
    A callable that takes in a nominal depth and returns the depth to use.
  """
  def multiply_depth(depth):
    new_depth = int(depth * depth_multiplier)
    return max(new_depth, min_depth)
  return multiply_depth
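

# Illustrative example (not part of the original module): with
# depth_multiplier=0.5 and min_depth=16, the returned callable scales and
# clamps nominal depths as follows:
#
#   depth_fn = get_depth_fn(depth_multiplier=0.5, min_depth=16)
#   depth_fn(128)  # -> 64, since int(128 * 0.5) = 64
#   depth_fn(24)   # -> 16, since int(24 * 0.5) = 12 falls below min_depth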


class KerasMultiResolutionFeatureMaps(tf.keras.Model):
  """Generates multi-resolution feature maps from input image features.

  A Keras model that generates multi-scale feature maps for detection as in the
  SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, see Sec 2.1.

  More specifically, when called on inputs it performs the following two tasks:
  1) If a layer name is provided in the configuration, returns that layer as a
     feature map.
  2) If a layer name is left as an empty string, constructs a new feature map
     based on the spatial shape and depth configuration. Note that the current
     implementation only supports generating new layers using convolutions of
     stride 2, resulting in a spatial resolution reduction by a factor of 2.
     By default the convolution kernel size is set to 3, and it can be
     customized by the caller.

  An example of the configuration for Inception V3:
  {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128]
  }

  When this feature generator object is called on input image_features:
    Args:
      image_features: A dictionary of handles to activation tensors from the
        base feature extractor.

    Returns:
      feature_maps: an OrderedDict mapping keys (feature map names) to
        tensors where each tensor has shape [batch, height_i, width_i, depth_i].
  """
  def __init__(self,
               feature_map_layout,
               depth_multiplier,
               min_depth,
               insert_1x1_conv,
               is_training,
               conv_hyperparams,
               freeze_batchnorm,
               name=None):
    """Constructor.

    Args:
      feature_map_layout: Dictionary of specifications for the feature map
        layouts in the following format (Inception V2/V3 respectively):
        {
          'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
          'layer_depth': [-1, -1, -1, 512, 256, 128]
        }
        or
        {
          'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
          'layer_depth': [-1, -1, -1, 512, 256, 128]
        }
        If 'from_layer' is specified, the specified feature map is directly
        used as a box predictor layer, and the layer_depth is directly inferred
        from the feature map (instead of using the provided 'layer_depth'
        parameter). In this case, our convention is to set 'layer_depth' to -1
        for clarity. Otherwise, if 'from_layer' is an empty string, then the
        box predictor layer will be built from the previous layer using
        convolution operations. Note that the current implementation only
        supports generating new layers using convolutions of stride 2
        (resulting in a spatial resolution reduction by a factor of 2), and
        will be extended to a more flexible design. The convolution kernel
        size is set to 3 by default, and can be customized by the
        'conv_kernel_size' parameter (similarly, 'conv_kernel_size' should be
        set to -1 if 'from_layer' is specified). The created convolution
        operation will be a normal 2D convolution by default, and a depthwise
        convolution followed by 1x1 convolution if 'use_depthwise' is set to
        True.
      depth_multiplier: Depth multiplier for convolutional layers.
      min_depth: Minimum depth for convolutional layers.
      insert_1x1_conv: A boolean indicating whether an additional 1x1
        convolution should be inserted before shrinking the feature map.
      is_training: Indicates whether the feature generator is in training mode.
      conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
        containing hyperparameters for convolution ops.
      freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
        training or not. When training with a small batch size (e.g. 1), it is
        desirable to freeze batch norm update and use pretrained batch norm
        params.
      name: A string name scope to assign to the model. If 'None', Keras
        will auto-generate one from the class name.
    """
    super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)

    self.feature_map_layout = feature_map_layout
    self.convolutions = []

    depth_fn = get_depth_fn(depth_multiplier, min_depth)

    base_from_layer = ''
    use_explicit_padding = False
    if 'use_explicit_padding' in feature_map_layout:
      use_explicit_padding = feature_map_layout['use_explicit_padding']
    use_depthwise = False
    if 'use_depthwise' in feature_map_layout:
      use_depthwise = feature_map_layout['use_depthwise']
    for index, from_layer in enumerate(feature_map_layout['from_layer']):
      net = []
      layer_depth = feature_map_layout['layer_depth'][index]
      conv_kernel_size = 3
      if 'conv_kernel_size' in feature_map_layout:
        conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
      if from_layer:
        base_from_layer = from_layer
      else:
        if insert_1x1_conv:
          layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
              base_from_layer, index, depth_fn(layer_depth / 2))
          net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth / 2),
                                            [1, 1],
                                            padding='SAME',
                                            strides=1,
                                            name=layer_name + '_conv',
                                            **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name))

        layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
            base_from_layer, index, conv_kernel_size, conv_kernel_size,
            depth_fn(layer_depth))
        stride = 2
        padding = 'SAME'
        if use_explicit_padding:
          padding = 'VALID'
          # We define this function here while capturing the value of
          # conv_kernel_size, to avoid holding a reference to the loop variable
          # conv_kernel_size inside of a lambda function.
          def fixed_padding(features, kernel_size=conv_kernel_size):
            return ops.fixed_padding(features, kernel_size)
          net.append(tf.keras.layers.Lambda(fixed_padding))
        # TODO(rathodv): Add some utilities to simplify the creation of
        # Depthwise & non-depthwise convolutions w/ normalization & activations
        if use_depthwise:
          net.append(tf.keras.layers.DepthwiseConv2D(
              [conv_kernel_size, conv_kernel_size],
              depth_multiplier=1,
              padding=padding,
              strides=stride,
              name=layer_name + '_depthwise_conv',
              **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_depthwise_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name + '_depthwise'))

          net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1],
                                            padding='SAME',
                                            strides=1,
                                            name=layer_name + '_conv',
                                            **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name))
        else:
          net.append(tf.keras.layers.Conv2D(
              depth_fn(layer_depth),
              [conv_kernel_size, conv_kernel_size],
              padding=padding,
              strides=stride,
              name=layer_name + '_conv',
              **conv_hyperparams.params()))
          net.append(
              conv_hyperparams.build_batch_norm(
                  training=(is_training and not freeze_batchnorm),
                  name=layer_name + '_batchnorm'))
          net.append(
              conv_hyperparams.build_activation_layer(
                  name=layer_name))

      # Until certain bugs are fixed in checkpointable lists,
      # this net must be appended only once it's been filled with layers.
      self.convolutions.append(net)
  def call(self, image_features):
    """Generates the multi-resolution feature maps.

    Executed when calling the `.__call__` method on input.

    Args:
      image_features: A dictionary of handles to activation tensors from the
        base feature extractor.

    Returns:
      feature_maps: an OrderedDict mapping keys (feature map names) to
        tensors where each tensor has shape [batch, height_i, width_i, depth_i].
    """
    feature_maps = []
    feature_map_keys = []

    for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
      if from_layer:
        feature_map = image_features[from_layer]
        feature_map_keys.append(from_layer)
      else:
        feature_map = feature_maps[-1]
        for layer in self.convolutions[index]:
          feature_map = layer(feature_map)
        layer_name = self.convolutions[index][-1].name
        feature_map_keys.append(layer_name)
      feature_maps.append(feature_map)
    return collections.OrderedDict(
        [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
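

# Illustrative usage sketch (assumptions, not part of the original module):
# `conv_hyperparams` stands for a hyperparams_builder.KerasLayerHyperparams
# instance and `image_features` for a dict of backbone activations keyed by
# the names listed in 'from_layer'.
#
#   feature_map_layout = {
#       'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
#       'layer_depth': [-1, -1, -1, 512, 256, 128]
#   }
#   feature_map_generator = KerasMultiResolutionFeatureMaps(
#       feature_map_layout=feature_map_layout,
#       depth_multiplier=1.0,
#       min_depth=16,
#       insert_1x1_conv=True,
#       is_training=True,
#       conv_hyperparams=conv_hyperparams,
#       freeze_batchnorm=False)
#   feature_maps = feature_map_generator(image_features)
#   # -> OrderedDict of six tensors, one per entry in 'from_layer'.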


def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
                                  min_depth, insert_1x1_conv, image_features,
                                  pool_residual=False):
  """Generates multi-resolution feature maps from input image features.

  Generates multi-scale feature maps for detection as in the SSD papers by
  Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, see Sec 2.1.

  More specifically, it performs the following two tasks:
  1) If a layer name is provided in the configuration, returns that layer as a
     feature map.
  2) If a layer name is left as an empty string, constructs a new feature map
     based on the spatial shape and depth configuration. Note that the current
     implementation only supports generating new layers using convolutions of
     stride 2, resulting in a spatial resolution reduction by a factor of 2.
     By default the convolution kernel size is set to 3, and it can be
     customized by the caller.

  An example of the configuration for Inception V3:
  {
    'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
    'layer_depth': [-1, -1, -1, 512, 256, 128]
  }

  Args:
    feature_map_layout: Dictionary of specifications for the feature map
      layouts in the following format (Inception V2/V3 respectively):
      {
        'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128]
      }
      or
      {
        'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
        'layer_depth': [-1, -1, -1, 512, 256, 128]
      }
      If 'from_layer' is specified, the specified feature map is directly used
      as a box predictor layer, and the layer_depth is directly inferred from
      the feature map (instead of using the provided 'layer_depth' parameter).
      In this case, our convention is to set 'layer_depth' to -1 for clarity.
      Otherwise, if 'from_layer' is an empty string, then the box predictor
      layer will be built from the previous layer using convolution operations.
      Note that the current implementation only supports generating new layers
      using convolutions of stride 2 (resulting in a spatial resolution
      reduction by a factor of 2), and will be extended to a more flexible
      design. The convolution kernel size is set to 3 by default, and can be
      customized by the 'conv_kernel_size' parameter (similarly,
      'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
      The created convolution operation will be a normal 2D convolution by
      default, and a depthwise convolution followed by 1x1 convolution if
      'use_depthwise' is set to True.
    depth_multiplier: Depth multiplier for convolutional layers.
    min_depth: Minimum depth for convolutional layers.
    insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
      should be inserted before shrinking the feature map.
    image_features: A dictionary of handles to activation tensors from the
      base feature extractor.
    pool_residual: Whether to add an average pooling layer followed by a
      residual connection between subsequent feature maps when the channel
      depths match. For example, with option 'layer_depth': [-1, 512, 256, 256],
      a pooling and residual layer is added between the third and fourth
      feature map. This option is best used with the Weight Shared Convolution
      Box Predictor when all feature maps have the same channel depth, to
      encourage more consistent features across multi-scale feature maps.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].

  Raises:
    ValueError: if the number of entries in 'from_layer' and
      'layer_depth' do not match.
    ValueError: if the generated layer does not have the same resolution
      as specified.
  """
  depth_fn = get_depth_fn(depth_multiplier, min_depth)

  feature_map_keys = []
  feature_maps = []
  base_from_layer = ''
  use_explicit_padding = False
  if 'use_explicit_padding' in feature_map_layout:
    use_explicit_padding = feature_map_layout['use_explicit_padding']
  use_depthwise = False
  if 'use_depthwise' in feature_map_layout:
    use_depthwise = feature_map_layout['use_depthwise']
  for index, from_layer in enumerate(feature_map_layout['from_layer']):
    layer_depth = feature_map_layout['layer_depth'][index]
    conv_kernel_size = 3
    if 'conv_kernel_size' in feature_map_layout:
      conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
    if from_layer:
      feature_map = image_features[from_layer]
      base_from_layer = from_layer
      feature_map_keys.append(from_layer)
    else:
      pre_layer = feature_maps[-1]
      pre_layer_depth = pre_layer.get_shape().as_list()[3]
      intermediate_layer = pre_layer
      if insert_1x1_conv:
        layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
            base_from_layer, index, depth_fn(layer_depth / 2))
        intermediate_layer = slim.conv2d(
            pre_layer,
            depth_fn(layer_depth / 2), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
      layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
          base_from_layer, index, conv_kernel_size, conv_kernel_size,
          depth_fn(layer_depth))
      stride = 2
      padding = 'SAME'
      if use_explicit_padding:
        padding = 'VALID'
        intermediate_layer = ops.fixed_padding(
            intermediate_layer, conv_kernel_size)
      if use_depthwise:
        feature_map = slim.separable_conv2d(
            intermediate_layer,
            None, [conv_kernel_size, conv_kernel_size],
            depth_multiplier=1,
            padding=padding,
            stride=stride,
            scope=layer_name + '_depthwise')
        feature_map = slim.conv2d(
            feature_map,
            depth_fn(layer_depth), [1, 1],
            padding='SAME',
            stride=1,
            scope=layer_name)
        if pool_residual and pre_layer_depth == depth_fn(layer_depth):
          feature_map += slim.avg_pool2d(
              pre_layer, [3, 3],
              padding='SAME',
              stride=2,
              scope=layer_name + '_pool')
      else:
        feature_map = slim.conv2d(
            intermediate_layer,
            depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
            padding=padding,
            stride=stride,
            scope=layer_name)
      feature_map_keys.append(layer_name)
    feature_maps.append(feature_map)
  return collections.OrderedDict(
      [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
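

# Illustrative usage sketch (hypothetical tensors; the Inception V3 endpoint
# shapes shown are typical for a ~300x300 input and are not guaranteed here):
#
#   image_features = {
#       'Mixed_5d': mixed_5d,  # e.g. [batch, 35, 35, 288]
#       'Mixed_6e': mixed_6e,  # e.g. [batch, 17, 17, 768]
#       'Mixed_7c': mixed_7c,  # e.g. [batch, 8, 8, 2048]
#   }
#   feature_maps = multi_resolution_feature_maps(
#       feature_map_layout={
#           'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
#           'layer_depth': [-1, -1, -1, 512, 256, 128]},
#       depth_multiplier=1.0,
#       min_depth=16,
#       insert_1x1_conv=True,
#       image_features=image_features)
#   # -> OrderedDict whose last three maps halve in resolution step by step.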


def fpn_top_down_feature_maps(image_features,
                              depth,
                              use_depthwise=False,
                              use_explicit_padding=False,
                              use_bounded_activations=False,
                              scope=None,
                              use_native_resize_op=False):
  """Generates `top-down` feature maps for Feature Pyramid Networks.

  See https://arxiv.org/abs/1612.03144 for details.

  Args:
    image_features: list of tuples of (tensor_name, image_feature_tensor).
      Spatial resolutions of successive tensors must reduce exactly by a factor
      of 2.
    depth: depth of output feature maps.
    use_depthwise: whether to use depthwise separable conv instead of regular
      conv.
    use_explicit_padding: whether to use explicit padding.
    use_bounded_activations: Whether or not to clip activations to the range
      [-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
      themselves to quantized inference.
    scope: A scope name to wrap this op under.
    use_native_resize_op: If True, uses the tf.image.resize_nearest_neighbor op
      for upsampling instead of a reshape-and-broadcast implementation.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].
  """
  with tf.name_scope(scope, 'top_down'):
    num_levels = len(image_features)
    output_feature_maps_list = []
    output_feature_map_keys = []
    padding = 'VALID' if use_explicit_padding else 'SAME'
    kernel_size = 3
    with slim.arg_scope(
        [slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
      top_down = slim.conv2d(
          image_features[-1][1],
          depth, [1, 1], activation_fn=None, normalizer_fn=None,
          scope='projection_%d' % num_levels)
      if use_bounded_activations:
        top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
                                    ACTIVATION_BOUND)
      output_feature_maps_list.append(top_down)
      output_feature_map_keys.append(
          'top_down_%s' % image_features[-1][0])

      for level in reversed(range(num_levels - 1)):
        if use_native_resize_op:
          with tf.name_scope('nearest_neighbor_upsampling'):
            top_down_shape = top_down.shape.as_list()
            top_down = tf.image.resize_nearest_neighbor(
                top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2])
        else:
          top_down = ops.nearest_neighbor_upsampling(top_down, scale=2)
        residual = slim.conv2d(
            image_features[level][1], depth, [1, 1],
            activation_fn=None, normalizer_fn=None,
            scope='projection_%d' % (level + 1))
        if use_bounded_activations:
          residual = tf.clip_by_value(residual, -ACTIVATION_BOUND,
                                      ACTIVATION_BOUND)
        if use_explicit_padding:
          # Slice top_down to the same spatial shape as residual.
          residual_shape = tf.shape(residual)
          top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
        top_down += residual
        if use_bounded_activations:
          top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
                                      ACTIVATION_BOUND)
        if use_depthwise:
          conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
        else:
          conv_op = slim.conv2d
        if use_explicit_padding:
          top_down = ops.fixed_padding(top_down, kernel_size)
        output_feature_maps_list.append(conv_op(
            top_down,
            depth, [kernel_size, kernel_size],
            scope='smoothing_%d' % (level + 1)))
        output_feature_map_keys.append('top_down_%s' % image_features[level][0])
      return collections.OrderedDict(reversed(
          list(zip(output_feature_map_keys, output_feature_maps_list))))
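

# Illustrative usage sketch (hypothetical tensors; each successive level must
# halve the spatial resolution of the previous one, as required above):
#
#   image_features = [
#       ('block2', c2),  # e.g. [batch, 64, 64, 256]
#       ('block3', c3),  # e.g. [batch, 32, 32, 512]
#       ('block4', c4),  # e.g. [batch, 16, 16, 1024]
#   ]
#   fpn_maps = fpn_top_down_feature_maps(image_features, depth=256)
#   # -> OrderedDict with keys 'top_down_block2', 'top_down_block3' and
#   #    'top_down_block4', each tensor having 256 output channels.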


def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
                                 image_features, replace_pool_with_conv=False):
  """Generates pooling pyramid feature maps.

  The pooling pyramid feature map generator is motivated by
  multi_resolution_feature_maps. The main differences are that it is simpler
  and has fewer free parameters.

  More specifically:
   - Instead of using convolutions to shrink the feature map, it uses max
     pooling, therefore getting rid of the convolution parameters entirely.
   - By pooling features from the larger map up to a single cell, it generates
     features in the same feature space.
   - Instead of independently making box predictions from individual maps, it
     shares the same classifier across different feature maps, therefore
     reducing the "mis-calibration" across different scales.

  See go/ppn-detection for more details.

  Args:
    base_feature_map_depth: Depth of the base feature before the max pooling.
    num_layers: Number of layers used to make predictions. They are pooled
      from the base feature.
    image_features: A dictionary of handles to activation tensors from the
      feature extractor.
    replace_pool_with_conv: Whether or not to replace pooling operations with
      convolutions in the PPN. Default is False.

  Returns:
    feature_maps: an OrderedDict mapping keys (feature map names) to
      tensors where each tensor has shape [batch, height_i, width_i, depth_i].

  Raises:
    ValueError: if image_features does not contain exactly one entry.
  """
  if len(image_features) != 1:
    raise ValueError('image_features should be a dictionary of length 1.')
  # list(...) keeps this compatible with Python 3, where dict.keys() is a view.
  image_features = image_features[list(image_features.keys())[0]]

  feature_map_keys = []
  feature_maps = []
  feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
  if base_feature_map_depth > 0:
    image_features = slim.conv2d(
        image_features,
        base_feature_map_depth,
        [1, 1],  # kernel size
        padding='SAME', stride=1, scope=feature_map_key)
    # Add a 1x1 max-pooling node (a no-op) immediately after the conv2d for
    # TPU v1 compatibility. Without the following dummy op, the TPU runtime
    # compiler would combine the convolution with the max-pooling below into a
    # single cycle, making it impossible to fetch the conv2d feature.
    image_features = slim.max_pool2d(
        image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
  feature_map_keys.append(feature_map_key)
  feature_maps.append(image_features)
  feature_map = image_features
  if replace_pool_with_conv:
    with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
      for i in range(num_layers - 1):
        feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
                                                       base_feature_map_depth)
        feature_map = slim.conv2d(
            feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
        feature_map_keys.append(feature_map_key)
        feature_maps.append(feature_map)
  else:
    with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
      for i in range(num_layers - 1):
        feature_map_key = 'MaxPool2d_%d_2x2' % i
        feature_map = slim.max_pool2d(
            feature_map, [2, 2], padding='SAME', scope=feature_map_key)
        feature_map_keys.append(feature_map_key)
        feature_maps.append(feature_map)
  return collections.OrderedDict(
      [(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
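

# Illustrative usage sketch (hypothetical tensor; the input dict must hold
# exactly one feature map):
#
#   feature_maps = pooling_pyramid_feature_maps(
#       base_feature_map_depth=1024,
#       num_layers=6,
#       image_features={'image_features': backbone_tensor})
#   # -> OrderedDict with one 1x1-conv base map followed by five stride-2
#   #    max-pooled maps, all in the same feature space.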