# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Builder function to construct tf-slim arg_scope for convolution, fc ops."""
import tensorflow as tf

from object_detection.core import freezable_batch_norm
from object_detection.protos import hyperparams_pb2
from object_detection.utils import context_manager

slim = tf.contrib.slim


class KerasLayerHyperparams(object):
  """
  A hyperparameter configuration object for Keras layers used in
  Object Detection models.
  """

  def __init__(self, hyperparams_config):
    """Builds Keras hyperparameter config for layers based on the proto config.

    It automatically converts from Slim layer hyperparameter configs to
    Keras layer hyperparameters. Namely, it:
    - Builds Keras initializers/regularizers instead of Slim ones
    - sets weights_regularizer/initializer to kernel_regularizer/initializer
    - converts batchnorm decay to momentum
    - converts Slim l2 regularizer weights to the equivalent Keras l2 weights

    Contains a hyperparameter configuration for ops that specifies kernel
    initializer, kernel regularizer, and activation. Also contains parameters
    for batch norm operators based on the configuration.

    Note that if the batch_norm parameters are not specified in the config
    (i.e. left to default) then batch norm is excluded from the config.

    Args:
      hyperparams_config: hyperparams.proto object containing
        hyperparameters.

    Raises:
      ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
    """
    if not isinstance(hyperparams_config,
                      hyperparams_pb2.Hyperparams):
      raise ValueError('hyperparams_config not of type '
                       'hyperparams_pb.Hyperparams.')

    self._batch_norm_params = None
    if hyperparams_config.HasField('batch_norm'):
      self._batch_norm_params = _build_keras_batch_norm_params(
          hyperparams_config.batch_norm)

    self._activation_fn = _build_activation_fn(hyperparams_config.activation)
    # TODO(kaftan): Unclear if these kwargs apply to separable & depthwise conv
    # (Those might use depthwise_* instead of kernel_*)
    # We should probably switch to using build_conv2d_layer and
    # build_depthwise_conv2d_layer methods instead.
    self._op_params = {
        'kernel_regularizer': _build_keras_regularizer(
            hyperparams_config.regularizer),
        'kernel_initializer': _build_initializer(
            hyperparams_config.initializer, build_for_keras=True),
        'activation': _build_activation_fn(hyperparams_config.activation)
    }

  def use_batch_norm(self):
    return self._batch_norm_params is not None

  def batch_norm_params(self, **overrides):
    """Returns a dict containing batchnorm layer construction hyperparameters.

    Optionally overrides values in the batchnorm hyperparam dict. Overrides
    only apply to individual calls of this method, and do not affect
    future calls.

    Args:
      **overrides: keyword arguments to override in the hyperparams dictionary.

    Returns: dict containing the layer construction keyword arguments, with
      values overridden by the `overrides` keyword arguments.
    """
    if self._batch_norm_params is None:
      new_batch_norm_params = dict()
    else:
      new_batch_norm_params = self._batch_norm_params.copy()
    new_batch_norm_params.update(overrides)
    return new_batch_norm_params

  def build_batch_norm(self, training=None, **overrides):
    """Returns a Batch Normalization layer with the appropriate hyperparams.

    If the hyperparams are configured to not use batch normalization,
    this will return a Keras Lambda layer that only applies tf.Identity,
    without doing any normalization.

    Optionally overrides values in the batch_norm hyperparam dict. Overrides
    only apply to individual calls of this method, and do not affect
    future calls.

    Args:
      training: if True, the normalization layer will normalize using the batch
        statistics. If False, the normalization layer will be frozen and will
        act as if it is being used for inference. If None, the layer
        will look up the Keras learning phase at `call` time to decide what to
        do.
      **overrides: batch normalization construction args to override from the
        batch_norm hyperparams dictionary.

    Returns: Either a FreezableBatchNorm layer (if use_batch_norm() is True),
      or a Keras Lambda layer that applies the identity (if use_batch_norm()
      is False)
    """
    if self.use_batch_norm():
      return freezable_batch_norm.FreezableBatchNorm(
          training=training,
          **self.batch_norm_params(**overrides)
      )
    else:
      return tf.keras.layers.Lambda(tf.identity)

  def build_activation_layer(self, name='activation'):
    """Returns a Keras layer that applies the desired activation function.

    Args:
      name: The name to assign the Keras layer.

    Returns: A Keras lambda layer that applies the activation function
      specified in the hyperparam config, or applies the identity if the
      activation function is None.
    """
    if self._activation_fn:
      return tf.keras.layers.Lambda(self._activation_fn, name=name)
    else:
      return tf.keras.layers.Lambda(tf.identity, name=name)

  def params(self, include_activation=False, **overrides):
    """Returns a dict containing the layer construction hyperparameters to use.

    Optionally overrides values in the returned dict. Overrides
    only apply to individual calls of this method, and do not affect
    future calls.

    Args:
      include_activation: If False, activation in the returned dictionary will
        be set to `None`, and the activation must be applied via a separate
        layer created by `build_activation_layer`. If True, `activation` in the
        output param dictionary will be set to the activation function
        specified in the hyperparams config.
      **overrides: keyword arguments to override in the hyperparams dictionary.

    Returns: dict containing the layer construction keyword arguments, with
      values overridden by the `overrides` keyword arguments.
    """
    new_params = self._op_params.copy()
    new_params['activation'] = None
    if include_activation:
      new_params['activation'] = self._activation_fn
    if self.use_batch_norm() and self.batch_norm_params()['center']:
      new_params['use_bias'] = False
    else:
      new_params['use_bias'] = True
    new_params.update(**overrides)
    return new_params
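

# Usage sketch (illustrative only, not part of the original module). A
# KerasLayerHyperparams would typically be built from a parsed
# hyperparams_pb2.Hyperparams proto and then queried while constructing Keras
# layers; the text config below is a hypothetical example, with field names
# taken from the accesses made in this file:
#
#   from google.protobuf import text_format
#   config_text = """
#     regularizer { l2_regularizer { weight: 0.0004 } }
#     initializer { truncated_normal_initializer { stddev: 0.03 } }
#     activation: RELU_6
#     batch_norm { decay: 0.997 scale: true epsilon: 0.001 }
#   """
#   proto = text_format.Parse(config_text, hyperparams_pb2.Hyperparams())
#   hyperparams = KerasLayerHyperparams(proto)
#   conv = tf.keras.layers.Conv2D(64, 3, padding='same', **hyperparams.params())
#   batch_norm = hyperparams.build_batch_norm(training=True)
#   activation = hyperparams.build_activation_layer()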


def build(hyperparams_config, is_training):
  """Builds tf-slim arg_scope for convolution ops based on the config.

  Returns an arg_scope to use for convolution ops containing weights
  initializer, weights regularizer, activation function, batch norm function
  and batch norm parameters based on the configuration.

  Note that if no normalization parameters are specified in the config
  (i.e. left to default) then both batch norm and group norm are excluded
  from the arg_scope.

  The batch norm parameters are set for updates based on the `is_training`
  argument and the conv_hyperparams_config.batch_norm.train parameter. During
  training, they are updated only if the batch_norm.train parameter is true.
  However, during eval, no updates are made to the batch norm variables. In
  both cases, their current values are used during the forward pass.

  Args:
    hyperparams_config: hyperparams.proto object containing
      hyperparameters.
    is_training: Whether the network is in training mode.

  Returns:
    arg_scope_fn: A function to construct tf-slim arg_scope containing
      hyperparameters for ops.

  Raises:
    ValueError: if hyperparams_config is not of type hyperparams.Hyperparams.
  """
  if not isinstance(hyperparams_config,
                    hyperparams_pb2.Hyperparams):
    raise ValueError('hyperparams_config not of type '
                     'hyperparams_pb.Hyperparams.')

  normalizer_fn = None
  batch_norm_params = None
  if hyperparams_config.HasField('batch_norm'):
    normalizer_fn = slim.batch_norm
    batch_norm_params = _build_batch_norm_params(
        hyperparams_config.batch_norm, is_training)
  if hyperparams_config.HasField('group_norm'):
    normalizer_fn = tf.contrib.layers.group_norm

  affected_ops = [slim.conv2d, slim.separable_conv2d, slim.conv2d_transpose]
  if hyperparams_config.HasField('op') and (
      hyperparams_config.op == hyperparams_pb2.Hyperparams.FC):
    affected_ops = [slim.fully_connected]

  def scope_fn():
    with (slim.arg_scope([slim.batch_norm], **batch_norm_params)
          if batch_norm_params is not None else
          context_manager.IdentityContextManager()):
      with slim.arg_scope(
          affected_ops,
          weights_regularizer=_build_slim_regularizer(
              hyperparams_config.regularizer),
          weights_initializer=_build_initializer(
              hyperparams_config.initializer),
          activation_fn=_build_activation_fn(hyperparams_config.activation),
          normalizer_fn=normalizer_fn) as sc:
        return sc

  return scope_fn
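
# Usage sketch (illustrative only, not part of the original module). The value
# returned by build() is a zero-argument callable that constructs the
# arg_scope, so callers typically enter it before defining slim layers; the
# names `hyperparams_proto` and `images` below are hypothetical:
#
#   arg_scope_fn = build(hyperparams_proto, is_training=True)
#   with slim.arg_scope(arg_scope_fn()):
#     net = slim.conv2d(images, 64, [3, 3])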


def _build_activation_fn(activation_fn):
  """Builds a callable activation from config.

  Args:
    activation_fn: hyperparams_pb2.Hyperparams.activation

  Returns:
    Callable activation function.

  Raises:
    ValueError: On unknown activation function.
  """
  if activation_fn == hyperparams_pb2.Hyperparams.NONE:
    return None
  if activation_fn == hyperparams_pb2.Hyperparams.RELU:
    return tf.nn.relu
  if activation_fn == hyperparams_pb2.Hyperparams.RELU_6:
    return tf.nn.relu6
  raise ValueError('Unknown activation function: {}'.format(activation_fn))


def _build_slim_regularizer(regularizer):
  """Builds a tf-slim regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    tf-slim regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
  if regularizer_oneof == 'l1_regularizer':
    return slim.l1_regularizer(scale=float(regularizer.l1_regularizer.weight))
  if regularizer_oneof == 'l2_regularizer':
    return slim.l2_regularizer(scale=float(regularizer.l2_regularizer.weight))
  if regularizer_oneof is None:
    return None
  raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))


def _build_keras_regularizer(regularizer):
  """Builds a keras regularizer from config.

  Args:
    regularizer: hyperparams_pb2.Hyperparams.regularizer proto.

  Returns:
    Keras regularizer.

  Raises:
    ValueError: On unknown regularizer.
  """
  regularizer_oneof = regularizer.WhichOneof('regularizer_oneof')
  if regularizer_oneof == 'l1_regularizer':
    return tf.keras.regularizers.l1(float(regularizer.l1_regularizer.weight))
  if regularizer_oneof == 'l2_regularizer':
    # The Keras L2 regularizer weight differs from the Slim L2 regularizer
    # weight by a factor of 2.
    return tf.keras.regularizers.l2(
        float(regularizer.l2_regularizer.weight * 0.5))
  raise ValueError('Unknown regularizer function: {}'.format(regularizer_oneof))
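
# For reference (assumed rationale, not stated in the original beyond the
# factor-of-2 note above): slim.l2_regularizer(scale) penalizes
# scale * sum(w**2) / 2 via tf.nn.l2_loss, whereas tf.keras.regularizers.l2(l)
# penalizes l * sum(w**2), so l = scale * 0.5 yields the same loss term. For
# example, a Slim weight of 0.0004 corresponds to a Keras weight of 0.0002.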


def _build_initializer(initializer, build_for_keras=False):
  """Build a tf initializer from config.

  Args:
    initializer: hyperparams_pb2.Hyperparams.initializer proto.
    build_for_keras: Whether the initializers should be built for Keras
      operators. If false, builds for Slim.

  Returns:
    tf initializer.

  Raises:
    ValueError: On unknown initializer.
  """
  initializer_oneof = initializer.WhichOneof('initializer_oneof')
  if initializer_oneof == 'truncated_normal_initializer':
    return tf.truncated_normal_initializer(
        mean=initializer.truncated_normal_initializer.mean,
        stddev=initializer.truncated_normal_initializer.stddev)
  if initializer_oneof == 'random_normal_initializer':
    return tf.random_normal_initializer(
        mean=initializer.random_normal_initializer.mean,
        stddev=initializer.random_normal_initializer.stddev)
  if initializer_oneof == 'variance_scaling_initializer':
    enum_descriptor = (hyperparams_pb2.VarianceScalingInitializer.
                       DESCRIPTOR.enum_types_by_name['Mode'])
    mode = enum_descriptor.values_by_number[initializer.
                                            variance_scaling_initializer.
                                            mode].name
    if build_for_keras:
      if initializer.variance_scaling_initializer.uniform:
        return tf.variance_scaling_initializer(
            scale=initializer.variance_scaling_initializer.factor,
            mode=mode.lower(),
            distribution='uniform')
      else:
        # In the TF 1.9 release and earlier, the truncated_normal distribution
        # was not supported correctly. So, in these earlier versions of
        # TensorFlow, the ValueError will be raised, and we manually truncate
        # the distribution scale.
        #
        # It is insufficient to just set distribution to `normal` from the
        # start, because the `normal` distribution in newer TensorFlow versions
        # creates a truncated distribution, whereas it created untruncated
        # distributions in older versions.
        try:
          return tf.variance_scaling_initializer(
              scale=initializer.variance_scaling_initializer.factor,
              mode=mode.lower(),
              distribution='truncated_normal')
        except ValueError:
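          # Assumed rationale (not stated in the original comments):
          # 0.87962566103423978 is the standard deviation of a unit normal
          # truncated to [-2, 2], so dividing the scale by its square is meant
          # to compensate for the variance lost to truncation in this fallback.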
          truncate_constant = 0.87962566103423978
          truncated_scale = initializer.variance_scaling_initializer.factor / (
              truncate_constant * truncate_constant
          )
          return tf.variance_scaling_initializer(
              scale=truncated_scale,
              mode=mode.lower(),
              distribution='normal')
    else:
      return slim.variance_scaling_initializer(
          factor=initializer.variance_scaling_initializer.factor,
          mode=mode,
          uniform=initializer.variance_scaling_initializer.uniform)
  raise ValueError('Unknown initializer function: {}'.format(
      initializer_oneof))


def _build_batch_norm_params(batch_norm, is_training):
  """Build a dictionary of batch_norm params from config.

  Args:
    batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.
    is_training: Whether the model is in training mode.

  Returns:
    A dictionary containing batch_norm parameters.
  """
  batch_norm_params = {
      'decay': batch_norm.decay,
      'center': batch_norm.center,
      'scale': batch_norm.scale,
      'epsilon': batch_norm.epsilon,
      # Remove is_training parameter from here and deprecate it in the proto
      # once we refactor Faster RCNN models to set is_training through an outer
      # arg_scope in the meta architecture.
      'is_training': is_training and batch_norm.train,
  }
  return batch_norm_params
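
# Note (illustrative, derived from the code above): because 'is_training' is
# computed as `is_training and batch_norm.train`, a config with
# batch_norm.train set to false yields is_training=False even when build() is
# called with is_training=True, so slim.batch_norm normalizes with, and does
# not update, the moving mean and variance.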


def _build_keras_batch_norm_params(batch_norm):
  """Build a dictionary of Keras BatchNormalization params from config.

  Args:
    batch_norm: hyperparams_pb2.ConvHyperparams.batch_norm proto.

  Returns:
    A dictionary containing Keras BatchNormalization parameters.
  """
  # Note: Although decay is defined to be 1 - momentum in batch_norm,
  # decay in the slim batch_norm layers was erroneously defined and is
  # actually the same as momentum in the Keras batch_norm layers.
  # For context, see: github.com/keras-team/keras/issues/6839
  batch_norm_params = {
      'momentum': batch_norm.decay,
      'center': batch_norm.center,
      'scale': batch_norm.scale,
      'epsilon': batch_norm.epsilon,
  }
  return batch_norm_params
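
# Example (illustrative only, derived from the mapping above): a batch_norm
# proto with `decay: 0.997  center: true  scale: true  epsilon: 0.001` becomes
# {'momentum': 0.997, 'center': True, 'scale': True, 'epsilon': 0.001}, which
# can be passed as **kwargs to tf.keras.layers.BatchNormalization or to
# FreezableBatchNorm via KerasLayerHyperparams.build_batch_norm().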