# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
  15. """Functions for reading and updating configuration files."""
  16. import os
  17. import tensorflow as tf
  18. from google.protobuf import text_format
  19. from tensorflow.python.lib.io import file_io
  20. from object_detection.protos import eval_pb2
  21. from object_detection.protos import graph_rewriter_pb2
  22. from object_detection.protos import input_reader_pb2
  23. from object_detection.protos import model_pb2
  24. from object_detection.protos import pipeline_pb2
  25. from object_detection.protos import train_pb2


def get_image_resizer_config(model_config):
  """Returns the image resizer config from a model config.

  Args:
    model_config: A model_pb2.DetectionModel.

  Returns:
    An image_resizer_pb2.ImageResizer.

  Raises:
    ValueError: If the model type is not recognized.
  """
  meta_architecture = model_config.WhichOneof("model")
  if meta_architecture == "faster_rcnn":
    return model_config.faster_rcnn.image_resizer
  if meta_architecture == "ssd":
    return model_config.ssd.image_resizer

  raise ValueError("Unknown model type: {}".format(meta_architecture))


def get_spatial_image_size(image_resizer_config):
  """Returns expected spatial size of the output image from a given config.

  Args:
    image_resizer_config: An image_resizer_pb2.ImageResizer.

  Returns:
    A list of two integers of the form [height, width]. `height` and `width`
    are set to -1 if they cannot be determined during graph construction.

  Raises:
    ValueError: If the image resizer type is not recognized.
  """
  if image_resizer_config.HasField("fixed_shape_resizer"):
    return [
        image_resizer_config.fixed_shape_resizer.height,
        image_resizer_config.fixed_shape_resizer.width
    ]
  if image_resizer_config.HasField("keep_aspect_ratio_resizer"):
    if image_resizer_config.keep_aspect_ratio_resizer.pad_to_max_dimension:
      return [image_resizer_config.keep_aspect_ratio_resizer.max_dimension] * 2
    else:
      return [-1, -1]
  if image_resizer_config.HasField("identity_resizer"):
    return [-1, -1]
  raise ValueError("Unknown image resizer type.")
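

# Example (a sketch, not part of the original module): determining the static
# output size for a fixed-shape resizer parsed from text format. The
# image_resizer_pb2 import is assumed here for illustration only.
#
#   from object_detection.protos import image_resizer_pb2
#   resizer_config = image_resizer_pb2.ImageResizer()
#   text_format.Merge("fixed_shape_resizer { height: 300 width: 300 }",
#                     resizer_config)
#   get_spatial_image_size(resizer_config)  # -> [300, 300]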


def get_configs_from_pipeline_file(pipeline_config_path, config_override=None):
  """Reads config from a file containing pipeline_pb2.TrainEvalPipelineConfig.

  Args:
    pipeline_config_path: Path to pipeline_pb2.TrainEvalPipelineConfig text
      proto.
    config_override: A pipeline_pb2.TrainEvalPipelineConfig text proto to
      override pipeline_config_path.

  Returns:
    Dictionary of configuration objects. Keys are `model`, `train_config`,
    `train_input_config`, `eval_config`, `eval_input_config`. Values are the
    corresponding config objects.
  """
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  with tf.gfile.GFile(pipeline_config_path, "r") as f:
    proto_str = f.read()
    text_format.Merge(proto_str, pipeline_config)
  if config_override:
    text_format.Merge(config_override, pipeline_config)
  return create_configs_from_pipeline_proto(pipeline_config)
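

# Example usage (a sketch; the config path and override are hypothetical):
#
#   configs = get_configs_from_pipeline_file(
#       "path/to/pipeline.config",
#       config_override="train_config { batch_size: 8 }")
#   configs["train_config"].batch_size  # -> 8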


def create_configs_from_pipeline_proto(pipeline_config):
  """Creates a configs dictionary from pipeline_pb2.TrainEvalPipelineConfig.

  Args:
    pipeline_config: pipeline_pb2.TrainEvalPipelineConfig proto object.

  Returns:
    Dictionary of configuration objects. Keys are `model`, `train_config`,
    `train_input_config`, `eval_config`, `eval_input_configs`. Values are the
    corresponding config objects or lists of config objects (only for
    eval_input_configs).
  """
  configs = {}
  configs["model"] = pipeline_config.model
  configs["train_config"] = pipeline_config.train_config
  configs["train_input_config"] = pipeline_config.train_input_reader
  configs["eval_config"] = pipeline_config.eval_config
  configs["eval_input_configs"] = pipeline_config.eval_input_reader
  # Keeps eval_input_config only for backwards compatibility. All clients
  # should read eval_input_configs instead.
  if configs["eval_input_configs"]:
    configs["eval_input_config"] = configs["eval_input_configs"][0]
  if pipeline_config.HasField("graph_rewriter"):
    configs["graph_rewriter_config"] = pipeline_config.graph_rewriter

  return configs


def get_graph_rewriter_config_from_file(graph_rewriter_config_file):
  """Parses config for graph rewriter.

  Args:
    graph_rewriter_config_file: file path to the graph rewriter config.

  Returns:
    graph_rewriter_pb2.GraphRewriter proto
  """
  graph_rewriter_config = graph_rewriter_pb2.GraphRewriter()
  with tf.gfile.GFile(graph_rewriter_config_file, "r") as f:
    text_format.Merge(f.read(), graph_rewriter_config)
  return graph_rewriter_config


def create_pipeline_proto_from_configs(configs):
  """Creates a pipeline_pb2.TrainEvalPipelineConfig from configs dictionary.

  This function performs the inverse operation of
  create_configs_from_pipeline_proto().

  Args:
    configs: Dictionary of configs. See get_configs_from_pipeline_file().

  Returns:
    A fully populated pipeline_pb2.TrainEvalPipelineConfig.
  """
  pipeline_config = pipeline_pb2.TrainEvalPipelineConfig()
  pipeline_config.model.CopyFrom(configs["model"])
  pipeline_config.train_config.CopyFrom(configs["train_config"])
  pipeline_config.train_input_reader.CopyFrom(configs["train_input_config"])
  pipeline_config.eval_config.CopyFrom(configs["eval_config"])
  pipeline_config.eval_input_reader.extend(configs["eval_input_configs"])
  if "graph_rewriter_config" in configs:
    pipeline_config.graph_rewriter.CopyFrom(configs["graph_rewriter_config"])
  return pipeline_config
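

# Round-trip sketch (hypothetical path): a configs dictionary read from disk
# can be edited in place and reassembled into a single pipeline proto.
#
#   configs = get_configs_from_pipeline_file("path/to/pipeline.config")
#   configs["train_config"].batch_size = 16
#   pipeline_proto = create_pipeline_proto_from_configs(configs)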


def save_pipeline_config(pipeline_config, directory):
  """Saves a pipeline config text file to disk.

  Args:
    pipeline_config: A pipeline_pb2.TrainEvalPipelineConfig.
    directory: The model directory into which the pipeline config file will be
      saved.
  """
  if not file_io.file_exists(directory):
    file_io.recursive_create_dir(directory)
  pipeline_config_path = os.path.join(directory, "pipeline.config")
  config_text = text_format.MessageToString(pipeline_config)
  with tf.gfile.Open(pipeline_config_path, "wb") as f:
    tf.logging.info("Writing pipeline config file to %s",
                    pipeline_config_path)
    f.write(config_text)
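

# Example (a sketch; the directory is hypothetical): persisting an edited
# pipeline proto. This writes <directory>/pipeline.config, creating the
# directory first if it does not exist.
#
#   save_pipeline_config(pipeline_proto, "/tmp/model_dir")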


def get_configs_from_multiple_files(model_config_path="",
                                    train_config_path="",
                                    train_input_config_path="",
                                    eval_config_path="",
                                    eval_input_config_path="",
                                    graph_rewriter_config_path=""):
  """Reads training configuration from multiple config files.

  Args:
    model_config_path: Path to model_pb2.DetectionModel.
    train_config_path: Path to train_pb2.TrainConfig.
    train_input_config_path: Path to input_reader_pb2.InputReader.
    eval_config_path: Path to eval_pb2.EvalConfig.
    eval_input_config_path: Path to input_reader_pb2.InputReader.
    graph_rewriter_config_path: Path to graph_rewriter_pb2.GraphRewriter.

  Returns:
    Dictionary of configuration objects. Keys are `model`, `train_config`,
    `train_input_config`, `eval_config`, `eval_input_config`. Key/value pairs
    are returned only for valid (non-empty) path strings.
  """
  configs = {}
  if model_config_path:
    model_config = model_pb2.DetectionModel()
    with tf.gfile.GFile(model_config_path, "r") as f:
      text_format.Merge(f.read(), model_config)
    configs["model"] = model_config

  if train_config_path:
    train_config = train_pb2.TrainConfig()
    with tf.gfile.GFile(train_config_path, "r") as f:
      text_format.Merge(f.read(), train_config)
    configs["train_config"] = train_config

  if train_input_config_path:
    train_input_config = input_reader_pb2.InputReader()
    with tf.gfile.GFile(train_input_config_path, "r") as f:
      text_format.Merge(f.read(), train_input_config)
    configs["train_input_config"] = train_input_config

  if eval_config_path:
    eval_config = eval_pb2.EvalConfig()
    with tf.gfile.GFile(eval_config_path, "r") as f:
      text_format.Merge(f.read(), eval_config)
    configs["eval_config"] = eval_config

  if eval_input_config_path:
    eval_input_config = input_reader_pb2.InputReader()
    with tf.gfile.GFile(eval_input_config_path, "r") as f:
      text_format.Merge(f.read(), eval_input_config)
    configs["eval_input_configs"] = [eval_input_config]

  if graph_rewriter_config_path:
    configs["graph_rewriter_config"] = get_graph_rewriter_config_from_file(
        graph_rewriter_config_path)

  return configs
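

# Example (a sketch; all paths are hypothetical): assembling a configs
# dictionary from separate per-component files instead of a single pipeline
# file. Only keys whose paths are non-empty appear in the result.
#
#   configs = get_configs_from_multiple_files(
#       model_config_path="model.config",
#       train_config_path="train.config",
#       train_input_config_path="train_input.config")
#   sorted(configs.keys())
#   # -> ['model', 'train_config', 'train_input_config']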


def get_number_of_classes(model_config):
  """Returns the number of classes for a detection model.

  Args:
    model_config: A model_pb2.DetectionModel.

  Returns:
    Number of classes.

  Raises:
    ValueError: If the model type is not recognized.
  """
  meta_architecture = model_config.WhichOneof("model")
  if meta_architecture == "faster_rcnn":
    return model_config.faster_rcnn.num_classes
  if meta_architecture == "ssd":
    return model_config.ssd.num_classes

  raise ValueError("Expected the model to be one of 'faster_rcnn' or 'ssd'.")


def get_optimizer_type(train_config):
  """Returns the optimizer type for training.

  Args:
    train_config: A train_pb2.TrainConfig.

  Returns:
    The type of the optimizer.
  """
  return train_config.optimizer.WhichOneof("optimizer")


def get_learning_rate_type(optimizer_config):
  """Returns the learning rate type for training.

  Args:
    optimizer_config: An optimizer_pb2.Optimizer.

  Returns:
    The type of the learning rate.
  """
  return optimizer_config.learning_rate.WhichOneof("learning_rate")


def _is_generic_key(key):
  """Determines whether the key starts with a generic config dictionary key."""
  for prefix in [
      "graph_rewriter_config",
      "model",
      "train_input_config",
      "train_config",
      "eval_config"]:
    if key.startswith(prefix + "."):
      return True
  return False


def _check_and_convert_legacy_input_config_key(key):
  """Checks key and converts legacy input config update to specific update.

  Args:
    key: A string that indicates the target of the update operation.

  Returns:
    is_valid_input_config_key: A boolean indicating whether the input key is to
      update input config(s).
    key_name: 'eval_input_configs' or 'train_input_config' string if
      is_valid_input_config_key is true. None if is_valid_input_config_key is
      false.
    input_name: Always None, since a legacy input config key never specifies
      the target input config. Kept only to match the output form defined for
      input config updates.
    field_name: The field name in the input config. `key` itself if
      is_valid_input_config_key is false.
  """
  key_name = None
  input_name = None
  field_name = key
  is_valid_input_config_key = True
  if field_name == "train_shuffle":
    key_name = "train_input_config"
    field_name = "shuffle"
  elif field_name == "eval_shuffle":
    key_name = "eval_input_configs"
    field_name = "shuffle"
  elif field_name == "train_input_path":
    key_name = "train_input_config"
    field_name = "input_path"
  elif field_name == "eval_input_path":
    key_name = "eval_input_configs"
    field_name = "input_path"
  elif field_name == "append_train_input_path":
    key_name = "train_input_config"
    field_name = "input_path"
  elif field_name == "append_eval_input_path":
    key_name = "eval_input_configs"
    field_name = "input_path"
  else:
    is_valid_input_config_key = False

  return is_valid_input_config_key, key_name, input_name, field_name
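

# Example (illustration only): legacy keys are mapped onto the
# (key_name, input_name, field_name) form used by the specific update helpers,
# while unrecognized keys pass through unchanged.
#
#   _check_and_convert_legacy_input_config_key("eval_input_path")
#   # -> (True, "eval_input_configs", None, "input_path")
#   _check_and_convert_legacy_input_config_key("learning_rate")
#   # -> (False, None, None, "learning_rate")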


def check_and_parse_input_config_key(configs, key):
  """Checks key and returns specific fields if key is valid input config update.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    key: A string that indicates the target of the update operation.

  Returns:
    is_valid_input_config_key: A boolean indicating whether the input key is to
      update input config(s).
    key_name: 'eval_input_configs' or 'train_input_config' string if
      is_valid_input_config_key is true. None if is_valid_input_config_key is
      false.
    input_name: The name of the input config to be updated. None if
      is_valid_input_config_key is false.
    field_name: The field name in the input config. `key` itself if
      is_valid_input_config_key is false.

  Raises:
    ValueError: when the input key format doesn't match any known formats.
    ValueError: if key_name doesn't match 'eval_input_configs' or
      'train_input_config'.
    ValueError: if input_name doesn't match any name in train or eval input
      configs.
    ValueError: if field_name doesn't match any supported fields.
  """
  key_name = None
  input_name = None
  field_name = None
  fields = key.split(":")
  if len(fields) == 1:
    field_name = key
    return _check_and_convert_legacy_input_config_key(key)
  elif len(fields) == 3:
    key_name = fields[0]
    input_name = fields[1]
    field_name = fields[2]
  else:
    raise ValueError("Invalid key format when overriding configs.")

  # Checks if key_name is valid for specific update.
  if key_name not in ["eval_input_configs", "train_input_config"]:
    raise ValueError("Invalid key_name when overriding input config.")

  # Checks if input_name is valid for specific update. For the train input
  # config it should match configs[key_name].name; for eval input configs it
  # should match the name field of one of the eval_input_configs.
  if isinstance(configs[key_name], input_reader_pb2.InputReader):
    is_valid_input_name = configs[key_name].name == input_name
  else:
    is_valid_input_name = input_name in [
        eval_input_config.name for eval_input_config in configs[key_name]
    ]
  if not is_valid_input_name:
    raise ValueError("Invalid input_name when overriding input config.")

  # Checks if field_name is valid for specific update.
  if field_name not in [
      "input_path", "label_map_path", "shuffle", "mask_type",
      "sample_1_of_n_examples"
  ]:
    raise ValueError("Invalid field_name when overriding input config.")

  return True, key_name, input_name, field_name
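

# Example (a sketch, assuming `configs` contains an eval input config named
# "eval_coco" as in the docstring of merge_external_params_with_configs below):
#
#   check_and_parse_input_config_key(
#       configs, "eval_input_configs:eval_coco:input_path")
#   # -> (True, "eval_input_configs", "eval_coco", "input_path")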


def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):
  """Updates `configs` dictionary based on supplied parameters.

  This utility is for modifying specific fields in the object detection
  configs. Say that one would like to experiment with different learning rates,
  momentum values, or batch sizes. Rather than creating a new config text file
  for each experiment, one can use a single base config file, and update
  particular values.

  There are two types of field overrides:

  1. Strategy-based overrides, which update multiple relevant configuration
  options. For example, updating `learning_rate` will update both the warmup
  and final learning rates.
  In this case key can be one of the following formats:
      1. legacy update: single string that indicates the attribute to be
        updated. E.g. 'label_map_path', 'eval_input_path', 'shuffle'.
        Note that when updating fields (e.g. eval_input_path, eval_shuffle) in
        eval_input_configs, the override will only be applied when
        eval_input_configs has exactly 1 element.
      2. specific update: colon-separated string that indicates which field in
        which input_config to update. It should have 3 fields:
        - key_name: Name of the input config we should update, either
          'train_input_config' or 'eval_input_configs'
        - input_name: a 'name' that can be used to identify elements,
          especially when configs[key_name] is a repeated field.
        - field_name: name of the field that you want to override.
        For example, given configs dict as below:
          configs = {
              'model': {...}
              'train_config': {...}
              'train_input_config': {...}
              'eval_config': {...}
              'eval_input_configs': [{ name:"eval_coco", ...},
                                     { name:"eval_voc", ... }]
          }
        Assume we want to update the input_path of the eval_input_config
        whose name is 'eval_coco'. The `key` would then be:
        'eval_input_configs:eval_coco:input_path'

  2. Generic key/value, which updates a specific parameter based on namespaced
  configuration keys. For example,
  `model.ssd.loss.hard_example_miner.max_negatives_per_positive` will update
  the hard example miner configuration for an SSD model config. Generic
  overrides are automatically detected based on the namespaced keys.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    hparams: A `HParams`.
    kwargs_dict: Extra keyword arguments that are treated the same way as
      attribute/value pairs in `hparams`. Note that hyperparameters with the
      same names will override keyword arguments.

  Returns:
    `configs` dictionary.

  Raises:
    ValueError: when the key string doesn't match any of its allowed formats.
  """
  if kwargs_dict is None:
    kwargs_dict = {}
  if hparams:
    kwargs_dict.update(hparams.values())
  for key, value in kwargs_dict.items():
    tf.logging.info("Maybe overwriting %s: %s", key, value)
    # pylint: disable=g-explicit-bool-comparison
    if value == "" or value is None:
      continue
    # pylint: enable=g-explicit-bool-comparison
    elif _maybe_update_config_with_key_value(configs, key, value):
      continue
    elif _is_generic_key(key):
      _update_generic(configs, key, value)
    else:
      tf.logging.info("Ignoring config override key: %s", key)
  return configs
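

# Example usage (a sketch): mixing a strategy-based key with a generic
# namespaced key. The generic path requires each field along the way to be
# explicitly set in the base config, since _update_generic checks HasField;
# this example assumes an SSD pipeline in which num_classes is set.
#
#   configs = merge_external_params_with_configs(
#       configs,
#       kwargs_dict={
#           "learning_rate": 0.003,        # strategy-based: rescales schedule
#           "model.ssd.num_classes": 37,   # generic namespaced override
#       })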


def _maybe_update_config_with_key_value(configs, key, value):
  """Checks key type and updates `configs` with the key value pair accordingly.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    key: A string that indicates the field(s) to be updated.
    value: Value used to override existing field value.

  Returns:
    A boolean value that indicates whether the override succeeds.

  Raises:
    ValueError: when the key string doesn't match any of the formats above.
  """
  is_valid_input_config_key, key_name, input_name, field_name = (
      check_and_parse_input_config_key(configs, key))
  if is_valid_input_config_key:
    update_input_reader_config(
        configs,
        key_name=key_name,
        input_name=input_name,
        field_name=field_name,
        value=value)
  elif field_name == "learning_rate":
    _update_initial_learning_rate(configs, value)
  elif field_name == "batch_size":
    _update_batch_size(configs, value)
  elif field_name == "momentum_optimizer_value":
    _update_momentum_optimizer_value(configs, value)
  elif field_name == "classification_localization_weight_ratio":
    # Localization weight is fixed to 1.0.
    _update_classification_localization_weight_ratio(configs, value)
  elif field_name == "focal_loss_gamma":
    _update_focal_loss_gamma(configs, value)
  elif field_name == "focal_loss_alpha":
    _update_focal_loss_alpha(configs, value)
  elif field_name == "train_steps":
    _update_train_steps(configs, value)
  elif field_name == "label_map_path":
    _update_label_map_path(configs, value)
  elif field_name == "mask_type":
    _update_mask_type(configs, value)
  elif field_name == "sample_1_of_n_eval_examples":
    _update_all_eval_input_configs(configs, "sample_1_of_n_examples", value)
  elif field_name == "eval_num_epochs":
    _update_all_eval_input_configs(configs, "num_epochs", value)
  elif field_name == "eval_with_moving_averages":
    _update_use_moving_averages(configs, value)
  elif field_name == "retain_original_images_in_eval":
    _update_retain_original_images(configs["eval_config"], value)
  elif field_name == "use_bfloat16":
    _update_use_bfloat16(configs, value)
  else:
    return False

  return True


def _update_tf_record_input_path(input_config, input_path):
  """Updates input configuration to reflect a new input path.

  The input_config object is updated in place, and hence not returned.

  Args:
    input_config: An input_reader_pb2.InputReader.
    input_path: A path to data or a list of paths.

  Raises:
    TypeError: if input reader type is not `tf_record_input_reader`.
  """
  input_reader_type = input_config.WhichOneof("input_reader")
  if input_reader_type == "tf_record_input_reader":
    input_config.tf_record_input_reader.ClearField("input_path")
    if isinstance(input_path, list):
      input_config.tf_record_input_reader.input_path.extend(input_path)
    else:
      input_config.tf_record_input_reader.input_path.append(input_path)
  else:
    raise TypeError("Input reader type must be `tf_record_input_reader`.")


def update_input_reader_config(configs,
                               key_name=None,
                               input_name=None,
                               field_name=None,
                               value=None,
                               path_updater=_update_tf_record_input_path):
  """Updates specified input reader config field.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    key_name: Name of the input config we should update, either
      'train_input_config' or 'eval_input_configs'.
    input_name: String name used to identify the input config to update.
      Should be either None or the value of the 'name' field in one of the
      input reader configs.
    field_name: Field name in input_reader_pb2.InputReader.
    value: Value used to override existing field value.
    path_updater: helper function used to update the input path. Only used when
      field_name is "input_path".

  Raises:
    ValueError: when input field_name is None.
    ValueError: when input_name is None and the number of eval_input_readers
      does not equal 1.
  """
  if isinstance(configs[key_name], input_reader_pb2.InputReader):
    # Updates singular input_config object.
    target_input_config = configs[key_name]
    if field_name == "input_path":
      path_updater(input_config=target_input_config, input_path=value)
    else:
      setattr(target_input_config, field_name, value)
  elif input_name is None and len(configs[key_name]) == 1:
    # Updates the first (and only) object of the input_config list.
    target_input_config = configs[key_name][0]
    if field_name == "input_path":
      path_updater(input_config=target_input_config, input_path=value)
    else:
      setattr(target_input_config, field_name, value)
  elif input_name is not None and len(configs[key_name]):
    # Updates the input_config whose name matches input_name.
    update_count = 0
    for input_config in configs[key_name]:
      if input_config.name == input_name:
        setattr(input_config, field_name, value)
        update_count = update_count + 1
    if not update_count:
      raise ValueError(
          "Input name {} not found when overriding.".format(input_name))
    elif update_count > 1:
      raise ValueError("Duplicate input name found when overriding.")
  else:
    key_name = "None" if key_name is None else key_name
    input_name = "None" if input_name is None else input_name
    field_name = "None" if field_name is None else field_name
    raise ValueError("Unknown input config overriding: "
                     "key_name:{}, input_name:{}, field_name:{}.".format(
                         key_name, input_name, field_name))
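

# Example (a sketch, assuming an eval input config named "eval_coco" exists in
# configs["eval_input_configs"]; the label map path is hypothetical): updating
# a single member of the repeated eval_input_configs list by name.
#
#   update_input_reader_config(
#       configs,
#       key_name="eval_input_configs",
#       input_name="eval_coco",
#       field_name="label_map_path",
#       value="path/to/label_map.pbtxt")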


def _update_initial_learning_rate(configs, learning_rate):
  """Updates `configs` to reflect the new initial learning rate.

  This function updates the initial learning rate. For learning rate schedules,
  all other defined learning rates in the pipeline config are scaled to
  maintain their same ratio with the initial learning rate.
  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    learning_rate: Initial learning rate for optimizer.

  Raises:
    TypeError: if optimizer type is not supported, or if learning rate type is
      not supported.
  """
  optimizer_type = get_optimizer_type(configs["train_config"])
  if optimizer_type == "rms_prop_optimizer":
    optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
  elif optimizer_type == "momentum_optimizer":
    optimizer_config = configs["train_config"].optimizer.momentum_optimizer
  elif optimizer_type == "adam_optimizer":
    optimizer_config = configs["train_config"].optimizer.adam_optimizer
  else:
    raise TypeError("Optimizer %s is not supported." % optimizer_type)

  learning_rate_type = get_learning_rate_type(optimizer_config)
  if learning_rate_type == "constant_learning_rate":
    constant_lr = optimizer_config.learning_rate.constant_learning_rate
    constant_lr.learning_rate = learning_rate
  elif learning_rate_type == "exponential_decay_learning_rate":
    exponential_lr = (
        optimizer_config.learning_rate.exponential_decay_learning_rate)
    exponential_lr.initial_learning_rate = learning_rate
  elif learning_rate_type == "manual_step_learning_rate":
    manual_lr = optimizer_config.learning_rate.manual_step_learning_rate
    original_learning_rate = manual_lr.initial_learning_rate
    learning_rate_scaling = float(learning_rate) / original_learning_rate
    manual_lr.initial_learning_rate = learning_rate
    for schedule in manual_lr.schedule:
      schedule.learning_rate *= learning_rate_scaling
  elif learning_rate_type == "cosine_decay_learning_rate":
    cosine_lr = optimizer_config.learning_rate.cosine_decay_learning_rate
    learning_rate_base = cosine_lr.learning_rate_base
    warmup_learning_rate = cosine_lr.warmup_learning_rate
    warmup_scale_factor = warmup_learning_rate / learning_rate_base
    cosine_lr.learning_rate_base = learning_rate
    cosine_lr.warmup_learning_rate = warmup_scale_factor * learning_rate
  else:
    raise TypeError("Learning rate %s is not supported." % learning_rate_type)
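

# Worked example: for a cosine_decay_learning_rate with learning_rate_base:
# 0.04 and warmup_learning_rate: 0.013, _update_initial_learning_rate(configs,
# 0.004) sets learning_rate_base to 0.004 and warmup_learning_rate to
# (0.013 / 0.04) * 0.004 = 0.0013, preserving the warmup-to-base ratio.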


def _update_batch_size(configs, batch_size):
  """Updates `configs` to reflect the new training batch size.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    batch_size: Batch size to use for training (ideally a power of 2). Inputs
      are rounded, and capped to be 1 or greater.
  """
  configs["train_config"].batch_size = max(1, int(round(batch_size)))


def _validate_message_has_field(message, field):
  if not message.HasField(field):
    raise ValueError("Expecting message to have field %s" % field)


def _update_generic(configs, key, value):
  """Updates a pipeline configuration parameter based on a generic key/value.

  Args:
    configs: Dictionary of pipeline configuration protos.
    key: A string key, dot-delimited to represent the argument key. The first
      segment must be a key of `configs`.
      e.g. "train_config.batch_size"
    value: A value to set the argument to. The type of the value must match the
      type for the protocol buffer. Note that setting the wrong type will
      result in a TypeError.
      e.g. 42

  Raises:
    ValueError: if the message key does not match the existing proto fields.
    TypeError: if the value type doesn't match the protobuf field type.
  """
  fields = key.split(".")
  first_field = fields.pop(0)
  last_field = fields.pop()
  message = configs[first_field]
  for field in fields:
    _validate_message_has_field(message, field)
    message = getattr(message, field)
  _validate_message_has_field(message, last_field)
  setattr(message, last_field, value)
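

# Example (a sketch): a generic dot-delimited update. The first segment selects
# the entry in `configs`; the remaining segments walk into the proto. Because
# _validate_message_has_field uses HasField, every field on the path, including
# the final scalar, must already be set in the base config.
#
#   _update_generic(configs, "train_config.optimizer.use_moving_average", False)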


def _update_momentum_optimizer_value(configs, momentum):
  """Updates `configs` to reflect the new momentum value.

  Momentum is only supported for RMSPropOptimizer and MomentumOptimizer; a
  TypeError is raised for any other optimizer. The configs dictionary is
  updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    momentum: New momentum value. Values are clipped at 0.0 and 1.0.

  Raises:
    TypeError: If the optimizer type is not `rms_prop_optimizer` or
      `momentum_optimizer`.
  """
  optimizer_type = get_optimizer_type(configs["train_config"])
  if optimizer_type == "rms_prop_optimizer":
    optimizer_config = configs["train_config"].optimizer.rms_prop_optimizer
  elif optimizer_type == "momentum_optimizer":
    optimizer_config = configs["train_config"].optimizer.momentum_optimizer
  else:
    raise TypeError("Optimizer type must be one of `rms_prop_optimizer` or "
                    "`momentum_optimizer`.")

  optimizer_config.momentum_optimizer_value = min(max(0.0, momentum), 1.0)


def _update_classification_localization_weight_ratio(configs, ratio):
  """Updates the classification/localization weight loss ratio.

  Detection models usually define a loss weight for both classification and
  objectness. This function updates the weights such that the ratio between
  the classification weight and the localization weight is the ratio provided.
  Arbitrarily, localization weight is set to 1.0.

  Note that in the case of Faster R-CNN, this same ratio is applied to the
  first stage objectness loss weight relative to the localization loss weight.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    ratio: Desired ratio of classification (and/or objectness) loss weight to
      localization loss weight.
  """
  meta_architecture = configs["model"].WhichOneof("model")
  if meta_architecture == "faster_rcnn":
    model = configs["model"].faster_rcnn
    model.first_stage_localization_loss_weight = 1.0
    model.first_stage_objectness_loss_weight = ratio
    model.second_stage_localization_loss_weight = 1.0
    model.second_stage_classification_loss_weight = ratio
  if meta_architecture == "ssd":
    model = configs["model"].ssd
    model.loss.localization_weight = 1.0
    model.loss.classification_weight = ratio


def _get_classification_loss(model_config):
  """Returns the classification loss for a model."""
  meta_architecture = model_config.WhichOneof("model")
  if meta_architecture == "faster_rcnn":
    model = model_config.faster_rcnn
    classification_loss = model.second_stage_classification_loss
  elif meta_architecture == "ssd":
    model = model_config.ssd
    classification_loss = model.loss.classification_loss
  else:
    raise TypeError("Did not recognize the model architecture.")
  return classification_loss


def _update_focal_loss_gamma(configs, gamma):
  """Updates the gamma value for a sigmoid focal loss.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    gamma: Exponent term in focal loss.

  Raises:
    TypeError: If the classification loss is not `weighted_sigmoid_focal`.
  """
  classification_loss = _get_classification_loss(configs["model"])
  classification_loss_type = classification_loss.WhichOneof(
      "classification_loss")
  if classification_loss_type != "weighted_sigmoid_focal":
    raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
  classification_loss.weighted_sigmoid_focal.gamma = gamma


def _update_focal_loss_alpha(configs, alpha):
  """Updates the alpha value for a sigmoid focal loss.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    alpha: Class weight multiplier for sigmoid loss.

  Raises:
    TypeError: If the classification loss is not `weighted_sigmoid_focal`.
  """
  classification_loss = _get_classification_loss(configs["model"])
  classification_loss_type = classification_loss.WhichOneof(
      "classification_loss")
  if classification_loss_type != "weighted_sigmoid_focal":
    raise TypeError("Classification loss must be `weighted_sigmoid_focal`.")
  classification_loss.weighted_sigmoid_focal.alpha = alpha


def _update_train_steps(configs, train_steps):
  """Updates `configs` to reflect the new number of training steps."""
  configs["train_config"].num_steps = int(train_steps)


def _update_eval_steps(configs, eval_steps):
  """Updates `configs` to reflect the new number of eval steps per evaluation."""
  configs["eval_config"].num_examples = int(eval_steps)


def _update_all_eval_input_configs(configs, field, value):
  """Updates the content of `field` with `value` for all eval input configs."""
  for eval_input_config in configs["eval_input_configs"]:
    setattr(eval_input_config, field, value)


def _update_label_map_path(configs, label_map_path):
  """Updates the label map path for both train and eval input readers.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    label_map_path: New path to `StringIntLabelMap` pbtxt file.
  """
  configs["train_input_config"].label_map_path = label_map_path
  _update_all_eval_input_configs(configs, "label_map_path", label_map_path)


def _update_mask_type(configs, mask_type):
  """Updates the mask type for both train and eval input readers.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    mask_type: A string name representing a value of
      input_reader_pb2.InstanceMaskType.
  """
  configs["train_input_config"].mask_type = mask_type
  _update_all_eval_input_configs(configs, "mask_type", mask_type)


def _update_use_moving_averages(configs, use_moving_averages):
  """Updates the eval config option to use or not use moving averages.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    use_moving_averages: Boolean indicating whether moving average variables
      should be loaded during evaluation.
  """
  configs["eval_config"].use_moving_averages = use_moving_averages


def _update_retain_original_images(eval_config, retain_original_images):
  """Updates eval config with option to retain original images.

  The eval_config object is updated in place, and hence not returned.

  Args:
    eval_config: An eval_pb2.EvalConfig.
    retain_original_images: Boolean indicating whether to retain original
      images in eval mode.
  """
  eval_config.retain_original_images = retain_original_images


def _update_use_bfloat16(configs, use_bfloat16):
  """Updates `configs` to reflect the new setup on whether to use bfloat16.

  The configs dictionary is updated in place, and hence not returned.

  Args:
    configs: Dictionary of configuration objects. See outputs from
      get_configs_from_pipeline_file() or get_configs_from_multiple_files().
    use_bfloat16: A bool, indicating whether to use bfloat16 for training.
  """
  configs["train_config"].use_bfloat16 = use_bfloat16