syntax = "proto2";

package object_detection.protos;

// Configuration proto for the convolution op hyperparameters to use in the
// object detection pipeline.
message Hyperparams {

  // Operations affected by hyperparameters.
  enum Op {
    // Convolution, Separable Convolution, Convolution transpose.
    CONV = 1;

    // Fully connected.
    FC = 2;
  }
  optional Op op = 1 [default = CONV];

  // Regularizer for the weights of the convolution op.
  optional Regularizer regularizer = 2;

  // Initializer for the weights of the convolution op.
  optional Initializer initializer = 3;

  // Type of activation to apply after convolution.
  enum Activation {
    // Use None (no activation).
    NONE = 0;

    // Use tf.nn.relu.
    RELU = 1;

    // Use tf.nn.relu6.
    RELU_6 = 2;
  }
  optional Activation activation = 4 [default = RELU];

  // Note that if nothing below is selected, then no normalization is applied.
  oneof normalizer_oneof {
    // BatchNorm hyperparameters.
    BatchNorm batch_norm = 5;

    // GroupNorm hyperparameters. This is only supported on a subset of
    // models. Note that the current implementation of group norm,
    // instantiated via tf.contrib.layers.group_norm(), only supports
    // fixed_size_resizer for image preprocessing.
    GroupNorm group_norm = 7;
  }

  // Whether depthwise convolutions should be regularized. If this parameter
  // is NOT set, then the conv hyperparams will default to the parent scope.
  optional bool regularize_depthwise = 6 [default = false];
}
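
// A minimal text-format (pbtxt) sketch of a Hyperparams message as it might
// appear in a pipeline config. This example is an illustration, not part of
// this schema; all numeric values below are assumptions:
//
//   op: CONV
//   regularizer {
//     l2_regularizer { weight: 0.0004 }
//   }
//   initializer {
//     truncated_normal_initializer { mean: 0.0 stddev: 0.03 }
//   }
//   activation: RELU_6
//   batch_norm {
//     decay: 0.999
//     scale: true
//   }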
// Proto with one-of field for regularizers.
message Regularizer {
  oneof regularizer_oneof {
    L1Regularizer l1_regularizer = 1;
    L2Regularizer l2_regularizer = 2;
  }
}

// Configuration proto for L1 Regularizer. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/l1_regularizer
message L1Regularizer {
  optional float weight = 1 [default = 1.0];
}

// Configuration proto for L2 Regularizer. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/l2_regularizer
message L2Regularizer {
  optional float weight = 1 [default = 1.0];
}

// Proto with one-of field for initializers.
message Initializer {
  oneof initializer_oneof {
    TruncatedNormalInitializer truncated_normal_initializer = 1;
    VarianceScalingInitializer variance_scaling_initializer = 2;
    RandomNormalInitializer random_normal_initializer = 3;
  }
}

// Configuration proto for truncated normal initializer. See
// https://www.tensorflow.org/api_docs/python/tf/truncated_normal_initializer
message TruncatedNormalInitializer {
  optional float mean = 1 [default = 0.0];
  optional float stddev = 2 [default = 1.0];
}

// Configuration proto for variance scaling initializer. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/
// variance_scaling_initializer
message VarianceScalingInitializer {
  optional float factor = 1 [default = 2.0];
  optional bool uniform = 2 [default = false];
  enum Mode {
    FAN_IN = 0;
    FAN_OUT = 1;
    FAN_AVG = 2;
  }
  optional Mode mode = 3 [default = FAN_IN];
}
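
// Rough intuition (an assumption based on the referenced tf.contrib
// implementation, not stated in this file): with uniform = false and
// mode = FAN_IN, weights are drawn from a truncated normal with stddev on
// the order of sqrt(factor / fan_in), so the default factor = 2.0 roughly
// corresponds to He initialization for ReLU layers.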
// Configuration proto for random normal initializer. See
// https://www.tensorflow.org/api_docs/python/tf/random_normal_initializer
message RandomNormalInitializer {
  optional float mean = 1 [default = 0.0];
  optional float stddev = 2 [default = 1.0];
}
// Configuration proto for batch norm to apply after convolution op. See
// https://www.tensorflow.org/api_docs/python/tf/contrib/layers/batch_norm
message BatchNorm {
  optional float decay = 1 [default = 0.999];
  optional bool center = 2 [default = true];
  optional bool scale = 3 [default = false];
  optional float epsilon = 4 [default = 0.001];

  // Whether to train the batch norm variables. If this is set to false
  // during training, the current values of the batch norm variables are
  // used for the forward pass but they are never updated.
  optional bool train = 5 [default = true];
}
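
// Illustrative text-format example (values are assumptions): freezing the
// batch norm statistics during fine-tuning, so they are used in the forward
// pass but never updated:
//
//   batch_norm {
//     decay: 0.9997
//     center: true
//     scale: true
//     epsilon: 0.001
//     train: false
//   }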
// Configuration proto for group normalization to apply after convolution op.
// See https://arxiv.org/abs/1803.08494
message GroupNorm {
}
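
// Since GroupNorm currently defines no fields, selecting it in
// normalizer_oneof reduces to an empty block in text format:
//
//   group_norm {}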