/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_INCLUDE_TRAIN_TRAIN_CFG_H_
#define MINDSPORE_LITE_INCLUDE_TRAIN_TRAIN_CFG_H_
#include <cstdint>
#include <string>

namespace mindspore {
namespace lite {

/// \brief MixPrecisionCfg holds the mixed precision training configuration.
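///
/// \par Example
/// A minimal usage sketch (field names are the ones declared below; how the
/// config is consumed by a training session is assumed, not shown here):
/// \code
/// MixPrecisionCfg mp_cfg;
/// mp_cfg.dynamic_loss_scale_ = true;     // let the runtime adapt the loss scale
/// mp_cfg.loss_scale_ = 1024.0f;          // larger initial loss scale
/// mp_cfg.num_of_not_nan_iter_th_ = 500;  // raise the scale after 500 stable iterations
/// \endcode
/// With dynamic loss scaling enabled, the common scheme (a sketch; the exact
/// logic lives in the training-session implementation, not in this header) is
/// to shrink the scale when gradients overflow and to grow it again after
/// num_of_not_nan_iter_th_ consecutive overflow-free iterations.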
class MixPrecisionCfg {
 public:
  MixPrecisionCfg() {
    this->dynamic_loss_scale_ = false;
    this->loss_scale_ = 128.0f;
    this->keep_batchnorm_fp32_ = true;
    this->num_of_not_nan_iter_th_ = 1000;
    this->is_raw_mix_precision_ = false;
  }
  MixPrecisionCfg(const MixPrecisionCfg &rhs) {
    this->dynamic_loss_scale_ = rhs.dynamic_loss_scale_;
    this->loss_scale_ = rhs.loss_scale_;
    this->keep_batchnorm_fp32_ = rhs.keep_batchnorm_fp32_;
    this->num_of_not_nan_iter_th_ = rhs.num_of_not_nan_iter_th_;
    this->is_raw_mix_precision_ = rhs.is_raw_mix_precision_;
  }
  MixPrecisionCfg &operator=(const MixPrecisionCfg &rhs) {
    this->dynamic_loss_scale_ = rhs.dynamic_loss_scale_;
    this->loss_scale_ = rhs.loss_scale_;
    this->keep_batchnorm_fp32_ = rhs.keep_batchnorm_fp32_;
    this->num_of_not_nan_iter_th_ = rhs.num_of_not_nan_iter_th_;
    this->is_raw_mix_precision_ = rhs.is_raw_mix_precision_;
    return *this;
  }
  bool dynamic_loss_scale_ = false;   /**< Enable/disable dynamic loss scaling during mixed precision training */
  float loss_scale_;                  /**< Initial loss scale factor */
  bool keep_batchnorm_fp32_ = true;   /**< Keep batch norm in FP32 while training */
  uint32_t num_of_not_nan_iter_th_;   /**< Threshold of consecutive non-NaN iterations for adjusting the loss scale when dynamic loss scaling is enabled */
  bool is_raw_mix_precision_ = false; /**< Whether the mixed precision model was exported from MindSpore */
};

/// \brief TrainCfg holds the training configuration.
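///
/// \par Example
/// A minimal usage sketch (field names are the ones declared below; the
/// training-session API that consumes this config is assumed, not shown):
/// \code
/// TrainCfg cfg;
/// cfg.loss_name_ = "loss";           // match kernels whose name contains "loss"
/// cfg.accumulate_gradients_ = true;  // keep gradients readable via GetGradients
/// cfg.mix_precision_cfg_.loss_scale_ = 256.0f;
/// \endcode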
class TrainCfg {
 public:
  TrainCfg() { this->loss_name_ = "_loss_fn"; }
  TrainCfg(const TrainCfg &rhs) {
    this->loss_name_ = rhs.loss_name_;
    this->mix_precision_cfg_ = rhs.mix_precision_cfg_;
    this->accumulate_gradients_ = rhs.accumulate_gradients_;
  }
  TrainCfg &operator=(const TrainCfg &rhs) {
    this->loss_name_ = rhs.loss_name_;
    this->mix_precision_cfg_ = rhs.mix_precision_cfg_;
    this->accumulate_gradients_ = rhs.accumulate_gradients_;
    return *this;
  }
  std::string loss_name_;             /**< Substring of the name that identifies a loss kernel */
  MixPrecisionCfg mix_precision_cfg_; /**< Mixed precision configuration */
  bool accumulate_gradients_ = false; /**< If true, gradients are accumulated and can be read via GetGradients */
};

}  // namespace lite
}  // namespace mindspore
#endif  // MINDSPORE_LITE_INCLUDE_TRAIN_TRAIN_CFG_H_