| /third_party/mindspore/tests/st/model_zoo_tests/yolov3_darknet53/src/ |
| D | lr_scheduler.py | 29 def warmup_step_lr(lr, lr_epochs, steps_per_epoch, warmup_epochs, max_epoch, gamma=0.1): argument
|   |   | 54 def multi_step_lr(lr, milestones, steps_per_epoch, max_epoch, gamma=0.1): argument
|   |   | 58 def step_lr(lr, epoch_size, steps_per_epoch, max_epoch, gamma=0.1): argument
|   |   | 66 def warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch, T_max, eta_min=0): argument
|   |   | 85 def warmup_cosine_annealing_lr_V2(lr, steps_per_epoch, warmup_epochs, max_epoch, T_max, eta_min=0): argument
|   |   | 116 def warmup_cosine_annealing_lr_sample(lr, steps_per_epoch, warmup_epochs, max_epoch, T_max, eta_min… argument
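The schedule builders listed above combine a linear warmup phase with step or cosine decay and return one learning rate per training step. As a rough illustration of the warmup plus cosine-annealing variant, here is a minimal Python sketch, assuming linear warmup to the base lr and the standard annealing formula; the actual yolov3 implementation may shape the warmup and handle T_max differently.

```python
import math

def warmup_cosine_annealing_lr_sketch(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0.0):
    """Return one learning rate per training step (hypothetical re-implementation)."""
    total_steps = steps_per_epoch * max_epoch
    warmup_steps = steps_per_epoch * warmup_epochs
    lr_each_step = []
    for step in range(total_steps):
        epoch = step // steps_per_epoch
        if warmup_steps and step < warmup_steps:
            # Linear warmup from 0 up to the base learning rate.
            cur_lr = lr * (step + 1) / warmup_steps
        else:
            # Cosine annealing over epochs, bounded below by eta_min.
            cur_lr = eta_min + (lr - eta_min) * (1.0 + math.cos(math.pi * epoch / t_max)) / 2.0
        lr_each_step.append(cur_lr)
    return lr_each_step

# Example: 10 epochs of 100 steps, 2 warmup epochs.
schedule = warmup_cosine_annealing_lr_sketch(0.1, 100, 2, 10, t_max=10)
print(schedule[0], schedule[199], schedule[-1])
```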
|
| /third_party/mindspore/mindspore/lite/src/train/ |
| D | lr_scheduler.cc | 32 int MultiplicativeLRLambda(float *lr, int epoch, void *lr_cb_data) { in MultiplicativeLRLambda()
|   |   | 42 int StepLRLambda(float *lr, int epoch, void *lr_cb_data) { in StepLRLambda()
|   |   | 60 float lr = cb_data.session_->GetLearningRate(); in EpochEnd() local
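These callbacks adjust the learning rate in place once per epoch and return a flag telling the lite session whether to pick up the new value. A rough Python analogue of the step-decay behaviour, assuming a step_size/gamma pair like the data normally passed through lr_cb_data; the names and return convention here are illustrative, not the lite API.

```python
def step_lr_lambda(lr, epoch, step_size, gamma):
    """Multiply lr by gamma once every `step_size` epochs; return (new_lr, updated)."""
    if step_size == 0 or epoch % step_size != 0:
        return lr, False          # analogue of "do not update lr"
    return lr * gamma, True       # analogue of "update lr"

lr = 0.01
for epoch in range(1, 7):
    lr, updated = step_lr_lambda(lr, epoch, step_size=2, gamma=0.5)
    print(epoch, lr, updated)
```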
|
| /third_party/selinux/libselinux/src/ |
| D | label.c | 143 struct selabel_lookup_rec *lr, in selabel_fini()
|   |   | 160 struct selabel_lookup_rec *lr; in selabel_lookup_common() local
|   |   | 181 struct selabel_lookup_rec *lr; in selabel_lookup_bm_common() local
|   |   | 241 struct selabel_lookup_rec *lr; in selabel_lookup() local
|   |   | 254 struct selabel_lookup_rec *lr; in selabel_lookup_raw() local
|   |   | 304 struct selabel_lookup_rec *lr; in selabel_lookup_best_match() local
|   |   | 322 struct selabel_lookup_rec *lr; in selabel_lookup_best_match_raw() local
|
| /third_party/icu/icu4c/source/layoutex/ |
| D | plruns.cpp | 426 ULocRuns *lr = (ULocRuns *) localeRuns; in pl_closeLocaleRuns() local
|   |   | 434 const ULocRuns *lr = (const ULocRuns *) localeRuns; in pl_getLocaleRunCount() local
|   |   | 446 ULocRuns *lr = (ULocRuns *) localeRuns; in pl_resetLocaleRuns() local
|   |   | 456 const ULocRuns *lr = (const ULocRuns *) localeRuns; in pl_getLocaleRunLastLimit() local
|   |   | 469 const ULocRuns *lr = (const ULocRuns *) localeRuns; in pl_getLocaleRunLimit() local
|   |   | 482 const ULocRuns *lr = (const ULocRuns *) localeRuns; in pl_getLocaleRunLocale() local
|   |   | 496 ULocRuns *lr = (ULocRuns *) localeRuns; in pl_addLocaleRun() local
|
| /third_party/mindspore/tests/st/auto_monad/ |
| D | test_effect_optimizer.py | 34 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
|   |   | 71 def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad): argument
|   |   | 106 def construct(self, lr, rho, epsilon, grad): argument
|   |   | 138 def construct(self, lr, grad): argument
|   |   | 166 def construct(self, lr, grad): argument
|   |   | 194 def construct(self, lr, alpha, sign_decay, beta, grad): argument
|   |   | 261 def construct(self, grad, lr, l1, l2, lr_power): argument
|   |   | 319 def construct(self, lr, grad, momentum): argument
|   |   | 348 def construct(self, lr, logbase, sign_decay, beta, grad): argument
|   |   | 380 def construct(self, lr, l1, l2, grad): argument
|   |   | [all …]
|
| /third_party/mindspore/tests/st/ops/graph_kernel/ |
| D | test_lamb_apply_weight_assign.py | 31 def construct(self, w_norm, g_norm, lr, update): argument
|   |   | 35 def get_output(w_norm, g_norm, lr, update, param, enable_graph_kernel=False): argument
|
| D | test_fused_adam.py | 47 …struct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr): argument
|   |   | 91 …struct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr): argument
|   |   | 119 …(beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, lr, param, m, v, argument
|
| /third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/ |
| D | sgd_impl.cu | 22 const T *momentum, const T *lr, T *param, T *accum, T *stat) { in SGDKernel()
|   |   | 49 void SGD(const int size, const T dampening, const T weight_decay, const bool nesterov, const T *lr,… in SGD()
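SGDKernel's parameters (dampening, weight_decay, nesterov, a momentum accumulator) match the conventional momentum-SGD update. A NumPy sketch of that rule under the usual formulation; the CUDA kernel's handling of the stat flag on the first step may differ.

```python
import numpy as np

def sgd_update(param, grad, accum, lr, momentum, dampening=0.0,
               weight_decay=0.0, nesterov=False):
    """One momentum-SGD step; returns the updated parameter and accumulator."""
    if weight_decay:
        grad = grad + weight_decay * param           # L2 regularization folded into the gradient
    accum = momentum * accum + (1.0 - dampening) * grad
    step = grad + momentum * accum if nesterov else accum
    param = param - lr * step
    return param, accum

p = np.array([1.0, -2.0]); g = np.array([0.1, 0.3]); a = np.zeros(2)
p, a = sgd_update(p, g, a, lr=0.1, momentum=0.9, nesterov=True)
print(p, a)
```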
|
| D | adam_weight_decay_impl.cu | 23 … const float *epsilon, const float *lr, const float *weight_decay, T *m, T *v, in AdamWeightDecayKernel()
|   |   | 40 … const float *beta2, const float *one_sub_beta2, const float *epsilon, const float *lr, in AdamWeightDecay()
|
| /third_party/mindspore/tests/st/ops/gpu/ |
| D | test_sparse_apply_proximal_adagrad_op.py | 27 def __init__(self, var, accum, lr, l1, l2): argument
|   |   | 40 def add_testcase(var, accum, lr, l1, l2, grad, indices): argument
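This test exercises the sparse proximal-Adagrad op on the rows selected by indices. Below is a dense NumPy sketch of the update rule commonly associated with this op (soft-thresholding with l1, shrinkage with l2); treat it as an approximation of the kernel's behaviour rather than its exact semantics.

```python
import numpy as np

def sparse_proximal_adagrad(var, accum, lr, l1, l2, grad, indices):
    """Update only the rows listed in `indices` (dense sketch of the sparse op)."""
    for g, i in zip(grad, indices):
        accum[i] += g * g
        lr_t = lr / np.sqrt(accum[i])
        prox = var[i] - lr_t * g
        if l1 > 0:
            var[i] = np.sign(prox) * np.maximum(np.abs(prox) - lr_t * l1, 0) / (1 + lr_t * l2)
        else:
            var[i] = prox / (1 + lr_t * l2)
    return var, accum

var = np.ones((4, 2)); accum = np.full((4, 2), 0.1)
grad = np.array([[0.3, 0.2], [0.1, 0.4]]); idx = [0, 2]
print(sparse_proximal_adagrad(var, accum, lr=0.01, l1=0.0, l2=0.0, grad=grad, indices=idx)[0])
```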
|
| D | test_rmsprop.py | 30 def __init__(self, lr, decay, momentum, epsilon, var, g, mg, rms, mom): argument
|   |   | 49 def __init__(self, lr, decay, momentum, epsilon, var, g, mg, rms, mom): argument
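Both networks hold the full RMSProp state (mg for the centered variant, rms for the running mean square, mom for momentum). A NumPy sketch of the centered update such tests are usually checked against; the non-centered form simply drops mg.

```python
import numpy as np

def rmsprop_centered(var, g, mg, rms, mom, lr, decay, momentum, epsilon):
    """One centered-RMSProp step; returns the updated state tensors."""
    mg = decay * mg + (1 - decay) * g                  # running mean of gradients
    rms = decay * rms + (1 - decay) * g * g            # running mean of squared gradients
    denom = np.sqrt(rms - mg * mg + epsilon)           # centered second moment
    mom = momentum * mom + lr * g / denom
    var = var - mom
    return var, mg, rms, mom

var, g = np.ones(3), np.array([0.1, 0.2, 0.3])
mg, rms, mom = np.zeros(3), np.zeros(3), np.zeros(3)
var, mg, rms, mom = rmsprop_centered(var, g, mg, rms, mom,
                                     lr=0.01, decay=0.9, momentum=0.9, epsilon=1e-7)
print(var)
```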
|
| /third_party/mindspore/tests/st/fl/mobile/src/ |
| D | adam.py | 33 def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, opt… argument
|   |   | 48 def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag… argument
|   |   | 109 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
|   |   | 176 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
|   |   | 192 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
|
| /third_party/mindspore/tests/st/fl/hybrid_lenet/src/ |
| D | adam.py | 33 def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, opt… argument
|   |   | 48 def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag… argument
|   |   | 109 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
|   |   | 175 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
|   |   | 191 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
|
| /third_party/mindspore/tests/st/fl/albert/src/ |
| D | adam.py | 34 def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, opt… argument
|   |   | 49 def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag… argument
|   |   | 110 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
|   |   | 176 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
|   |   | 192 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
|
| /third_party/boost/libs/math/test/compile_test/ |
| D | instantiate.hpp | 1001 long double lr = 0.5L; in instantiate_mixed() local
|   |   | 1004 double lr = 0.5L; in instantiate_mixed() local
|
| /third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/mindir/ |
| D | optimizer_unify_output.cc | 69 VarPtr lr = std::make_shared<Var>(); in DefinePattern() local
|   |   | 85 VarPtr lr = std::make_shared<Var>(); in DefinePattern() local
|   |   | 115 VarPtr lr = std::make_shared<Var>(); in DefinePattern() local
|
| /third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/ |
| D | combine_momentum_gpu_kernel.h | 47 T *lr = GetDeviceAddress<T>(inputs, i * input_num_ + 3); in Launch() local
|   |   | 56 T *lr = GetDeviceAddress<T>(inputs, i * input_num_ + 4); in Launch() local
|
| /third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ |
| D | apply_adagrad_cpu_kernel.cc | 70 const auto *lr = reinterpret_cast<T *>(inputs[2]->addr); in LaunchKernel() local
|   |   | 92 void ApplyAdagradCPUKernel::LaunchApplyAdagrad(T *var, T *accum, const T *lr, const T *gradient, si… in LaunchApplyAdagrad()
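LaunchApplyAdagrad receives var, accum, lr and the gradient, which corresponds to the plain Adagrad rule. A NumPy sketch, assuming squared gradients are accumulated before scaling; the kernel's exact slot-update and epsilon handling may differ.

```python
import numpy as np

def apply_adagrad(var, accum, lr, grad, update_slots=True):
    """One Adagrad step: grow the squared-gradient accumulator, then scale the update."""
    if update_slots:
        accum = accum + grad * grad
    var = var - lr * grad / np.sqrt(accum)
    return var, accum

var = np.array([1.0, 2.0]); accum = np.array([0.1, 0.1]); grad = np.array([0.3, -0.2])
print(apply_adagrad(var, accum, lr=0.01, grad=grad))
```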
|
| D | adam_delta_cpu_kernel.cc | 35 void AdamDeltaCPUKernel::LaunchAdamDelta(T *delta, T *m, T *v, float lr, float beta1, float beta2, … in LaunchAdamDelta()
|   |   | 121 auto lr = reinterpret_cast<float *>(inputs[4]->addr)[0]; in Launch() local
|
| /third_party/boost/libs/geometry/doc/src/examples/algorithms/ |
| D | assign_box_corners.cpp | 29 point ll, lr, ul, ur; in main() local
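In this Boost.Geometry example, lr is not a learning rate but the lower-right corner of a 2D box. A small Python sketch of what assign_box_corners yields for an axis-aligned box given by its min and max corners:

```python
def box_corners(min_corner, max_corner):
    """Return (lower-left, lower-right, upper-left, upper-right) of an axis-aligned box."""
    (min_x, min_y), (max_x, max_y) = min_corner, max_corner
    ll = (min_x, min_y)
    lr = (max_x, min_y)
    ul = (min_x, max_y)
    ur = (max_x, max_y)
    return ll, lr, ul, ur

print(box_corners((0.0, 0.0), (3.0, 2.0)))
```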
|
| /third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/ |
| D | adam_fp32.c | 20 int AdamFp32(float *var, float *m, float *v, float lr, float beta1, float beta2, float epsilon, con… in AdamFp32()
|   |   | 89 int AdamDeltaFp32(float *delta, float *m, float *v, float lr, float beta1, float beta2, float epsil… in AdamDeltaFp32()
|   |   | 155 int AdamWeightDecayFp32(float *var, float *m, float *v, float lr, float beta1, float beta2, float e… in AdamWeightDecayFp32()
|   |   | 209 size_t FusedCastAdamFp32(float *var, float *m, float *v, float lr, float beta1, float beta2, float … in FusedCastAdamFp32()
|   |   | 255 size_t FusedCastAdamFp16(int16_t *var16, float *m, float *v, float lr, float beta1, float beta2, fl… in FusedCastAdamFp16()
|
| /third_party/mindspore/mindspore/nn/optim/ |
| D | adam.py | 37 def _update_run_op(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flag, optim_fi… argument
|   |   | 94 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
|   |   | 155 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
|   |   | 171 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
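The _update_run_op signature here carries the decoupled-weight-decay (AdamW-style) update used by AdamWeightDecay. A NumPy sketch of that rule, assuming lr is already bias-corrected by the caller and that the decay term is added to the update rather than the gradient, as in the common AdamW formulation.

```python
import numpy as np

def adam_weight_decay_step(param, m, v, gradient, lr, beta1, beta2, eps,
                           weight_decay, decay_flag=True):
    """One AdamW-style step with decoupled weight decay (sketch)."""
    m = beta1 * m + (1 - beta1) * gradient
    v = beta2 * v + (1 - beta2) * gradient * gradient
    update = m / (np.sqrt(v) + eps)
    if decay_flag:
        update = update + weight_decay * param      # decay applied to the update, not the gradient
    param = param - lr * update
    return param, m, v

p, m, v = np.array([0.5, -0.5]), np.zeros(2), np.zeros(2)
p, m, v = adam_weight_decay_step(p, m, v, np.array([0.1, 0.2]),
                                 lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.01)
print(p)
```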
|
| D | lamb.py | 39 def _update_run_op(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_f… argument
|   |   | 121 def _update_run_op_ascend(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, … argument
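The Lamb update applies a layer-wise trust ratio on top of an Adam-style step. A hedged NumPy sketch of the usual formulation (bias correction from global_step, trust ratio norm(param)/norm(update) with guards for zero norms); the _update_run_op_ascend variant fuses these pieces differently.

```python
import numpy as np

def lamb_step(param, m, v, gradient, lr, beta1, beta2, eps,
              weight_decay, global_step, decay_flag=True):
    """One LAMB step: Adam-style moments, then layer-wise trust-ratio scaling (sketch)."""
    m = beta1 * m + (1 - beta1) * gradient
    v = beta2 * v + (1 - beta2) * gradient * gradient
    m_hat = m / (1 - beta1 ** global_step)             # bias correction
    v_hat = v / (1 - beta2 ** global_step)
    update = m_hat / (np.sqrt(v_hat) + eps)
    if decay_flag:
        update = update + weight_decay * param
    w_norm = np.linalg.norm(param)
    u_norm = np.linalg.norm(update)
    trust_ratio = w_norm / u_norm if w_norm > 0 and u_norm > 0 else 1.0
    param = param - lr * trust_ratio * update
    return param, m, v

p, m, v = np.array([0.3, -0.7]), np.zeros(2), np.zeros(2)
p, m, v = lamb_step(p, m, v, np.array([0.05, -0.02]), lr=1e-3, beta1=0.9,
                    beta2=0.999, eps=1e-6, weight_decay=0.01, global_step=1)
print(p)
```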
|
| /third_party/mindspore/mindspore/lite/src/cxx_api/callback/ |
| D | lr_scheduler.cc | 26 int StepLRLambda(float *lr, int epoch, void *lr_cb_data) { in StepLRLambda()
|
| /third_party/mindspore/tests/st/ops/cpu/ |
| D | test_rmsprop.py | 30 def __init__(self, lr, decay, momentum, epsilon, var, g, mg, rms, mom): argument
|   |   | 49 def __init__(self, lr, decay, momentum, epsilon, var, g, mg, rms, mom): argument
|