Home
last modified time | relevance | path

Searched defs:beta2 (Results 1 – 25 of 35) sorted by relevance

12

/third_party/mindspore/tests/st/fl/mobile/src/
Dadam.py33 def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, opt… argument
48 def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag… argument
109 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
176 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
192 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
200 def _check_param_value(beta1, beta2, eps, prim_name): argument
282 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
392 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
/third_party/mindspore/tests/st/fl/albert/src/
Dadam.py34 def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, opt… argument
49 def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag… argument
110 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
176 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
192 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
200 def _check_param_value(beta1, beta2, eps, prim_name): argument
282 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
392 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
/third_party/mindspore/tests/st/fl/hybrid_lenet/src/
Dadam.py33 def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, opt… argument
48 def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag… argument
109 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
175 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
191 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
199 def _check_param_value(beta1, beta2, eps, prim_name): argument
281 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
391 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
/third_party/mindspore/mindspore/nn/optim/
Dadam.py37 def _update_run_op(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flag, optim_fi… argument
94 … beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): argument
155 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
171 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
179 def _check_param_value(beta1, beta2, eps, prim_name): argument
316 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
483 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
629 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
Dlamb.py39 def _update_run_op(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_f… argument
121 def _update_run_op_ascend(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, … argument
164 def _check_param_value(beta1, beta2, eps, prim_name): argument
295 def __init__(self, params, learning_rate, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): argument
Dlazyadam.py34 …th_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, beta2_power, argument
81 …ne_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, beta2_power, argument
95 def _check_param_value(beta1, beta2, eps, weight_decay, prim_name): argument
236 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/
Dadam_impl.cu30 …d ApplyAdamKernel(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, in ApplyAdamKernel()
45 … const float *beta1, const float *beta2, const float *epsilon, const float *decay, in AdamWeightDecayKernel()
59 … const float *beta1, const float *beta2, const float *epsilon, const float *decay, in AdamWeightDecayKernel()
72 void ApplyAdam(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, co… in ApplyAdam()
79 … const float *beta2, const float *epsilon, const float *decay, T *variable, T *m, T *v, in AdamWeightDecayOp()
Dadam_weight_decay_impl.cu22 … const float *one_sub_beta1, const float *beta2, const float *one_sub_beta2, in AdamWeightDecayKernel()
40 … const float *beta2, const float *one_sub_beta2, const float *epsilon, const float *lr, in AdamWeightDecay()
/third_party/mindspore/tests/st/ops/graph_kernel/
Dtest_fused_adam.py47 …def construct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tens… argument
91 …def construct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tens… argument
119 def CalFusedAdam(beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, … argument
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32_grad/
Dadam.cc34 …float *m, float *v, const float *gradient, float *weight, float beta1, float beta2, float beta1_po… in DoAdam()
73 auto beta2 = reinterpret_cast<float *>(in_tensors_.at(7)->MutableData())[0]; in Execute() local
144 auto beta2 = reinterpret_cast<float *>(in_tensors_.at(7)->MutableData())[0]; in OptimizerStep() local
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/fp32/
Dadam_fp32.c20 int AdamFp32(float *var, float *m, float *v, float lr, float beta1, float beta2, float epsilon, con… in AdamFp32()
89 int AdamDeltaFp32(float *delta, float *m, float *v, float lr, float beta1, float beta2, float epsil… in AdamDeltaFp32()
155 int AdamWeightDecayFp32(float *var, float *m, float *v, float lr, float beta1, float beta2, float e… in AdamWeightDecayFp32()
209 size_t FusedCastAdamFp32(float *var, float *m, float *v, float lr, float beta1, float beta2, float … in FusedCastAdamFp32()
255 size_t FusedCastAdamFp16(int16_t *var16, float *m, float *v, float lr, float beta1, float beta2, fl… in FusedCastAdamFp16()
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
Dsparse_apply_adam_cpu_kernel.cc34 const auto beta2 = input_params->beta2_; in ComputeAdam() local
63 const auto beta2 = input_params->beta2_; in ComputeMomentum() local
155 auto beta2 = reinterpret_cast<float *>(inputs[7]->addr)[0]; in LaunchKernel() local
Dadam_delta_cpu_kernel.cc35 void AdamDeltaCPUKernel::LaunchAdamDelta(T *delta, T *m, T *v, float lr, float beta1, float beta2, … in LaunchAdamDelta()
123 auto beta2 = reinterpret_cast<float *>(inputs[6]->addr)[0]; in Launch() local
Dadam_weight_decay_cpu_kernel.cc43 T beta2 = static_cast<T>(reinterpret_cast<float *>(inputs[BETA2]->addr)[kScalarIndex]); in LaunchAdamWeightDecay() local
74 auto beta2 = reinterpret_cast<float *>(inputs[BETA2]->addr)[kScalarIndex]; in LaunchAdamWeightDecayNnacl() local
Dadam_cpu_kernel.cc40 T beta2 = static_cast<T>(reinterpret_cast<float *>(inputs[BETA1]->addr)[kScalarIndex]); in LaunchAdam() local
75 float beta2 = reinterpret_cast<float *>(inputs[BETA2]->addr)[kScalarIndex]; in LaunchAdamNnacl() local
Dfused_cast_adam_weight_decay_cpu_kernel.cc32 auto beta2 = reinterpret_cast<float *>(inputs[BETA2]->addr)[kScalarIndex]; in LaunchFusedCastAdamFp32() local
66 auto beta2 = reinterpret_cast<float *>(inputs[BETA2]->addr)[kScalarIndex]; in LaunchFusedCastAdamFp16() local
Dsparse_apply_lazy_adam_cpu_kernel.cc35 const auto beta2 = input_params->beta2_; in ComputeLazyAdam() local
135 auto beta2 = reinterpret_cast<float *>(inputs[7]->addr)[0]; in LaunchKernel() local
/third_party/mindspore/tests/st/ops/cpu/
Dtest_sparse_apply_adam_op.py29 beta2 = 0.999 variable
/third_party/mindspore/tests/st/ops/ascend/test_aicpu_ops/
Dtest_fused_sparse_lazy_adam.py29 beta2 = 0.999 variable
Dtest_fused_sparse_adam.py29 beta2 = 0.999 variable
/third_party/mindspore/tests/st/ops/gpu/
Dtest_adam_fusion.py46 def construct(self, beta1, beta2, gradient, eps, weight_decay_tensor, lr): argument
/third_party/mindspore/tests/ut/python/ir/
Dtest_row_tensor.py161 def _update_run_op_for_map_row_tensor(beta1, beta2, eps, lr, weight_decay_tensor, param, argument
167 def _update_run_op_for_map_tensor(beta1, beta2, eps, lr, weight_decay_tensor, param, argument
199 def _check_param_value(beta1, beta2, eps, weight_decay, prim_name): argument
212 … def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0, argument
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/
Dfused_adam_weight_decay.h67 float *beta2 = GetDeviceAddress<float>(inputs, 2); in Launch() local
/third_party/ffmpeg/libavcodec/
Drv40dsp.c572 int beta, int beta2, in rv40_loop_filter_strength()
607 int beta, int beta2, int edge, in rv40_h_loop_filter_strength()
614 int beta, int beta2, int edge, in rv40_v_loop_filter_strength()
/third_party/mindspore/tests/st/auto_monad/
Dtest_auto_monad_expression.py34 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument

12