Home
last modified time | relevance | path

Searched defs:beta1_power (Results 1 – 20 of 20) sorted by relevance

/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32_grad/
adam.cc 34 float *m, float *v, const float *gradient, float *weight, float beta1, float beta2, float beta1_power, in DoAdam()
69 auto beta1_power = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData())[0]; in Execute() local
140 auto beta1_power = reinterpret_cast<float *>(in_tensors_.at(3)->MutableData())[0]; in OptimizerStep() local
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
adam_cpu_kernel.cc 36 float beta1_power = reinterpret_cast<float *>(inputs[BETA1_POWER]->addr)[kScalarIndex]; in LaunchAdam() local
71 float beta1_power = reinterpret_cast<float *>(inputs[BETA1_POWER]->addr)[kScalarIndex]; in LaunchAdamNnacl() local
adam_delta_cpu_kernel.cc 116 auto beta1_power = reinterpret_cast<float *>(inputs[2]->addr)[0]; in Launch() local
sparse_apply_lazy_adam_cpu_kernel.cc 128 auto beta1_power = reinterpret_cast<float *>(inputs[3]->addr)[0]; in LaunchKernel() local
sparse_apply_adam_cpu_kernel.cc 148 auto beta1_power = reinterpret_cast<float *>(inputs[3]->addr)[0]; in LaunchKernel() local
/third_party/mindspore/tests/st/ops/cpu/
test_sparse_apply_adam_op.py 25 beta1_power = 0.9 variable
/third_party/mindspore/tests/st/ops/ascend/test_aicpu_ops/
test_fused_sparse_lazy_adam.py 25 beta1_power = 0.9 variable
test_fused_sparse_adam.py 25 beta1_power = 0.9 variable
/third_party/mindspore/tests/st/fl/mobile/src/
adam.py 108 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, argument
176 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
192 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
/third_party/mindspore/tests/st/fl/albert/src/
adam.py 109 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, argument
176 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
192 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
/third_party/mindspore/tests/st/fl/hybrid_lenet/src/
adam.py 108 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, argument
175 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
191 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
/third_party/mindspore/mindspore/nn/optim/
lazyadam.py 34 …th_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, beta2_power, argument
81 …ne_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, beta2_power, argument
adam.py 93 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, argument
155 beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, argument
171 def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment… argument
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/
adam_impl.cu 30 __global__ void ApplyAdamKernel(const size_t size, const T *gradient, const T *beta1_power, const T… in ApplyAdamKernel()
72 void ApplyAdam(const size_t size, const T *gradient, const T *beta1_power, const T *beta2_power, co… in ApplyAdam()
/third_party/mindspore/tests/st/auto_monad/
test_auto_monad_expression.py 34 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
test_effect_optimizer.py 34 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
71 def construct(self, beta1_power, lr, beta1, beta2, epsilon, grad): argument
466 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): argument
536 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad, indices): argument
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/
adam_gpu_kernel.h 57 T *beta1_power = GetDeviceAddress<T>(inputs, 3); in Launch() local
/third_party/mindspore/tests/ut/python/optimizer/
test_auto_grad.py 292 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
303 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
333 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
344 def construct(self, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad): argument
/third_party/mindspore/mindspore/ccsrc/ps/
optimizer_info_builder.cc 184 …AddressPtr beta1_power = GenInputAddrPtr<float>(kSparseAdam, "beta1_power", const_cast<float *>(va… in BuildInputs() local
optimizer_info.cc 306 const AddressPtr &beta1_power, const AddressPtr &beta2_power, in SparseAdamOptimInfo()