| /third_party/mindspore/mindspore/core/ops/ |
| D | apply_momentum.cc | 28 void ApplyMomentum::Init(const bool use_nesterov, const bool use_locking, const float gradient_scal… in Init()
| D | apply_momentum.cc | 38 void ApplyMomentum::set_use_locking(const bool use_locking) { in set_use_locking()
|
| D | adam.cc | 55 void Adam::Init(const bool use_locking, const bool use_nesterov) { in Init()
| D | adam.cc | 60 void Adam::set_use_locking(const bool use_locking) { (void)this->AddAttr(kUseLocking, MakeValue(use… in set_use_locking()
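
The `Init` and `set_use_locking` methods above only register the flag as a primitive attribute (`kUseLocking`); the value itself comes from the Python primitive's constructor, and a matching `(use_locking=False, use_nesterov=False)` signature appears several times in the nn_ops.py listing further down. A minimal sketch, assuming the stock `ops.Adam` primitive and that the attribute is stored under the key `"use_locking"`:

```python
# Sketch only: the Python-side keyword argument is what Adam::set_use_locking()
# eventually stores as the kUseLocking attribute on the C++ primitive.
import mindspore.ops.operations as P

adam = P.Adam(use_locking=False, use_nesterov=False)
# Primitive attributes are exposed as a dict; the "use_locking" key is an
# assumption based on the kUseLocking constant used in adam.cc.
print(adam.attrs.get("use_locking"))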
|
| /third_party/mindspore/mindspore/nn/optim/ |
| D | lazyadam.py | 34 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
| D | lazyadam.py | 81 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_… argument
| D | lazyadam.py | 236 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
|
| D | proximal_ada_grad.py | 44 def _check_param_value(accum, l1, l2, use_locking, prim_name=None): argument
| D | proximal_ada_grad.py | 173 use_locking=False, loss_scale=1.0, weight_decay=0.0): argument
|
| D | adam.py | 93 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
| D | adam.py | 154 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
| D | adam.py | 316 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
| D | adam.py | 629 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
|
| D | ftrl.py | 59 def _check_param(initial_accum, lr_power, l1, l2, use_locking, prim_name=None): argument
| D | ftrl.py | 198 use_locking=False, loss_scale=1.0, weight_decay=0.0): argument
|
| D | rmsprop.py | 182 use_locking=False, centered=False, loss_scale=1.0, weight_decay=0.0): argument
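
All of the nn-layer optimizers listed in this directory (Adam, LazyAdam, FTRL, ProximalAdagrad, RMSProp) accept the same `use_locking` flag and forward it to their underlying apply-op primitives. A minimal usage sketch, assuming any trainable cell (the `nn.Dense` layer here is just a placeholder):

```python
# Minimal sketch: use_locking=True asks the underlying apply-op to guard the
# parameter/accumulator updates with a lock; the default everywhere is False.
import mindspore.nn as nn

net = nn.Dense(16, 4)  # placeholder network with trainable parameters
optimizer = nn.Adam(net.trainable_params(),
                    learning_rate=1e-3,
                    beta1=0.9, beta2=0.999, eps=1e-8,
                    use_locking=True)
```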
|
| /third_party/mindspore/tests/st/fl/mobile/src/ |
| D | adam.py | 108 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
| D | adam.py | 175 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
|
| /third_party/mindspore/tests/st/fl/hybrid_lenet/src/ |
| D | adam.py | 108 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
| D | adam.py | 174 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
|
| /third_party/mindspore/tests/st/fl/albert/src/ |
| D | adam.py | 109 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
| D | adam.py | 175 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
|
| /third_party/mindspore/tests/ut/python/ops/ |
| D | test_ops.py | 416 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 429 def __init__(self, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 442 def __init__(self, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 455 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 507 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 520 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 533 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
| D | test_ops.py | 1019 def __init__(self, use_locking=False): argument
| D | test_ops.py | 1034 def __init__(self, rho, momentum, epsilon, use_locking=False): argument
|
| /third_party/mindspore/mindspore/ops/operations/ |
| D | nn_ops.py | 2658 def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0): argument
| D | nn_ops.py | 3121 def __init__(self, use_locking=False): argument
| D | nn_ops.py | 3240 def __init__(self, use_locking=False): argument
| D | nn_ops.py | 4589 def __init__(self, use_locking=False, use_nesterov=False): argument
| D | nn_ops.py | 4697 def __init__(self, use_locking=False): argument
| D | nn_ops.py | 4810 def __init__(self, use_locking=False, use_nesterov=False): argument
| D | nn_ops.py | 4946 def __init__(self, use_locking=False, use_nesterov=False): argument
| D | nn_ops.py | 5097 def __init__(self, use_locking=False, use_nesterov=False): argument
| D | nn_ops.py | 5207 def __init__(self, lr, l1, l2, lr_power, use_locking=False): argument
| D | nn_ops.py | 5322 def __init__(self, use_locking=False): argument
| D | nn_ops.py | [all …]
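
The `(use_nesterov=False, use_locking=False, gradient_scale=1.0)` constructor at nn_ops.py:2658 matches the `ApplyMomentum` primitive declared in apply_momentum.cc above. A hedged sketch of driving it directly; the run-time input order (variable, accumulation, learning_rate, gradient, momentum) is assumed and is not part of the listing:

```python
# Sketch only: constructing the raw primitive with the flags from line 2658 and
# applying one momentum update in PyNative style. The input order
# (var, accum, lr, grad, momentum) is an assumption about the call signature.
import numpy as np
import mindspore.ops.operations as P
from mindspore import Parameter, Tensor

apply_momentum = P.ApplyMomentum(use_nesterov=False, use_locking=False, gradient_scale=1.0)

var = Parameter(Tensor(np.zeros((2, 2), np.float32)), name="var")
accum = Parameter(Tensor(np.zeros((2, 2), np.float32)), name="accum")
grad = Tensor(np.ones((2, 2), np.float32))
out = apply_momentum(var, accum, 0.01, grad, 0.9)  # lr=0.01, momentum=0.9
```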
|
| D | array_ops.py | 59 def __init__(self, use_locking=False): argument
| D | array_ops.py | 100 def __init__(self, use_locking=False): argument
| D | array_ops.py | 3989 def __init__(self, use_locking=True): argument
| D | array_ops.py | 4049 def __init__(self, use_locking=True): argument
| D | array_ops.py | 4273 def __init__(self, use_locking=False): argument
| D | array_ops.py | 4382 def __init__(self, use_locking=False): argument
|
| D | inner_ops.py | 539 def __init__(self, use_locking=False): argument
|
| /third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/ |
| D | sparse_ftrl_impl.cu | 86 … const float learning_rate_power, const bool use_locking, T *variable, T *accumulation, in CalSparseApplyFtrl()
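
The `CalSparseApplyFtrl` kernel above is the GPU implementation behind a sparse FTRL primitive; judging by the `(lr, l1, l2, lr_power, use_locking)` constructor at nn_ops.py:5207 this is presumably `SparseApplyFtrl`, though the listing does not name it. A hedged sketch of the Python side, with the inputs (var, accum, linear, grad, indices) assumed:

```python
# Sketch only: which primitive dispatches to CalSparseApplyFtrl is an assumption;
# the constructor arguments mirror the nn_ops.py:5207 signature listed above.
import numpy as np
import mindspore.ops.operations as P
from mindspore import Parameter, Tensor

sparse_ftrl = P.SparseApplyFtrl(lr=0.01, l1=0.0, l2=0.0, lr_power=-0.5, use_locking=False)

var = Parameter(Tensor(np.ones((3, 2), np.float32)), name="var")
accum = Parameter(Tensor(np.ones((3, 2), np.float32)), name="accum")
linear = Parameter(Tensor(np.zeros((3, 2), np.float32)), name="linear")
grad = Tensor(np.ones((2, 2), np.float32))            # rows selected by indices
indices = Tensor(np.array([0, 2], np.int32))
out = sparse_ftrl(var, accum, linear, grad, indices)
```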
|