
Searched defs:use_locking (Results 1 – 15 of 15) sorted by relevance

/third_party/mindspore/mindspore/core/ops/
apply_momentum.cc 28 void ApplyMomentum::Init(const bool use_nesterov, const bool use_locking, const float gradient_scal… in Init()
38 void ApplyMomentum::set_use_locking(const bool use_locking) { in set_use_locking()
adam.cc 55 void Adam::Init(const bool use_locking, const bool use_nesterov) { in Init()
60 void Adam::set_use_locking(const bool use_locking) { (void)this->AddAttr(kUseLocking, MakeValue(use… in set_use_locking()
/third_party/mindspore/mindspore/nn/optim/
lazyadam.py 34 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
81 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_… argument
236 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
proximal_ada_grad.py 44 def _check_param_value(accum, l1, l2, use_locking, prim_name=None): argument
173 use_locking=False, loss_scale=1.0, weight_decay=0.0): argument
adam.py 93 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
154 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
316 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
629 …def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, use_locking=False, argument
ftrl.py 59 def _check_param(initial_accum, lr_power, l1, l2, use_locking, prim_name=None): argument
198 use_locking=False, loss_scale=1.0, weight_decay=0.0): argument
rmsprop.py 182 use_locking=False, centered=False, loss_scale=1.0, weight_decay=0.0): argument
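
In the nn/optim wrappers listed above, use_locking is a plain constructor keyword that is forwarded to the underlying Apply* primitives. A minimal usage sketch based on the adam.py 316 signature (the nn.Dense placeholder network and the hyper-parameter values are assumptions, not taken from these search results):

    import mindspore.nn as nn

    net = nn.Dense(16, 10)  # placeholder; any Cell with trainable parameters works

    # use_locking=True asks the backing Adam kernels to protect the parameter and
    # moment updates with a lock; the default False leaves them unlocked.
    optimizer = nn.Adam(net.trainable_params(),
                        learning_rate=1e-3,
                        beta1=0.9,
                        beta2=0.999,
                        eps=1e-8,
                        use_locking=True)
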
/third_party/mindspore/tests/st/fl/mobile/src/
adam.py 108 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
175 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
/third_party/mindspore/tests/st/fl/hybrid_lenet/src/
adam.py 108 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
174 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
/third_party/mindspore/tests/st/fl/albert/src/
adam.py 109 def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_powe… argument
175 def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, argument
/third_party/mindspore/tests/ut/python/ops/
test_ops.py 416 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
429 def __init__(self, dtype=np.float32, use_locking=False): argument
442 def __init__(self, dtype=np.float32, use_locking=False): argument
455 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
507 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
520 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
533 def __init__(self, ref_shape, dtype=np.float32, use_locking=False): argument
1019 def __init__(self, use_locking=False): argument
1034 def __init__(self, rho, momentum, epsilon, use_locking=False): argument
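
The test_ops.py wrappers above follow a common pattern: a small Cell that owns a Parameter of shape ref_shape and applies an in-place update primitive constructed with use_locking. A hedged sketch of that pattern (the choice of P.ScatterUpdate is an assumption; the listed tests may wrap other in-place ops that take the same keyword):

    import numpy as np
    import mindspore.nn as nn
    from mindspore import Parameter, Tensor
    from mindspore.ops import operations as P

    class ScatterUpdateNet(nn.Cell):
        def __init__(self, ref_shape, dtype=np.float32, use_locking=False):
            super().__init__()
            # The in-place update op receives use_locking at construction time.
            self.scatter_update = P.ScatterUpdate(use_locking=use_locking)
            self.ref = Parameter(Tensor(np.ones(ref_shape, dtype)), name="ref")

        def construct(self, indices, updates):
            return self.scatter_update(self.ref, indices, updates)
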
/third_party/mindspore/mindspore/ops/operations/
nn_ops.py 2658 def __init__(self, use_nesterov=False, use_locking=False, gradient_scale=1.0): argument
3121 def __init__(self, use_locking=False): argument
3240 def __init__(self, use_locking=False): argument
4589 def __init__(self, use_locking=False, use_nesterov=False): argument
4697 def __init__(self, use_locking=False): argument
4810 def __init__(self, use_locking=False, use_nesterov=False): argument
4946 def __init__(self, use_locking=False, use_nesterov=False): argument
5097 def __init__(self, use_locking=False, use_nesterov=False): argument
5207 def __init__(self, lr, l1, l2, lr_power, use_locking=False): argument
5322 def __init__(self, use_locking=False): argument
[all …]
array_ops.py 59 def __init__(self, use_locking=False): argument
100 def __init__(self, use_locking=False): argument
3989 def __init__(self, use_locking=True): argument
4049 def __init__(self, use_locking=True): argument
4273 def __init__(self, use_locking=False): argument
4382 def __init__(self, use_locking=False): argument
inner_ops.py 539 def __init__(self, use_locking=False): argument
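
In mindspore/ops/operations, use_locking is likewise an attribute fixed when the primitive is constructed, not a call argument. A minimal sketch assuming the entry at nn_ops.py 2658 is P.ApplyMomentum (its keyword set matches ApplyMomentum::Init in apply_momentum.cc above); the shapes and hyper-parameters are illustrative only:

    import numpy as np
    import mindspore as ms
    from mindspore import Parameter, Tensor, context
    from mindspore.ops import operations as P

    context.set_context(mode=context.PYNATIVE_MODE)

    var = Parameter(Tensor(np.ones([2, 2]), ms.float32), name="var")
    accum = Parameter(Tensor(np.zeros([2, 2]), ms.float32), name="accum")
    grad = Tensor(np.full((2, 2), 0.1), ms.float32)

    # use_locking=True requests locked (serialized) writes to var and accum inside the kernel.
    apply_momentum = P.ApplyMomentum(use_nesterov=False, use_locking=True, gradient_scale=1.0)
    out = apply_momentum(var, accum, 0.01, grad, 0.9)  # (var, accum, lr, grad, momentum)
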
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/
sparse_ftrl_impl.cu 86 … const float learning_rate_power, const bool use_locking, T *variable, T *accumulation, in CalSparseApplyFtrl()