
Searched refs: SGD (Results 1 – 25 of 438), sorted by relevance


/third_party/mindspore/mindspore/core/ops/
sgd.cc
  21  void SGD::Init(const float dampening, const float weight_decay, const bool nesterov) {  [in Init()]
  27  void SGD::set_dampening(const float dampening) {  [in set_dampening()]
  32  void SGD::set_weight_decay(const float weight_decay) { (void)AddAttr(kWeightDecay, MakeValue(weight…  [in set_weight_decay()]
  34  void SGD::set_nesterov(const bool nesterov) { (void)AddAttr(kNesterov, MakeValue(nesterov)); }  [in set_nesterov()]
  36  float SGD::get_dampening() const {  [in get_dampening()]
  41  float SGD::get_weight_decay() const {  [in get_weight_decay()]
  46  bool SGD::get_nesterov() const {  [in get_nesterov()]
  50  REGISTER_PRIMITIVE_C(kNameSGD, SGD);
sgd.h
  30  class MS_CORE_API SGD : public PrimitiveC {
  33    SGD() : PrimitiveC(kNameSGD) {}  [in SGD()]
  35    ~SGD() = default;
  36    MS_DECLARE_PARENT(SGD, PrimitiveC);
  60  using PrimSGD = std::shared_ptr<SGD>;
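The hits above define the C++ SGD primitive and register its dampening, weight_decay, and nesterov attributes. A minimal sketch of driving this primitive from Python, assuming the input order (parameters, gradient, learning_rate, accum, momentum, stat) suggested by the INPUT_MAP hit under nn_training_ops_declare.cc below; the concrete tensor values are illustrative only:

    import numpy as np
    import mindspore as ms
    from mindspore import Tensor, context, ops

    # Call the primitive eagerly; PyNative mode is assumed here.
    context.set_context(mode=context.PYNATIVE_MODE)

    # Attributes match the setters registered in sgd.cc above.
    sgd = ops.SGD(dampening=0.0, weight_decay=0.0, nesterov=False)

    param = Tensor(np.array([2.0, -0.5], np.float32))
    grad = Tensor(np.array([0.1, 0.2], np.float32))
    lr = Tensor(0.01, ms.float32)
    accum = Tensor(np.zeros(2, np.float32))   # momentum buffer
    momentum = Tensor(0.9, ms.float32)
    stat = Tensor(np.ones(2, np.float32))     # first-step flags

    out = sgd(param, grad, lr, accum, momentum, stat)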
/third_party/mindspore/tests/ut/python/nn/optim/
test_optimizer.py
  21  from mindspore.nn.optim import Optimizer, SGD, Adam, AdamWeightDecay
  67  SGD(params, learning_rate=0.1, momentum=-0.1, dampening=0, weight_decay=0, nesterov=False)
  69  SGD(params, learning_rate=0.12, momentum=-0.1, dampening=0, weight_decay=0, nesterov=False)
  70  SGD(params)
  86  SGD(None)
 103  SGD(paramsTensor)
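A sketch of what these test_optimizer.py hits appear to exercise: invalid constructor arguments should raise, while a plain SGD(params) with defaults succeeds. Wrapping the bad calls in pytest.raises, and the exact exception types, are assumptions about the surrounding test code:

    import numpy as np
    import pytest
    from mindspore import Tensor, Parameter
    from mindspore.nn.optim import SGD

    params = [Parameter(Tensor(np.ones((2, 2), np.float32)), name="w")]

    with pytest.raises(ValueError):
        # negative momentum is rejected by argument validation
        SGD(params, learning_rate=0.1, momentum=-0.1)

    with pytest.raises((TypeError, ValueError)):
        SGD(None)  # params must be an iterable of Parameters

    opt = SGD(params)  # defaults (lr=0.1, momentum=0.0) construct fine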
test_lars.py
  58  SGD = Momentum(net.trainable_params(), lr, 0.9)
  59  optimizer = LARS(SGD, epsilon=1e-08, coefficient=0.02, use_clip=True,
  75  SGD = Momentum(net.trainable_params(), lr, 0.9)
  76  optimizer = LARS(SGD, epsilon=1e-08, coefficient=0.02,
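The pattern in test_lars.py is a base optimizer wrapped by LARS. A minimal self-contained sketch of the same construction; the stand-in Dense network replaces the tests' own Net:

    from mindspore import nn

    net = nn.Dense(4, 2)  # stand-in network, assumption for illustration
    base_opt = nn.Momentum(net.trainable_params(), learning_rate=0.01, momentum=0.9)
    optimizer = nn.LARS(base_opt, epsilon=1e-08, coefficient=0.02, use_clip=True)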
/third_party/mindspore/tests/mindspore_test_framework/apps/
test_model_loss.py
  23  from ..utils.model_util import SquaredLoss, SGD
  31  'opt': SGD(network.trainable_params(), 0.001, 20),
/third_party/flutter/skia/third_party/externals/icu/source/data/curr/
fa_AF.txt
  53  SGD{
  54      "SGD",
zh_Hant_HK.txt
 193  SGD{
 194      "SGD",
 413  SGD{
dz.txt
 241  SGD{
 242      "SGD",
ps.txt
 497  SGD{
 498      "SGD",
 499      "SGD",
 722  SGD{"$"}
1238  SGD{
1239      one{"SGD"}
1240      other{"SGD"}
/third_party/icu/icu4c/source/data/curr/
fa_AF.txt
  54  SGD{
  55      "SGD",
zh_Hant_HK.txt
 198  SGD{
 199      "SGD",
 417  SGD{
dz.txt
 242  SGD{
 243      "SGD",
mt.txt
 458  SGD{
 459      "SGD",
 460      "SGD",
 670  SGD{"$"}
/third_party/skia/third_party/externals/icu/source/data/curr/
fa_AF.txt
  54  SGD{
  55      "SGD",
zh_Hant_HK.txt
 198  SGD{
 199      "SGD",
 417  SGD{
dz.txt
 242  SGD{
 243      "SGD",
mt.txt
 458  SGD{
 459      "SGD",
 460      "SGD",
 670  SGD{"$"}
/third_party/mindspore/mindspore/nn/optim/
sgd.py
  35  class SGD(Optimizer):  [class]
 142  super(SGD, self).__init__(learning_rate, params, weight_decay, loss_scale)
 171  self.opt = P.SGD(dampening, weight_decay, nesterov)
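Putting the sgd.py hits together with the st test hits further down: the public nn.SGD optimizer (which wraps the P.SGD primitive at line 171) in a standard training step. A sketch assuming stand-in network, loss, and data; hyperparameters are illustrative:

    import numpy as np
    from mindspore import Tensor, nn
    from mindspore.nn.optim import SGD

    net = nn.Dense(10, 3)
    loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
    optimizer = SGD(
        filter(lambda x: x.requires_grad, net.get_parameters()),
        learning_rate=0.01, momentum=0.9, dampening=0.0,
        weight_decay=1e-4, nesterov=False)

    # One fused forward/backward/update step.
    train_net = nn.TrainOneStepCell(nn.WithLossCell(net, loss_fn), optimizer)
    data = Tensor(np.random.randn(32, 10).astype(np.float32))
    label = Tensor(np.random.randint(0, 3, (32,)).astype(np.int32))
    loss = train_net(data, label)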
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
sgd_cpu_kernel.h
  45  MS_REG_CPU_KERNEL_T(SGD,
  56  MS_REG_CPU_KERNEL_T(SGD,
/third_party/mindspore/tests/ut/python/optimizer/
test_optimizer_with_parameter_groups.py
  24  from mindspore.nn.optim import Momentum, SGD, RMSProp, Adam
 200  opt = SGD(group_params, learning_rate=0.1, weight_decay=default_weight_decay)
 241  opt = SGD(group_params)
 263  opt = SGD(group_params)
 282  opt = SGD(group_params)
 300  opt = SGD(group_params, learning_rate=0.1, weight_decay=0.0)
 338  opt = SGD(group_params, learning_rate=default_lr, weight_decay=default_wd)
 373  SGD(group_params)
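The group_params these tests pass is a list of dicts, each carrying its own per-group hyperparameters. A sketch of that shape, assuming the public group keys ('params', 'lr', 'weight_decay'); the network and the split by parameter name are illustrative:

    from mindspore import nn
    from mindspore.nn.optim import SGD

    class Net(nn.Cell):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, 3)
            self.fc = nn.Dense(8, 2)

        def construct(self, x):
            return self.fc(self.conv(x).mean(axis=(2, 3)))

    net = Net()
    conv_params = [p for p in net.trainable_params() if 'conv' in p.name]
    no_conv_params = [p for p in net.trainable_params() if 'conv' not in p.name]

    group_params = [
        {'params': conv_params, 'weight_decay': 0.01},  # per-group weight decay
        {'params': no_conv_params, 'lr': 0.05},         # per-group learning rate
    ]
    opt = SGD(group_params, learning_rate=0.1, weight_decay=0.0)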
/third_party/mindspore/mindspore/ccsrc/transform/graph_ir/op_declare/
nn_training_ops_declare.h
  62  DECLARE_OP_ADAPTER(SGD)
  63  DECLARE_OP_USE_OUTPUT(SGD)
nn_training_ops_declare.cc
 142  INPUT_MAP(SGD) = {{1, INPUT_DESC(parameters)}, {2, INPUT_DESC(gradient)}, {3, INPUT_DESC(learning_r…
 144  ATTR_MAP(SGD) = {{"dampening", ATTR_DESC(dampening, AnyTraits<float>())},
 147  OUTPUT_MAP(SGD) = {{0, OUTPUT_DESC(parameters)}};
 148  REG_ADPT_DESC(SGD, kNameSGD, ADPT_DESC(SGD))
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/cuda_impl/
sgd_impl.cu
  49  void SGD(const int size, const T dampening, const T weight_decay, const bool nesterov, const T *lr, …  [in SGD()]
  55  template void SGD(const int size, const float dampening, const float weight_decay, const bool neste…
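For reference, a NumPy sketch of the update rule a kernel with these parameters conventionally computes (momentum with dampening, optional L2 weight decay, optional Nesterov). This mirrors the standard formulation; the primitive's first-step stat handling is simplified away here:

    import numpy as np

    def sgd_step(param, grad, accum, lr, momentum,
                 dampening=0.0, weight_decay=0.0, nesterov=False):
        if weight_decay > 0.0:
            grad = grad + weight_decay * param            # L2 regularization
        accum = momentum * accum + (1.0 - dampening) * grad
        update = grad + momentum * accum if nesterov else accum
        return param - lr * update, accum

    p, a = sgd_step(np.array([2.0, -0.5]), np.array([0.1, 0.2]),
                    np.zeros(2), lr=0.01, momentum=0.9)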
/third_party/mindspore/tests/st/ops/gpu/
test_sgd_op.py
  24  from mindspore.nn.optim import SGD
  56  optimizer = SGD(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum, …
/third_party/mindspore/tests/st/ops/cpu/
test_sgd_op.py
  24  from mindspore.nn.optim import SGD
  56  optimizer = SGD(filter(lambda x: x.requires_grad, net.get_parameters()), learning_rate, momentum, …
