| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/kernel/cuda_impl/cuda_ops/ |
| D | adagrad_v2_impl.cu |
      32   const S *learning_rate, const T *gradient) {  in ApplyAdagradV2Kernel()
      43   const half *learning_rate, const half *gradient) {  in ApplyAdagradV2Kernel()
      54   const float *learning_rate, const half *gradient) {  in ApplyAdagradV2Kernel()
      65   const double *learning_rate, const half *gradient) {  in ApplyAdagradV2Kernel()
      76   const half *learning_rate, const double *gradient) {  in ApplyAdagradV2Kernel()
      87   const float *learning_rate, const double *gradient) {  in ApplyAdagradV2Kernel()
      98   const half *learning_rate, const float *gradient) {  in ApplyAdagradV2Kernel()
      109  const double *learning_rate, const float *gradient) {  in ApplyAdagradV2Kernel()
      120  const S *learning_rate, const T *gradient) {  in ApplyAdagradV2Kernel_()
      128  const half *learning_rate, const half *gradient) {  in ApplyAdagradV2Kernel_()
      [all …]
|
| D | adagrad_impl.cu |
      85   const G *gradient, T *variable, T *accumulation) {  in ApplyAdagradKernel()
      96   const half *gradient, half *variable, half *accumulation) {  in ApplyAdagradKernel()
      107  const float *gradient, float *variable, float *accumulation) {  in ApplyAdagradKernel()
      118  const half *gradient, half *variable, half *accumulation) {  in ApplyAdagradKernel()
      129  const float *gradient, float *variable, float *accumulation) {  in ApplyAdagradKernel()
      140  const double *gradient, double *variable, double *accumulation) {  in ApplyAdagradKernel()
      151  const double *gradient, double *variable, double *accumulation) {  in ApplyAdagradKernel()
      162  const double *gradient, double *variable, double *accumulation) {  in ApplyAdagradKernel()
      172  …ApplyAdagrad(const size_t size, const bool update_slots, const S *learning_rate, const G *gradient,  in ApplyAdagrad()
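
For orientation, a minimal sketch of the textbook Adagrad step that kernels with the signatures above typically perform; the kernel name and grid-stride layout are illustrative assumptions, not the body of adagrad_impl.cu:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch (not MindSpore code): accumulate squared gradients,
// then scale the step by the inverse square root of the accumulator.
template <typename T>
__global__ void AdagradSketchKernel(size_t size, bool update_slots, const T *learning_rate,
                                    const T *gradient, T *variable, T *accumulation) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    if (update_slots) {
      accumulation[i] += gradient[i] * gradient[i];                         // accum += g^2
    }
    variable[i] -= learning_rate[0] * gradient[i] / sqrt(accumulation[i]);  // var -= lr * g / sqrt(accum)
  }
}
```

The repeated ApplyAdagradKernel hits correspond to the different half/float/double type combinations the file instantiates for the gradient, variable, and accumulation tensors.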
|
| D | softmarginloss_grad_impl.cu |
      22   … const size_t input_size, const T norm, T *gradient) {  in SoftMarginLossGradReductionNoneKernel()
      32   … const size_t input_size, const half norm, half *gradient) {  in SoftMarginLossGradReductionNoneKernel()
      44   … const size_t input_size, const T norm, T *gradient) {  in SoftMarginLossGradReductionOtherKernel()
      54   … const size_t input_size, const half norm, half *gradient) {  in SoftMarginLossGradReductionOtherKernel()
      66   … const T norm, const ReductionMode &reduction, T *gradient, const uint32_t &device_id,  in SoftMarginLossGrad()
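
The soft margin loss is L(x, y) = log(1 + exp(-y * x)); its input gradient, scaled by a `norm` factor (1/N for mean reduction, 1 otherwise), is what kernels of this shape compute. A hedged sketch with illustrative names:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch: d/dx log(1 + exp(-y*x)) = -y / (1 + exp(y*x)),
// scaled by the reduction norm and the incoming gradient dout.
template <typename T>
__global__ void SoftMarginLossGradSketch(const T *prediction, const T *label, const T *dout,
                                         size_t input_size, T norm, T *gradient) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < input_size;
       i += blockDim.x * gridDim.x) {
    gradient[i] = -label[i] / (T(1) + exp(label[i] * prediction[i])) * norm * dout[i];
  }
}
```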
|
| D | adadelta_impl.cu |
      32   … const G *gradient, T *variable, T *accumulation, T *accumulation_update) {  in ApplyAdadeltaKernal()
      45   … const float *epsilon, const half *gradient, half *variable, half *accumulation,  in ApplyAdadeltaKernal()
      61   … const float *epsilon, const half *gradient, float *variable, float *accumulation,  in ApplyAdadeltaKernal()
      76   const float *gradient, float *variable, float *accumulation,  in ApplyAdadeltaKernal()
      92   … const float *epsilon, const float *gradient, half *variable, half *accumulation,  in ApplyAdadeltaKernal()
      109  const float *gradient, half *variable, half *accumulation,  in ApplyAdadeltaKernal()
      124  const half *gradient, float *variable, float *accumulation,  in ApplyAdadeltaKernal()
      141  const half *gradient, double *variable, double *accumulation,  in ApplyAdadeltaKernal()
      158  … const float *epsilon, const float *gradient, double *variable, double *accumulation,  in ApplyAdadeltaKernal()
      172  const float *epsilon, const double *gradient, double *variable,  in ApplyAdadeltaKernal()
      [all …]
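
For reference, a minimal sketch of the textbook Adadelta recursion these signatures correspond to; names and layout are illustrative assumptions:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch: two running averages (squared gradients and squared
// updates); the ratio of their RMS values scales each step.
template <typename T>
__global__ void AdadeltaSketchKernel(size_t size, const T *lr, const T *rho, const T *epsilon,
                                     const T *gradient, T *variable, T *accumulation,
                                     T *accumulation_update) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    T g = gradient[i];
    accumulation[i] = rho[0] * accumulation[i] + (T(1) - rho[0]) * g * g;
    T update = sqrt(accumulation_update[i] + epsilon[0]) / sqrt(accumulation[i] + epsilon[0]) * g;
    accumulation_update[i] = rho[0] * accumulation_update[i] + (T(1) - rho[0]) * update * update;
    variable[i] -= lr[0] * update;
  }
}
```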
|
| D | apply_add_sign_impl.cu |
      27   … const S *alpha, const S *sign_decay, const S *beta, const G *gradient) {  in ApplyAddSignKernel()
      38   const half *gradient) {  in ApplyAddSignKernel()
      53   const half *gradient) {  in ApplyAddSignKernel()
      64   … const half *alpha, const half *sign_decay, const half *beta, const float *gradient) {  in ApplyAddSignKernel()
      76   … const half *alpha, const half *sign_decay, const half *beta, const half *gradient) {  in ApplyAddSignKernel()
      89   … const half *alpha, const half *sign_decay, const half *beta, const half *gradient) {  in ApplyAddSignKernel()
      102  … const S *sign_decay, const S *beta, const G *gradient, const uint32_t &device_id,  in ApplyAddSign()
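
ApplyAddSign follows the AddSign rule: the step is the gradient scaled by `alpha` plus a sign-agreement bonus between gradient and momentum. A hedged sketch with assumed names:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch of AddSign:
//   m   <- beta * m + (1 - beta) * g
//   var <- var - lr * (alpha + sign_decay * sign(g) * sign(m)) * g
template <typename T>
__device__ T SignOf(T x) { return T((x > T(0)) - (x < T(0))); }

template <typename T>
__global__ void AddSignSketchKernel(size_t size, T *variable, T *m, const T *lr, const T *alpha,
                                    const T *sign_decay, const T *beta, const T *gradient) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    T g = gradient[i];
    m[i] = beta[0] * m[i] + (T(1) - beta[0]) * g;
    variable[i] -= lr[0] * (alpha[0] + sign_decay[0] * SignOf(g) * SignOf(m[i])) * g;
  }
}
```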
|
| D | apply_power_sign_impl.cu |
      43   … const S logbase, const S sign_decay, const S beta, const G *gradient) {  in ApplyPowerSignKernel()
      54   const half *gradient) {  in ApplyPowerSignKernel()
      69   const half *gradient) {  in ApplyPowerSignKernel()
      80   … const half logbase, const half sign_decay, const half beta, const half *gradient) {  in ApplyPowerSignKernel()
      94   const float *gradient) {  in ApplyPowerSignKernel()
      106  … const half logbase, const half sign_decay, const half beta, const half *gradient) {  in ApplyPowerSignKernel()
      119  … const S sign_decay, const S beta, const G *gradient, const uint32_t &device_id,  in ApplyPowerSign()
|
| D | adamax_impl.cu |
      43   … const S *b2, const S *eps, const G *gradient, T *variable, T *m, T *v) {  in ApplyAdamaxKernal()
      54   … const float *b2, const float *eps, const half *gradient, half *variable, half *m,  in ApplyAdamaxKernal()
      67   … const float *b2, const float *eps, const half *gradient, float *variable, float *m,  in ApplyAdamaxKernal()
      79   … const half *b2, const half *eps, const float *gradient, float *variable, float *m,  in ApplyAdamaxKernal()
      92   … const half *b2, const half *eps, const double *gradient, double *variable, double *m,  in ApplyAdamaxKernal()
      105  … const float *b2, const float *eps, const double *gradient, double *variable,  in ApplyAdamaxKernal()
      117  … const float *b2, const float *eps, const float *gradient, half *variable, half *m,  in ApplyAdamaxKernal()
      130  … const float *b2, const float *eps, const float *gradient, double *variable, double *m,  in ApplyAdamaxKernal()
      142  … const half *b2, const half *eps, const half *gradient, double *variable, double *m,  in ApplyAdamaxKernal()
      155  … const half *b2, const half *eps, const half *gradient, float *variable, float *m,  in ApplyAdamaxKernal()
      [all …]
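
ApplyAdamax is the infinity-norm variant of Adam. A minimal sketch of the update rule; parameter names such as `b1_power` are assumptions, since the hits above are truncated:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch of AdaMax:
//   m   <- b1 * m + (1 - b1) * g
//   v   <- max(b2 * v, |g|)
//   var <- var - lr / (1 - b1_power) * m / (v + eps)
template <typename T>
__global__ void AdamaxSketchKernel(size_t size, const T *b1_power, const T *lr, const T *b1,
                                   const T *b2, const T *eps, const T *gradient, T *variable,
                                   T *m, T *v) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    T g = gradient[i];
    m[i] = b1[0] * m[i] + (T(1) - b1[0]) * g;
    v[i] = fmax(b2[0] * v[i], fabs(g));
    variable[i] -= lr[0] / (T(1) - b1_power[0]) * m[i] / (v[i] + eps[0]);
  }
}
```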
|
| D | adam_impl.cu |
      31   __global__ void ApplyAdamKernel(const size_t size, const int64_t batch_size, const T *gradient, con…  in ApplyAdamKernel()
      51   __global__ void AdamWeightDecayKernel(const size_t size, const float *gradient, const float *learni…  in AdamWeightDecayKernel()
      65   __global__ void AdamWeightDecayKernel(const size_t size, const half *gradient, const float *learnin…  in AdamWeightDecayKernel()
      79   __global__ void AdamWeightDecayKernel(const size_t size, const half *gradient, const float *learnin…  in AdamWeightDecayKernel()
      97   cudaError_t ApplyAdam(const size_t size, const int64_t batch_size, const T *gradient, const T *beta…  in ApplyAdam()
      106  cudaError_t AdamWeightDecayOp(const size_t size, const S *gradient, const float *learning_rate, con…  in AdamWeightDecayOp()
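
ApplyAdam corresponds to the standard Adam step with the bias correction folded into the learning rate. A minimal, illustrative sketch (the truncated hits above hide the full argument order, so names here are assumptions):

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch of Adam:
//   m    <- beta1 * m + (1 - beta1) * g
//   v    <- beta2 * v + (1 - beta2) * g^2
//   lr_t  = lr * sqrt(1 - beta2_power) / (1 - beta1_power)
//   var  <- var - lr_t * m / (sqrt(v) + eps)
template <typename T>
__global__ void AdamSketchKernel(size_t size, const T *gradient, const T *beta1_power,
                                 const T *beta2_power, const T *lr, const T *beta1, const T *beta2,
                                 const T *eps, T *variable, T *m, T *v) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    T g = gradient[i];
    m[i] = beta1[0] * m[i] + (T(1) - beta1[0]) * g;
    v[i] = beta2[0] * v[i] + (T(1) - beta2[0]) * g * g;
    T lr_t = lr[0] * sqrt(T(1) - beta2_power[0]) / (T(1) - beta1_power[0]);
    variable[i] -= lr_t * m[i] / (sqrt(v[i]) + eps[0]);
  }
}
```

The AdamWeightDecayKernel overloads in the same file add a decoupled weight-decay term; see the sketch after adam_weight_decay_impl.cu below.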
|
| D | momentum_impl.cu |
      198  … const G *gradient, const S *momentum, bool use_nesterov, cudaStream_t cuda_stream) {  in MomentumUpdateVariable()
      211  … const S *learning_rate, const G *gradient, const S *momentum,  in FusedWeightDecayScaleMomentum()
      220  const G *gradient, const S *momentum, cudaStream_t cuda_stream) {  in FusedScaleMomentum()
      228  const S *learning_rate, const G *gradient, const S *momentum,  in FusedWeightDecayMomentum()
      238  … T **accumulation, S **learning_rate, G **gradient, S **momentum) {  in CombineFusedMomentumScaleKernel()
      249  … T **variable, T **accumulation, S **learning_rate, G **gradient, S **momentum,  in CombineFusedScaleMomentum()
      263  … S **learning_rate, G **gradient, S **momentum) {  in CombineFusedMomentumWeightDecayScaleKernel()
      276  S **learning_rate, G **gradient, S **momentum,  in CombineFusedWeightDecayScaleMomentum()
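
MomentumUpdateVariable takes a `use_nesterov` flag; a minimal sketch of both variants of the classic momentum update, with illustrative names:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch:
//   accum <- momentum * accum + g
//   var   <- var - lr * accum                    (plain momentum)
//   var   <- var - lr * (g + momentum * accum)   (use_nesterov)
template <typename T>
__global__ void MomentumSketchKernel(size_t size, T *variable, T *accumulation,
                                     const T *learning_rate, const T *gradient, const T *momentum,
                                     bool use_nesterov) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    accumulation[i] = momentum[0] * accumulation[i] + gradient[i];
    if (use_nesterov) {
      variable[i] -= learning_rate[0] * (gradient[i] + momentum[0] * accumulation[i]);
    } else {
      variable[i] -= learning_rate[0] * accumulation[i];
    }
  }
}
```

The Fused*/CombineFused* entries, judging by their pointer-to-pointer arguments, appear to apply this same update across several parameter tensors in one launch, optionally folding in loss scaling and weight decay.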
|
| D | sparse_apply_centered_rms_prop_impl.cu |
      38   … const T *gradient, const S *indices, T *variable, T *mean_grad,  in SparseApplyCenteredRMSPropUpdate()
      60   … double *momentum, const double *gradient, const S *indices,  in SparseApplyCenteredRMSPropUpdate()
      83   … const half *gradient, const S *indices, half *variable,  in SparseApplyCenteredRMSPropUpdate()
      112  … T *learning_rate, T *decay_rate, T *epsilon, T *momentum, const T *gradient,  in CalSparseApplyCenteredRMSProp()
|
| D | adam_weight_decay_impl.cu |
      23   T *param, T *gradient) {  in AdamWeightDecayKernel()
      41   T *gradient, cudaStream_t stream) {  in AdamWeightDecay()
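
AdamWeightDecay applies the decay directly to the parameter (AdamW-style decoupled decay) rather than folding it into the gradient. A hedged sketch with assumed names:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch of decoupled weight decay:
//   m      <- beta1 * m + (1 - beta1) * g
//   v      <- beta2 * v + (1 - beta2) * g^2
//   param  <- param - lr * (m / (sqrt(v) + eps) + weight_decay * param)
template <typename T>
__global__ void AdamWeightDecaySketchKernel(size_t size, const T *beta1, const T *beta2,
                                            const T *eps, const T *lr, const T *weight_decay,
                                            T *m, T *v, T *param, T *gradient) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) {
    T g = gradient[i];
    m[i] = beta1[0] * m[i] + (T(1) - beta1[0]) * g;
    v[i] = beta2[0] * v[i] + (T(1) - beta2[0]) * g * g;
    T update = m[i] / (sqrt(v[i]) + eps[0]) + weight_decay[0] * param[i];
    param[i] -= lr[0] * update;
  }
}
```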
|
| D | sparse_apply_adagrad_impl.cu |
      32   … const bool update_slots, const T *gradient, const S *indices, T *variable,  in SparseApplyAdagradUpdate()
      52   … const bool update_slots, const T *gradient, const S *indices, T *variable,  in CalSparseApplyAdagrad()
|
| D | sparse_apply_adagrad_v2_impl.cu |
      32   … const float epsilon, const bool update_slots, const T *gradient,  in SparseApplyAdagradV2Update()
      53   … const float epsilon, const bool update_slots, const T *gradient, const S *indices,  in CalSparseApplyAdagradV2()
|
| D | sparse_apply_r_m_s_prop_impl.cu |
      39   … const T *gradient, const S *indices, T *variable, T *ms, T *mom) {  in SparseApplyRMSPropUpdate()
      56   … const float momentum, const float epsilon, const T *learning_rate, const T *gradient,  in CalSparseApplyRMSProp()
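
The sparse variants above take an `indices` tensor: the gradient holds only the selected rows, and the corresponding rows of the dense `variable`/`ms`/`mom` tables get the usual RMSProp recursion. A sketch under that assumption; row layout and names are illustrative:

```cuda
#include <cuda_runtime.h>

// Hypothetical sketch of sparse RMSProp: each thread updates one element of
// one selected row.
//   ms  <- rho * ms + (1 - rho) * g^2
//   mom <- momentum * mom + lr * g / sqrt(ms + eps)
//   var <- var - mom
template <typename T, typename S>
__global__ void SparseRMSPropSketchKernel(size_t num_indices, size_t row_size, float rho,
                                          float momentum, float epsilon, const T *learning_rate,
                                          const T *gradient, const S *indices, T *variable,
                                          T *ms, T *mom) {
  size_t total = num_indices * row_size;
  for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < total;
       pos += blockDim.x * gridDim.x) {
    size_t row = pos / row_size;                                       // row within the sparse gradient
    size_t col = pos % row_size;
    size_t dst = static_cast<size_t>(indices[row]) * row_size + col;   // row in the dense tables
    T g = gradient[pos];
    ms[dst] = T(rho) * ms[dst] + (T(1) - T(rho)) * g * g;
    mom[dst] = T(momentum) * mom[dst] + learning_rate[0] * g / sqrt(ms[dst] + T(epsilon));
    variable[dst] -= mom[dst];
  }
}
```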
|
| /third_party/mindspore/mindspore-src/source/mindspore/python/mindspore/nn/optim/ |
| D | adam.py |
      45   beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,  argument
      90   … beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,  argument
      130  … beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, params, moment1,  argument
      147  beta1, beta2, eps, lr, gradient, params, m, v):  argument
      185  … beta2_power, beta1, beta2, eps, lr, gradient, params, m, v):  argument
      217  … beta2_power, beta1, beta2, eps, lr, gradient, params, moment1, moment2):  argument
      227  def _update_run_op(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flag, optim_fi…  argument
      285  beta2_power, beta1, beta2, eps, lr, gradient, param, m, v,  argument
      344  … beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment1, moment2,  argument
      362  gradient, param, m, v):  argument
      [all …]
|
| D | ftrl.py |
      32   gradient, weight, moment, cache_enable,  argument
      84   gradient, weight, moment, cache_enable,  argument
      103  gradient, weight, moment, cache_enable,  argument
      117  gradient, weight, moment, cache_enable):  argument
      129  gradient, weight, moment, cache_enable):  argument
      140  gradient, weight, moment, cache_enable):  argument
|
| D | momentum.py |
      33   def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment):  argument
      40   def _tensor_run_opt_ext_dist(opt, momentum, learning_rate, gradient, weight, moment,  argument
|
| D | lazyadam.py |
      39   … beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter, cache_enable,  argument
      92   beta2_power, beta1, beta2, eps, lr, gradient, params, m, v,  argument
      133  … beta1, beta2, eps, lr, gradient, params, moment1, moment2, ps_parameter, cache_enable,  argument
      154  beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter, cache_enable):  argument
      202  … beta2_power, beta1, beta2, eps, lr, gradient, params, m, v, ps_parameter,  argument
      235  … beta1, beta2, eps, lr, gradient, params, moment1, moment2, ps_parameter, cache_enable):  argument
|
| /third_party/mindspore/mindspore-src/source/tests/st/ops/graph_kernel/ |
| D | test_fused_adam.py |
      47   …def construct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tens…  argument
      91   …def construct(self, beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tens…  argument
      119  def CalFusedAdam(beta1, beta2, one_sub_beta_1, one_sub_beta_2, gradient, eps, weight_decay_tensor, …  argument
|
| /third_party/mindspore/mindspore-src/source/tests/st/dyn_shape_dev/ |
| D | test_adam_weight_decay.py |
      34   def construct(self, param, m, v, lr, beta1, beta2, eps, weight_decay, gradient):  argument
      41   def adam_weight_decay_forward_func(param, m, v, lr, beta1, beta2, eps, weight_decay, gradient):  argument
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/litert/kernel/cpu/fp32_grad/ |
| D | sgd.cc |
      33   int DoSgd(float *weight, float *accumulate, float *gradient, float learning_rate, float dampening, …  in DoSgd()
      60   int DoSgdInit(float *weight, float *accumulate, float *gradient, float learning_rate, float moment,…  in DoSgdInit()
      92   auto gradient = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());  in DoExecute() local
      116  auto gradient = reinterpret_cast<float *>(in_tensors_.at(1)->MutableData());  in ExecuteInit() local
|
| /third_party/mindspore/mindspore-src/source/tests/st/ops/ascend/ |
| D | test_adam_weight_decay.py |
      32   def construct(self, param, m, v, lr, beta1, beta2, eps, weight_decay, gradient):  argument
      45   def construct(self, param, m, v, lr, beta1, beta2, eps, weight_decay, gradient):  argument
|
| /third_party/mindspore/mindspore-src/source/tests/st/ops/ |
| D | test_lamb_op.py |
      45   def __init__(self, param, m, v, gradient):  argument
      108  def __init__(self, param, m, v, gradient):  argument
      136  def __init__(self, param, m, v, gradient):  argument
|
| /third_party/skia/m133/gm/ |
| D | clipshader.cpp |
      51   auto gradient = SkGradientShader::MakeRadial(  variable
|
| /third_party/skia/tests/ |
| D | ShaderImageFilterTest.cpp |
      36   sk_sp<SkShader> gradient = SkGradientShader::MakeRadial(  in test_unscaled() local
      84   sk_sp<SkShader> gradient = SkGradientShader::MakeRadial(  in test_scaled() local
|