Searched refs:summed_grad (Results 1 – 6 of 6) sorted by relevance

/third_party/mindspore/tests/ut/cpp/kernel/cpu/
sparse_optimizer_cpu_kernel_test.cc
     50  std::vector<float> summed_grad(12);  in TEST_F() local
     54  SparseGradient<int> unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
    103  std::vector<float> summed_grad(12);  in TEST_F() local
    106  SparseGradient<int> unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/
common_utils_test.cc
     50  std::vector<float> summed_grad(12);  in TEST_F() local
     54  SparseGradient unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
    103  std::vector<float> summed_grad(12);  in TEST_F() local
    106  SparseGradient unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
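Both test files build the same fixture: a flat summed_grad buffer of 12 floats plus 6 unique indices (6 deduplicated rows of width 2) wrapped into a SparseGradient. The struct below is a minimal, hypothetical stand-in used only to illustrate that layout; the field names, the example sizes, and main() are assumptions, not MindSpore's actual definition.

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the SparseGradient used in the tests:
// value_ points at the summed (deduplicated) gradient rows,
// indices_ at the unique row indices, indices_size_ is the row count.
template <typename T>
struct SparseGradientSketch {
  float *value_;
  T *indices_;
  std::size_t indices_size_;
};

int main() {
  // Mirrors the test setup: 6 unique rows of width 2 -> 12 floats.
  std::vector<float> summed_grad(12, 0.0f);
  std::vector<int> unique_indices = {0, 1, 2, 3, 4, 5};
  SparseGradientSketch<int> unique_grad{summed_grad.data(), unique_indices.data(),
                                        unique_indices.size()};
  std::cout << "rows: " << unique_grad.indices_size_ << '\n';
  return 0;
}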
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
sparse_apply_lazy_adam_cpu_kernel.cc
     49  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeLazyAdam() local
     50  m[j] = beta1 * m[j] + (1 - beta1) * summed_grad;  in ComputeLazyAdam()
     51  v[j] = beta2 * v[j] + (1 - beta2) * summed_grad * summed_grad;  in ComputeLazyAdam()
     53  var[j] -= lr * (m[j] * beta1 + (1 - beta1) * summed_grad) / (std::sqrt(v[j]) + epsilon);  in ComputeLazyAdam()
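In ComputeLazyAdam only the rows present in the sparse gradient are touched: each unique index refreshes m and v with its summed gradient and takes a step whose numerator mixes the new m with the current gradient (apparently the Nesterov-style variant, going by the hit on line 53). A standalone sketch of that loop follows; the row-width parameter, the index-to-offset mapping, and the signature are assumptions, not the kernel's actual interface.

#include <cmath>
#include <cstddef>
#include <vector>

// Minimal sketch of the lazy Adam per-element update shown in the hits above.
// Only elements covered by the sparse gradient are updated; shapes, the
// summed-gradient layout, and hyperparameter handling are assumed.
void LazyAdamUpdate(std::vector<float> &var, std::vector<float> &m, std::vector<float> &v,
                    const std::vector<float> &summed_grad, const std::vector<int> &unique_indices,
                    std::size_t row_width, float lr, float beta1, float beta2, float epsilon) {
  for (std::size_t k = 0; k < unique_indices.size(); ++k) {
    const std::size_t base = static_cast<std::size_t>(unique_indices[k]) * row_width;
    for (std::size_t i = 0; i < row_width; ++i) {
      const std::size_t j = base + i;
      const float g = summed_grad[k * row_width + i];
      m[j] = beta1 * m[j] + (1 - beta1) * g;
      v[j] = beta2 * v[j] + (1 - beta2) * g * g;
      // Step with the mixed numerator seen on line 53 of the kernel.
      var[j] -= lr * (m[j] * beta1 + (1 - beta1) * g) / (std::sqrt(v[j]) + epsilon);
    }
  }
}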
sparse_apply_ftrl_cpu_kernel.cc
     48  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeFtrl() local
     49  auto accum_new = accum[j] + summed_grad * summed_grad;  in ComputeFtrl()
     53  linear[j] += summed_grad - (y - std::sqrt(accum[j])) / lr * var[j];  in ComputeFtrl()
     56  linear[j] += summed_grad - (y - std::pow(accum[j], -lr_power)) / lr * var[j];  in ComputeFtrl()
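ComputeFtrl accumulates the squared summed gradient into accum and folds the change in accum^(-lr_power) into the linear term; the sqrt form on line 53 looks like a fast path for lr_power == -0.5, with y presumably being accum_new raised to -lr_power just above the hits. The sketch below reconstructs one element's update under those assumptions; the final proximal step that rewrites var from linear is not part of these hits and is omitted.

#include <cmath>

// Sketch of the per-element FTRL accumulator/linear update around summed_grad.
// Variable names mirror the search hits; the surrounding loop, shapes, and the
// proximal step on var are assumed or omitted.
void FtrlAccumLinearUpdate(float &accum, float &linear, float var, float summed_grad,
                           float lr, float lr_power) {
  const float accum_new = accum + summed_grad * summed_grad;
  if (lr_power == -0.5f) {
    const float y = std::sqrt(accum_new);            // accum_new^{0.5}, assumed fast path
    linear += summed_grad - (y - std::sqrt(accum)) / lr * var;
  } else {
    const float y = std::pow(accum_new, -lr_power);  // general accum_new^{-lr_power}
    linear += summed_grad - (y - std::pow(accum, -lr_power)) / lr * var;
  }
  accum = accum_new;
  // The real kernel then derives var from linear; that code is outside these hits.
}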
sparse_apply_adam_cpu_kernel.cc
     47  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeAdam() local
     48  m[j] += (1 - beta1) * summed_grad;  in ComputeAdam()
     49  v[j] += (1 - beta2) * summed_grad * summed_grad;  in ComputeAdam()
     51  m_t[j] = m[j] * beta1 + (1 - beta1) * summed_grad;  in ComputeAdam()
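Unlike the lazy variant, ComputeAdam only adds the gradient contribution here; the beta1/beta2 decay of m and v is presumably applied to every element elsewhere, outside these hits. m_t then holds the Nesterov-adjusted first moment. A sketch of just that sparse addition, with assumed shapes and signature:

#include <cstddef>
#include <vector>

// Sketch of the sparse part of the Adam update shown in ComputeAdam.
// Assumes m and v were already decayed by beta1/beta2 for all elements
// elsewhere (not visible in these hits); this loop only adds the gradient
// terms for rows present in the sparse gradient.
void SparseAdamAddGradient(std::vector<float> &m, std::vector<float> &v, std::vector<float> &m_t,
                           const std::vector<float> &summed_grad,
                           const std::vector<int> &unique_indices,
                           std::size_t row_width, float beta1, float beta2) {
  for (std::size_t k = 0; k < unique_indices.size(); ++k) {
    const std::size_t base = static_cast<std::size_t>(unique_indices[k]) * row_width;
    for (std::size_t i = 0; i < row_width; ++i) {
      const std::size_t j = base + i;
      const float g = summed_grad[k * row_width + i];
      m[j] += (1 - beta1) * g;
      v[j] += (1 - beta2) * g * g;
      m_t[j] = m[j] * beta1 + (1 - beta1) * g;  // Nesterov-adjusted first moment
    }
  }
}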
sparse_apply_proximal_adagrad_cpu_kernel.cc
     46  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeProximalAdagrad() local
     47  accum[j] += summed_grad * summed_grad;  in ComputeProximalAdagrad()
     50  prox_v -= summed_grad * learning_rate;  in ComputeProximalAdagrad()
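ComputeProximalAdagrad grows accum with the squared summed gradient and subtracts the scaled gradient from a proximal value prox_v. Only those two lines come from the hits; the learning-rate scaling and the l1/l2 shrinkage in the sketch below follow the standard proximal Adagrad formula and are assumptions about the surrounding code, as is the signature.

#include <algorithm>
#include <cmath>

// Sketch of one element's proximal Adagrad step around summed_grad.
// Lines marked "matches the hit" come from the kernel; the rest is the
// standard formula, assumed here for completeness.
float ProximalAdagradStep(float var, float &accum, float summed_grad,
                          float lr, float l1, float l2) {
  accum += summed_grad * summed_grad;                  // matches the hit on line 47
  const float learning_rate = lr / std::sqrt(accum);   // assumed per-element scaling
  float prox_v = var;
  prox_v -= summed_grad * learning_rate;               // matches the hit on line 50
  if (l1 > 0) {
    const float shrink = std::max(std::fabs(prox_v) - learning_rate * l1, 0.0f);
    return (prox_v > 0 ? 1.0f : -1.0f) * shrink / (1 + l2 * learning_rate);
  }
  return prox_v / (1 + l2 * learning_rate);
}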