Searched refs:summed_grad (Results 1 – 6 of 6) sorted by relevance
 50  std::vector<float> summed_grad(12);  in TEST_F() local
 54  SparseGradient<int> unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
103  std::vector<float> summed_grad(12);  in TEST_F() local
106  SparseGradient<int> unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
 50  std::vector<float> summed_grad(12);  in TEST_F() local
 54  SparseGradient unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
103  std::vector<float> summed_grad(12);  in TEST_F() local
106  SparseGradient unique_grad({summed_grad.data(), unique_indices.data(), 6});  in TEST_F()
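The two TEST_F results above construct a SparseGradient from a flat value buffer, a buffer of unique indices, and a count of unique rows. A minimal sketch of what that setup might look like, assuming a plain aggregate; only value_ is confirmed by the optimizer results below, while the other field names (indices_, dims_) and the struct layout are assumptions:

#include <cstddef>
#include <vector>

// Hypothetical aggregate inferred from the call sites; value_ appears in the
// optimizer results below, while indices_ and dims_ are assumed names.
template <typename T>
struct SparseGradient {
  float *value_;   // flat gradient values, one row per unique index
  T *indices_;     // unique row indices into the variable being updated
  size_t dims_;    // number of unique rows
};

int main() {
  // Mirrors the test setup: 12 summed gradient values covering 6 unique rows.
  std::vector<float> summed_grad(12);
  std::vector<int> unique_indices(6);
  SparseGradient<int> unique_grad({summed_grad.data(), unique_indices.data(), 6});
  return unique_grad.dims_ == 6 ? 0 : 1;
}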
49  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeLazyAdam() local
50  m[j] = beta1 * m[j] + (1 - beta1) * summed_grad;  in ComputeLazyAdam()
51  v[j] = beta2 * v[j] + (1 - beta2) * summed_grad * summed_grad;  in ComputeLazyAdam()
53  var[j] -= lr * (m[j] * beta1 + (1 - beta1) * summed_grad) / (std::sqrt(v[j]) + epsilon);  in ComputeLazyAdam()
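Result 3 is the inner loop of ComputeLazyAdam: each unique row's summed gradient updates the first and second moment estimates and then the variable, with a Nesterov-style blend of the fresh gradient into the step. A standalone sketch of that update; the formulas come from the matched lines, while the loop structure, buffer layout, and demo values are assumptions:

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Sketch of the lazy-Adam row update; the formulas match the lines above,
// the surrounding loop and the mapping from sparse index k to row j are assumed.
void LazyAdamRow(float *var, float *m, float *v, const float *summed_grad_row,
                 size_t dim, float lr, float beta1, float beta2, float epsilon) {
  for (size_t j = 0; j < dim; ++j) {
    float summed_grad = summed_grad_row[j];
    m[j] = beta1 * m[j] + (1 - beta1) * summed_grad;
    v[j] = beta2 * v[j] + (1 - beta2) * summed_grad * summed_grad;
    // Nesterov-style step: the current gradient is blended into m once more.
    var[j] -= lr * (m[j] * beta1 + (1 - beta1) * summed_grad) / (std::sqrt(v[j]) + epsilon);
  }
}

int main() {
  std::vector<float> var(4, 1.0f), m(4, 0.0f), v(4, 0.0f), grad(4, 0.5f);
  LazyAdamRow(var.data(), m.data(), v.data(), grad.data(), 4, 0.01f, 0.9f, 0.999f, 1e-8f);
  std::printf("var[0] after one lazy-Adam step: %f\n", var[0]);
  return 0;
}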
48  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeFtrl() local
49  auto accum_new = accum[j] + summed_grad * summed_grad;  in ComputeFtrl()
53  linear[j] += summed_grad - (y - std::sqrt(accum[j])) / lr * var[j];  in ComputeFtrl()
56  linear[j] += summed_grad - (y - std::pow(accum[j], -lr_power)) / lr * var[j];  in ComputeFtrl()
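Result 4 is the FTRL-Proximal accumulator update: the squared gradient grows the accumulator, and the linear term absorbs the gradient minus a correction proportional to the current weight. The matched lines do not show how y is computed; the sketch below assumes the standard FTRL definition, y = accum_new raised to -lr_power (a sqrt fast path when lr_power == -0.5):

#include <cmath>
#include <cstddef>
#include <vector>

// Sketch of the FTRL linear/accumulator update; lines 49, 53, and 56 above
// are reproduced verbatim, while the definition of y, the branch condition,
// and the accumulator write-back are assumptions from standard FTRL-Proximal.
void FtrlRow(float *var, float *accum, float *linear, const float *summed_grad_row,
             size_t dim, float lr, float lr_power) {
  for (size_t j = 0; j < dim; ++j) {
    float summed_grad = summed_grad_row[j];
    float accum_new = accum[j] + summed_grad * summed_grad;
    if (lr_power == -0.5f) {
      float y = std::sqrt(accum_new);  // fast path for the common lr_power
      linear[j] += summed_grad - (y - std::sqrt(accum[j])) / lr * var[j];
    } else {
      float y = std::pow(accum_new, -lr_power);
      linear[j] += summed_grad - (y - std::pow(accum[j], -lr_power)) / lr * var[j];
    }
    accum[j] = accum_new;
  }
}

int main() {
  std::vector<float> var(4, 1.0f), accum(4, 0.1f), linear(4, 0.0f), grad(4, 0.5f);
  FtrlRow(var.data(), accum.data(), linear.data(), grad.data(), 4, 0.01f, -0.5f);
  return 0;
}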
47  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeAdam() local
48  m[j] += (1 - beta1) * summed_grad;  in ComputeAdam()
49  v[j] += (1 - beta2) * summed_grad * summed_grad;  in ComputeAdam()
51  m_t[j] = m[j] * beta1 + (1 - beta1) * summed_grad;  in ComputeAdam()
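Result 5 accumulates the sparse gradient into the Adam moments with +=, rather than the usual m = beta1 * m + (1 - beta1) * g. That pattern suggests the beta1/beta2 decay is applied to every row in a separate dense pass before the sparse rows receive their gradient terms (an assumption); m_t is the Nesterov lookahead moment used by the later variable update. A sketch:

#include <cstddef>
#include <vector>

// Sketch of the sparse-Adam gradient accumulation; the three update lines
// match the result above. The prior dense decay pass (m *= beta1, v *= beta2
// over all rows) is an assumption explaining why += is used here.
void AdamAccumulateRow(float *m, float *v, float *m_t, const float *summed_grad_row,
                       size_t dim, float beta1, float beta2) {
  for (size_t j = 0; j < dim; ++j) {
    float summed_grad = summed_grad_row[j];
    m[j] += (1 - beta1) * summed_grad;
    v[j] += (1 - beta2) * summed_grad * summed_grad;
    // Nesterov lookahead moment consumed by the subsequent variable update.
    m_t[j] = m[j] * beta1 + (1 - beta1) * summed_grad;
  }
}

int main() {
  std::vector<float> m(4, 0.0f), v(4, 0.0f), m_t(4, 0.0f), grad(4, 0.5f);
  AdamAccumulateRow(m.data(), v.data(), m_t.data(), grad.data(), 4, 0.9f, 0.999f);
  return 0;
}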
46  auto summed_grad = unique_sparse_grad.value_[k];  in ComputeProximalAdagrad() local
47  accum[j] += summed_grad * summed_grad;  in ComputeProximalAdagrad()
50  prox_v -= summed_grad * learning_rate;  in ComputeProximalAdagrad()
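Result 6 shows the ProximalAdagrad accumulator and the unconstrained step into prox_v. The matched lines leave out how learning_rate is scaled and how prox_v is projected back onto the variable; the sketch below assumes the standard formulation, lr / sqrt(accum) for the rate and an l1 soft-threshold plus l2 shrinkage for the projection:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Sketch of the proximal-Adagrad row update; accum[j] += g*g and
// prox_v -= g * learning_rate match the result above, while the adaptive
// rate and the l1/l2 proximal projection are assumptions from the standard
// ProximalAdagrad formulation.
void ProximalAdagradRow(float *var, float *accum, const float *summed_grad_row,
                        size_t dim, float lr, float l1, float l2) {
  for (size_t j = 0; j < dim; ++j) {
    float summed_grad = summed_grad_row[j];
    accum[j] += summed_grad * summed_grad;
    float learning_rate = lr / std::sqrt(accum[j]);  // assumed adaptive scaling
    float prox_v = var[j];
    prox_v -= summed_grad * learning_rate;
    // Proximal projection: soft-threshold toward zero under l1, shrink under l2.
    if (l1 > 0) {
      var[j] = std::copysign(std::max(std::fabs(prox_v) - learning_rate * l1, 0.0f),
                             prox_v) / (1 + l2 * learning_rate);
    } else {
      var[j] = prox_v / (1 + l2 * learning_rate);
    }
  }
}

int main() {
  std::vector<float> var(4, 1.0f), accum(4, 0.1f), grad(4, 0.5f);
  ProximalAdagradRow(var.data(), accum.data(), grad.data(), 4, 0.01f, 0.0f, 0.0f);
  return 0;
}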