/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/ |
D | sparse_apply_adam_ps_kernel.cc |
  36  std::vector<size_t> &var_shape = *(shape_vec[0]);  in InitKernel() local
  42  Shard(&var_shape, 0);  in InitKernel()
  45  if (var_shape.empty()) {  in InitKernel()
  48  if (!IsSameShape(var_shape, m_shape)) {  in InitKernel()
  51  if (!IsSameShape(var_shape, v_shape)) {  in InitKernel()
  54  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  57  var_first_dim_size_ = var_shape[0];  in InitKernel()
  58  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  59  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  62  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | sparse_apply_lazy_adam_ps_kernel.cc |
  37  std::vector<size_t> &var_shape = *(shape_vec[0]);  in InitKernel() local
  43  Shard(&var_shape, 0);  in InitKernel()
  47  if (var_shape.empty()) {  in InitKernel()
  50  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  53  if (!IsSameShape(var_shape, m_shape)) {  in InitKernel()
  56  if (!IsSameShape(var_shape, v_shape)) {  in InitKernel()
  59  var_first_dim_size_ = var_shape[0];  in InitKernel()
  60  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  61  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  64  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | sparse_apply_ftrl_ps_kernel.cc |
  34  std::vector<size_t> var_shape = *(shape_vec[0]);  in InitKernel() local
  40  Shard(&var_shape, 0);  in InitKernel()
  44  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  47  if (var_shape.empty()) {  in InitKernel()
  50  var_first_dim_size_ = var_shape[0];  in InitKernel()
  53  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  54  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  57  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | pull_kernel.h |
  58  auto var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  in Init() local
  59  for (size_t i = 0; i < var_shape.size(); i++) {  in Init()
  60  var_size_ *= var_shape[i];  in Init()
|
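Taken together, the hits above show that every sparse-apply PS kernel repeats the same InitKernel() shape handshake: var must be non-empty, match the optimizer state tensors (m/v or accum/linear), and agree with grad in rank and in every trailing dimension; var_shape[0] then becomes var_first_dim_size_ and the product of the remaining dimensions becomes var_outer_dim_size_. A minimal standalone sketch of that logic follows; the struct VarDims and the function ValidateVarShapes are illustrative names, not MindSpore API.

#include <cstddef>
#include <stdexcept>
#include <vector>

// Illustrative result holder; the real kernels keep these as the members
// var_first_dim_size_ and var_outer_dim_size_.
struct VarDims {
  size_t first_dim_size = 0;  // var_shape[0], the sparsely indexed dimension
  size_t outer_dim_size = 1;  // product of var_shape[1..n-1]
};

// Mirrors the checks seen in the listing: var non-empty, equal to the state
// shapes, same rank as grad, and equal to grad in every dimension but the first.
VarDims ValidateVarShapes(const std::vector<size_t> &var_shape,
                          const std::vector<size_t> &m_shape,
                          const std::vector<size_t> &v_shape,
                          const std::vector<size_t> &grad_shape) {
  if (var_shape.empty()) {
    throw std::invalid_argument("var must be at least 1-D");
  }
  if (var_shape != m_shape || var_shape != v_shape) {
    throw std::invalid_argument("var, m and v must have the same shape");
  }
  if (var_shape.size() != grad_shape.size()) {
    throw std::invalid_argument("var and grad must have the same rank");
  }
  VarDims dims;
  dims.first_dim_size = var_shape[0];
  for (size_t i = 1; i < var_shape.size(); ++i) {
    if (var_shape[i] != grad_shape[i]) {
      throw std::invalid_argument("var and grad must match in every trailing dimension");
    }
    dims.outer_dim_size *= var_shape[i];
  }
  return dims;
}

For a var of shape {1000, 16, 8} this yields first_dim_size = 1000 and outer_dim_size = 128, i.e. each sparse row update touches 128 contiguous elements.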
/third_party/mindspore/mindspore/ops/operations/ |
D | inner_ops.py |
  262  def infer_shape(self, grad_shape, v_shape, m_shape, var_shape, beta1_shape, sub1_shape,  argument
  264  validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
  265  validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
  266  validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
  324  def infer_shape(self, w_norm_shape, g_norm_shape, lr_shape, update_shape, var_shape):  argument
  325  validator.check("var_shape", var_shape, "update_shape", update_shape, Rel.EQ, self.name)
  326  return var_shape
  543  def infer_shape(self, var_shape, m_shape, v_shape, lr_shape, beta1_shape, beta2_shape,  argument
  545  validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
  546  validator.check("var_shape", var_shape, "v_shape", v_shape, Rel.EQ, self.name)
  [all …]
|
D | nn_ops.py |
  3128  …def infer_shape(self, var_shape, mean_square_shape, moment_shape, learning_rate_shape, grad_shape,…  argument
  3130  … validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
  3131  validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
  3132  validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
  3133  return var_shape
  3245  … def infer_shape(self, var_shape, mean_gradient_shape, mean_square_shape, moment_shape, grad_shape,  argument
  3247  …validator.check("var_shape", var_shape, "mean_gradient_shape", mean_gradient_shape, Rel.EQ, self.n…
  3248  … validator.check("var_shape", var_shape, "mean_square_shape", mean_square_shape, Rel.EQ, self.name)
  3249  validator.check("var_shape", var_shape, "moment_shape", moment_shape, Rel.EQ, self.name)
  3250  validator.check("var_shape", var_shape, "grad_shape", grad_shape, Rel.EQ, self.name)
  [all …]
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ |
D | sparse_apply_ftrl_cpu_kernel.cc |
  88  std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  in InitKernel() local
  93  if (var_shape.empty()) {  in InitKernel()
  96  if (!IsSameShape(var_shape, accum_shape)) {  in InitKernel()
  99  if (!IsSameShape(var_shape, linear_shape)) {  in InitKernel()
  102  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  106  var_first_dim_size_ = var_shape[0];  in InitKernel()
  107  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  108  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  111  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | sparse_apply_lazy_adam_cpu_kernel.cc |
  84  std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  in InitKernel() local
  89  if (var_shape.empty()) {  in InitKernel()
  92  if (!IsSameShape(var_shape, m_shape)) {  in InitKernel()
  95  if (!IsSameShape(var_shape, v_shape)) {  in InitKernel()
  98  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  102  var_first_dim_size_ = var_shape[0];  in InitKernel()
  103  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  104  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  107  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | sparse_apply_proximal_adagrad_cpu_kernel.cc |
  84  std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  in InitKernel() local
  91  if (var_shape.empty()) {  in InitKernel()
  94  if (!IsSameShape(var_shape, accum_shape)) {  in InitKernel()
  97  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  100  var_first_dim_size_ = var_shape[0];  in InitKernel()
  101  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  102  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  105  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | sparse_apply_adam_cpu_kernel.cc |
  105  std::vector<size_t> var_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  in InitKernel() local
  110  if (var_shape.empty()) {  in InitKernel()
  113  if (!IsSameShape(var_shape, m_shape)) {  in InitKernel()
  116  if (!IsSameShape(var_shape, v_shape)) {  in InitKernel()
  119  if (var_shape.size() != grad_shape.size()) {  in InitKernel()
  122  var_first_dim_size_ = var_shape[0];  in InitKernel()
  123  for (size_t i = 1; i < var_shape.size(); ++i) {  in InitKernel()
  124  if (var_shape[i] != grad_shape[i]) {  in InitKernel()
  127  var_outer_dim_size_ *= var_shape[i];  in InitKernel()
|
D | fused_cast_adam_weight_decay_cpu_kernel.cc |
  97  std::vector<size_t> var_shape = AnfAlgo::GetInputDeviceShape(kernel_node, VAR);  in InitKernel() local
  109  for (size_t i : var_shape) {  in InitKernel()
|
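The last two hits in this group are the odd ones out: pull_kernel.h and fused_cast_adam_weight_decay_cpu_kernel.cc do not split the shape at all, they only fold every dimension into a flat element count (var_size_). A hedged equivalent of that fold, with ElementCount as a made-up helper name:

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Flat element count of a tensor shape: the product of all dimensions, which
// is what var_size_ ends up holding after the loops shown above.
size_t ElementCount(const std::vector<size_t> &var_shape) {
  return std::accumulate(var_shape.begin(), var_shape.end(),
                         static_cast<size_t>(1), std::multiplies<size_t>());
}

An empty shape returns 1 here, which assumes the kernels initialize var_size_ to 1 before multiplying, so a scalar counts as a single element.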
/third_party/mindspore/mindspore/core/ops/ |
D | adam.cc |
  31  auto var_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex0]->GetShap…  in AdamInfer() local
  35  CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "m_shape", m_shape, prim_name);  in AdamInfer()
  36  CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "v_shape", v_shape, prim_name);  in AdamInfer()
  37  CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "grad_shape", grad_shape, prim_name);  in AdamInfer()
  48  auto output0 = std::make_shared<abstract::AbstractTensor>(infer_var_type, var_shape);  in AdamInfer()
|
D | sparse_apply_r_m_s_prop.cc |
  40  auto var_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(var_shape_ptr)[kShape];  in InferShape() local
  56  CheckAndConvertUtils::Check(elem.first, elem.second, kEqual, "var shape", var_shape, prim_name);  in InferShape()
  63  …(void)CheckAndConvertUtils::CheckInteger("dimension of var", SizeToLong(var_shape.size()), kGreate…  in InferShape()
  66  …Utils::Check("indices shape", indices_shape[0], kEqual, "the first dimension of var", var_shape[0],  in InferShape()
|
D | apply_adagrad_d_a.cc |
  38  auto var_shape = input_args[0]->BuildShape();  in InferShape() local
  56  …std::vector<abstract::BaseShapePtr>{var_shape, gradient_accumulator_shape, gradient_squared_accumu…  in InferShape()
|
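These core/ops hits are the graph-construction-time mirror of the kernel checks: adam.cc compares var_shape against m, v and grad and then reuses it for the output AbstractTensor, while sparse_apply_r_m_s_prop.cc additionally requires var to have at least one dimension and indices_shape[0] to equal var_shape[0]. A simplified standalone sketch of the dense adam.cc case only, with plain exceptions standing in for CheckAndConvertUtils and InferAdamShape as a hypothetical name:

#include <cstdint>
#include <stdexcept>
#include <vector>

using ShapeVector = std::vector<int64_t>;

// Illustrative frontend-style shape inference: every state tensor must equal
// var's shape, and the inferred output shape is var_shape itself.
ShapeVector InferAdamShape(const ShapeVector &var_shape, const ShapeVector &m_shape,
                           const ShapeVector &v_shape, const ShapeVector &grad_shape) {
  if (m_shape != var_shape || v_shape != var_shape || grad_shape != var_shape) {
    throw std::invalid_argument("m, v and grad must have the same shape as var");
  }
  return var_shape;  // the op's outputs take var's shape
}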
/third_party/mindspore/mindspore/ccsrc/ps/ |
D | worker.cc |
  111  std::vector<int64_t> &var_shape = key_to_optim_shapes_[key][0];  in Push() local
  112  int64_t first_dim_size = var_shape[0];  in Push()
  113  …int64_t outer_dim_size = std::accumulate(var_shape.begin() + 1, var_shape.end(), 1, std::multiplie…  in Push()
|
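Finally, the PS worker recomputes the same split when pushing sparse gradients: the first dimension of the cached optimizer shape plus the product of the remaining dimensions, this time over int64_t with std::accumulate. A short sketch under those assumptions (SplitVarShape is an illustrative name; the key_to_optim_shapes_ lookup is omitted):

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Split a parameter shape into row count and per-row element count, as the
// worker does before pushing a sparse gradient for a given key.
void SplitVarShape(const std::vector<int64_t> &var_shape, int64_t *first_dim_size,
                   int64_t *outer_dim_size) {
  if (var_shape.empty()) {
    *first_dim_size = 0;
    *outer_dim_size = 0;
    return;
  }
  *first_dim_size = var_shape[0];
  *outer_dim_size = std::accumulate(var_shape.begin() + 1, var_shape.end(),
                                    static_cast<int64_t>(1), std::multiplies<int64_t>());
}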