/third_party/mindspore/mindspore/ops/_op_impl/_custom_op/

  _basic.py
    28:  m_shape = shape_a[shape_len - 1]
    31:  m_shape = shape_a[shape_len - 2]
    40:  return m_shape, km_shape, n_shape, kn_shape
    43:  def _check_mn_shape(m_shape, n_shape, km_shape, kn_shape):  (argument)
    45:  if m_shape == 1 and n_shape == 1:
    52:  if m_shape % cce.BLOCK_IN != 0 and m_shape != 1:
    56:  if m_shape != 1:
    69:  def _check_bias(shape_bias, shape_a, shape_b, m_shape, n_shape):  (argument)
    75:  if (is_gevm or is_gemv) and shape_bias[0] != m_shape * n_shape:
    80:  if [i for i in shape_bias[-2:]] != [m_shape, n_shape]:
    [all …]

  matmul_cube_impl.py
    162:  m_shape = shape_a[len(shape_a) - 2]
    178:  if not trans_a and m_shape == 1:
    186:  m_shape = shape_a[len(shape_a) - 2]
    193:  shape_a_temp = (m_shape // block_reduce, km_shape // block_in, block_reduce, block_in)
    195:  shape_a_temp = (m_shape // block_in, km_shape // block_reduce, block_in, block_reduce)

  matmul_cube_fracz_left_cast_impl.py
    54:   def _get_block(trans_a, trans_b, m_shape, n_shape, km_shape, kn_shape):  (argument)
    61:   if not trans_a and m_shape == 1:
    153:  m_shape = shape_aa[len(shape_aa) - 2]
    161:  block_in0, block_out0 = _get_block(trans_a, trans_b, m_shape, n_shape, km_shape, kn_shape)
    164:  shape_aa_tmp = (m_shape // block_reduce, km_shape // block_in0, block_reduce, block_in0)
    166:  shape_aa_tmp = (m_shape // block_in0, km_shape // block_reduce, block_in0, block_reduce)

  matmul_cube_dense_left_impl.py
    111:  m_shape = shape_a[len(shape_a) - 2]
    125:  if not trans_a and m_shape == 1:
    135:  shape_a_temp = (m_shape // block_reduce, km_shape // block_in, block_reduce, block_in)
    137:  shape_a_temp = (m_shape // block_in, km_shape // block_reduce, block_in, block_reduce)
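
The four files in this directory share the same pre-processing before the cube matmul is launched: M and K are read from the trailing two axes of A according to trans_a (and N, KN from B according to trans_b), the degenerate GEVM/GEMV cases (M == 1 or N == 1) get special handling, and A is re-blocked into the 4-D fractal layout the cube unit consumes. The sketch below restates that arithmetic; the helper names, the 16-element block constants, and the branch conventions are illustrative assumptions rather than copies of the code matched above.

    BLOCK_IN = 16      # cube tile size (assumed; the real code reads cce.BLOCK_IN)
    BLOCK_REDUCE = 16  # reduce-axis tile size for fp16 inputs (assumed)

    def get_mkn(shape_a, shape_b, trans_a, trans_b):
        """Read m, km from the last two axes of A and n, kn from B, honouring the transpose flags."""
        m = shape_a[-1] if trans_a else shape_a[-2]
        km = shape_a[-2] if trans_a else shape_a[-1]
        n = shape_b[-2] if trans_b else shape_b[-1]
        kn = shape_b[-1] if trans_b else shape_b[-2]
        return m, km, n, kn

    def check_m(m):
        """M must be block-aligned unless the op degenerates to GEVM (m == 1); the real checks also cover n, km and kn."""
        if m != 1 and m % BLOCK_IN != 0:
            raise ValueError(f"m = {m} must be 1 or a multiple of {BLOCK_IN}")

    def fractal_shape_a(m, km, trans_a):
        """4-D fractal shape of A fed to the cube unit; which branch pairs with trans_a is assumed here."""
        if trans_a:
            return (m // BLOCK_REDUCE, km // BLOCK_IN, BLOCK_REDUCE, BLOCK_IN)
        return (m // BLOCK_IN, km // BLOCK_REDUCE, BLOCK_IN, BLOCK_REDUCE)

For example, a 64 x 32 fp16 A with trans_a False maps to fractal_shape_a(64, 32, False) == (4, 2, 16, 16), i.e. a 4 x 2 grid of 16 x 16 tiles.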

/third_party/mindspore/mindspore/ops/operations/

  inner_ops.py
    262:  def infer_shape(self, grad_shape, v_shape, m_shape, var_shape, beta1_shape, sub1_shape, …  (argument)
    264:  validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
    267:  return m_shape, v_shape, m_shape
    441:  def infer_shape(self, d_shape, s_shape, v_shape, a_shape, l_shape, g_shape, m_shape):  (argument)
    543:  def infer_shape(self, var_shape, m_shape, v_shape, lr_shape, beta1_shape, beta2_shape, …  (argument)
    545:  validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
    548:  return var_shape, m_shape, v_shape

  nn_ops.py
    2667:  def infer_shape(self, v_shape, a_shape, l_shape, g_shape, m_shape):  (argument)
    4595:  … def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape, …  (argument)
    4597:  validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
    4600:  return var_shape, m_shape, v_shape
    4702:  def infer_shape(self, var_shape, m_shape, v_shape, lr_shape, beta1_shape, beta2_shape, …  (argument)
    4704:  validator.check("var_shape", var_shape, "m_shape", m_shape, Rel.EQ, self.name)
    4707:  return var_shape, m_shape, v_shape
    4815:  def infer_shape(self, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape, …  (argument)
    4817:  validator.check("grad_shape", grad_shape, "m_shape", m_shape, Rel.EQ, self.name)
    4955:  … def infer_shape(self, var_shape, m_shape, v_shape, beta1_power_shape, beta2_power_shape, lr_shape, …  (argument)
    [all …]
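
Both operations files apply the same infer_shape contract to the Adam-family fused ops: every optimizer state tensor must have exactly the shape of the variable (or of the gradient, for the variant around line 4815), and the matching shapes are then passed through as the output shapes; only the returned tuple differs from op to op. A standalone sketch of that check, using a plain exception in place of MindSpore's validator (the function name is illustrative):

    def infer_adam_shapes(var_shape, m_shape, v_shape):
        """Require the m and v states to match var, then pass the shapes through as outputs."""
        for name, shape in (("m_shape", m_shape), ("v_shape", v_shape)):
            if list(shape) != list(var_shape):
                raise ValueError(f"{name} {list(shape)} must equal var_shape {list(var_shape)}")
        return var_shape, m_shape, v_shape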

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/

  adam_weight_decay_gpu_kernel.h
    85:   auto m_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  (in Init(), local)
    88:   … is_null_input_ = CHECK_NULL_INPUT(variable_shape) || CHECK_NULL_INPUT(m_shape) || CHECK_NULL_INPUT…  (in Init())
    99:   for (size_t i = 0; i < m_shape.size(); i++) {  (in Init())
    100:  m_size_ *= m_shape[i];  (in Init())

  adam_gpu_kernel.h
    88:   auto m_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  (in Init(), local)
    91:   … is_null_input_ = CHECK_NULL_INPUT(variable_shape) || CHECK_NULL_INPUT(m_shape) || CHECK_NULL_INPUT…  (in Init())
    102:  for (size_t i = 0; i < m_shape.size(); i++) {  (in Init())
    103:  m_size_ *= m_shape[i];  (in Init())
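
Both GPU kernels do the same bookkeeping in Init(): fetch the inferred shape of input 1 (the m state), flag a null input, and take the element count as the product of all dimensions. A minimal sketch of that step; treating an empty shape or any zero dimension as null is an assumption standing in for the CHECK_NULL_INPUT macro:

    from math import prod

    def element_count(shape):
        """Return 0 for a null shape, otherwise the product of its dimensions (the m_size_ accumulation)."""
        if not shape or 0 in shape:
            return 0
        return prod(shape)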

/third_party/mindspore/mindspore/core/ops/

  adam.cc
    32:  … auto m_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[kInputIndex1]->GetShapeT…  (in AdamInfer(), local)
    35:  CheckAndConvertUtils::Check("var_shape", var_shape, kEqual, "m_shape", m_shape, prim_name);  (in AdamInfer())
    49:  auto output1 = std::make_shared<abstract::AbstractTensor>(infer_m_type, m_shape);  (in AdamInfer())

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/

  sparse_apply_adam_ps_kernel.cc
    37:  std::vector<size_t> &m_shape = *(shape_vec[1]);  (in InitKernel(), local)
    43:  Shard(&m_shape, 0);  (in InitKernel())
    48:  if (!IsSameShape(var_shape, m_shape)) {  (in InitKernel())

  sparse_apply_lazy_adam_ps_kernel.cc
    38:  std::vector<size_t> &m_shape = *(shape_vec[1]);  (in InitKernel(), local)
    44:  Shard(&m_shape, 0);  (in InitKernel())
    53:  if (!IsSameShape(var_shape, m_shape)) {  (in InitKernel())

/third_party/boost/boost/math/distributions/

  gamma.hpp
    78:  : m_shape(l_shape), m_scale(l_scale)  (in gamma_distribution())
    86:  return m_shape;  (in shape())
    97:  RealType m_shape;  // distribution shape  (member in boost::math::gamma_distribution)

  pareto.hpp
    140:  : m_scale(l_scale), m_shape(l_shape)  (in pareto_distribution())
    153:  return m_shape;  (in shape())
    158:  RealType m_shape;  // distribution shape (k) or alpha  (member in boost::math::pareto_distribution)

  inverse_gamma.hpp
    95:   : m_shape(l_shape), m_scale(l_scale)  (in inverse_gamma_distribution())
    105:  return m_shape;  (in shape())
    116:  RealType m_shape;  // distribution shape  (member in boost::math::inverse_gamma_distribution)

  weibull.hpp
    77:  : m_shape(l_shape), m_scale(l_scale)  (in weibull_distribution())
    85:  return m_shape;  (in shape())
    96:  RealType m_shape;  // distribution shape  (member in boost::math::weibull_distribution)
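
All four Boost headers above follow the same layout: the distribution object stores the shape and scale parameters passed to its constructor in the m_shape and m_scale members and hands them back through shape() and scale() accessors (pareto.hpp merely initialises scale first and documents its shape as k or alpha). A minimal Python mirror of that two-parameter pattern, not the Boost API itself:

    class ShapeScaleDistribution:
        """Skeleton of a two-parameter distribution holding a shape and a scale."""

        def __init__(self, shape=1.0, scale=1.0):
            if shape <= 0 or scale <= 0:
                raise ValueError("shape and scale must be positive")
            self._shape = shape  # counterpart of m_shape
            self._scale = scale  # counterpart of m_scale

        def shape(self):
            return self._shape

        def scale(self):
            return self._scale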

/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/

  adam_delta_cpu_kernel.cc
    64:  std::vector<size_t> m_shape = AnfAlgo::GetInputDeviceShape(kernel_node, 0);  (in InitKernel(), local)
    68:  if (!IsSameShape(delta_shape, m_shape)) {  (in InitKernel())

  sparse_apply_lazy_adam_cpu_kernel.cc
    85:  std::vector<size_t> m_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  (in InitKernel(), local)
    92:  if (!IsSameShape(var_shape, m_shape)) {  (in InitKernel())

  sparse_apply_adam_cpu_kernel.cc
    106:  std::vector<size_t> m_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  (in InitKernel(), local)
    113:  if (!IsSameShape(var_shape, m_shape)) {  (in InitKernel())