/third_party/mindspore/mindspore/ops/_op_impl/_custom_op/

matmul_cube_impl.py
   82  shape_a, shape_b, trans_a, trans_b = _get_shape_a_b(input_x1, input_x2, trans_a, trans_b)
   93  matmul_vector_cce(shape_a, shape_b, src_dtype, trans_a, trans_b, shape_bias, kernel_name)
   95  _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b)
   97  block_in, block_out = _get_block(shape_a, shape_b, trans_a, trans_b)
   99  … block_in, block_out, shape_a, shape_b)
  130  shape_b = input_x2.get("ori_shape")
  135  if shape_b and len(shape_b) < 2:
  136  shape_b = input_x2.get("shape")
  139  shape_b = list(shape_b)
  143  shape_b = _get_input_shape(shape_b)
  [all …]

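The matches at lines 130–143 show the op reading the second input's shape from a TBE-style tensor description, falling back from "ori_shape" to "shape" when the original shape has fewer than two dimensions, before it is normalized and passed to _shape_check. A minimal sketch of that pattern, assuming dict-style tensor descriptions; the helper name below is illustrative, not this file's API:

# Hedged sketch (not the actual matmul_cube_impl.py code): read a matrix shape from a
# dict-style tensor description, preferring "ori_shape" and falling back to "shape"
# when the original shape has fewer than two dimensions.
def _read_matrix_shape(tensor_desc):
    shape = tensor_desc.get("ori_shape")
    if shape and len(shape) < 2:
        # ori_shape is degenerate (e.g. a 1-D entry); use the device shape instead
        shape = tensor_desc.get("shape")
    return list(shape)

if __name__ == "__main__":
    input_x2 = {"ori_shape": (16,), "shape": (1, 16)}
    print(_read_matrix_shape(input_x2))  # [1, 16]
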
matmul_cube_dense_left_impl.py
   53  shape_b = input_x2.get("ori_shape")
   57  n, c, h, w = shape_b
   62  shape_b = [n, c1 * h * w * c0]
   72  shape_b = [c1 * h * w * c0, c1 * h * w * c0]
   75  shape_a = [shape_b[0], shape_b[0]]
   78  shape_b = [shape_a[1], shape_a[1]]
   81  shape_b = list(shape_b)
   84  shape_b = _get_input_shape(shape_b)
   87  util.check_shape_rule(shape_b)
   89  util.check_shape_size(shape_b, SHAPE_SIZE_LIMIT)
  [all …]

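Lines 53–72 suggest a 4-D NCHW weight (n, c, h, w) is flattened into a square matrix of size c1*h*w*c0, where the channel dimension is split into c1 blocks of c0 channels. A rough sketch of that shape arithmetic; c0 = 16 is an assumption (the usual Ascend cube block), the real kernel may derive it from the data type:

# Hedged sketch, not the library code: how a 4-D NCHW weight might be flattened into
# the square matrix shape constructed at line 72 above.
C0 = 16  # assumed cube block size

def dense_left_square_shape(shape_b):
    n, c, h, w = shape_b
    c1 = (c + C0 - 1) // C0      # number of C0-sized channel blocks
    k = c1 * h * w * C0          # flattened inner dimension
    return [k, k]                # square left-hand matrix

print(dense_left_square_shape([64, 3, 7, 7]))  # [784, 784]
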
_basic.py
   24  def _get_km_kn_shape(shape_a, shape_b, trans_a, trans_b):
   35  kn_shape = shape_b[shape_len - 1]
   36  n_shape = shape_b[shape_len - 2]
   38  kn_shape = shape_b[shape_len - 2]
   39  n_shape = shape_b[shape_len - 1]
   69  def _check_bias(shape_bias, shape_a, shape_b, m_shape, n_shape):
   72  is_gemv = True if shape_b[-2] == 1 or shape_b[-1] == 1 else False
   86  def _shape_check(shape_a, shape_b, shape_bias, src_dtype, trans_a, trans_b):
  115  if shape_len != len(shape_b):
  122  m_shape, km_shape, n_shape, kn_shape = _get_km_kn_shape(shape_a, shape_b, trans_a, trans_b)
  [all …]

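_get_km_kn_shape picks the m/k dimensions from shape_a and the k/n dimensions from shape_b according to the transpose flags, and _shape_check then compares the two reduce dimensions (line 72 additionally flags a GEMV case when either trailing dimension of B is 1). A minimal illustration of that rule, not a copy of _basic.py:

# Hedged sketch of the dimension bookkeeping suggested by the matches above: with
# trans_b set, B is laid out as (n, k), otherwise as (k, n); likewise for A.
def get_mkn(shape_a, shape_b, trans_a, trans_b):
    m, km = (shape_a[-1], shape_a[-2]) if trans_a else (shape_a[-2], shape_a[-1])
    n, kn = (shape_b[-2], shape_b[-1]) if trans_b else (shape_b[-1], shape_b[-2])
    if km != kn:
        raise ValueError(f"reduce dimensions do not match: {km} vs {kn}")
    return m, km, n

print(get_mkn([32, 64], [128, 64], trans_a=False, trans_b=True))  # (32, 64, 128)
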
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/graph_kernel/

insert_pad.cc
   38  auto TransANotTransB = [](const vec &shape_a, const vec &shape_b, vec *pad_shape_a, vec *pad_shape_…
   43  N = shape_b[size - 1];
   53  auto TransATransB = [](const vec &shape_a, const vec &shape_b, vec *pad_shape_a, vec *pad_shape_b) {
   58  N = shape_b[size - 2];
   68  auto NotTransATransB = [](const vec &shape_a, const vec &shape_b, vec *pad_shape_a, vec *pad_shape_…
   73  N = shape_b[size - 2];
   83  auto NotTransANotTransB = [](const vec &shape_a, const vec &shape_b, vec *pad_shape_a, vec *pad_sha…
   88  N = shape_b[size - 1];
  115  auto shape_b = AnfAlgo::GetInputDeviceShape(matmul, 1);        (in NeedPad())
  130  std::tie(K, M, N, pad_K, pad_M, pad_N) = f(shape_a, shape_b, pad_shape_a, pad_shape_b);   (in NeedPad())
  [all …]

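The four lambdas cover the transpose combinations: each reads M, N and K from the appropriate positions of shape_a/shape_b and fills the padded shapes, which NeedPad then compares against the originals. A hedged Python restatement of that decision; BLOCK = 16 is an assumption, and the tuple ordering here does not claim to mirror insert_pad.cc:

# Hedged sketch: read M, K, N according to the transpose flags and round each up to a
# block multiple; padding is needed when any rounded value differs from the original.
BLOCK = 16  # assumed alignment

def pad_up(x, block=BLOCK):
    return (x + block - 1) // block * block

def matmul_pad_dims(shape_a, shape_b, trans_a, trans_b):
    m, k = (shape_a[-1], shape_a[-2]) if trans_a else (shape_a[-2], shape_a[-1])
    n = shape_b[-2] if trans_b else shape_b[-1]
    pad_m, pad_k, pad_n = pad_up(m), pad_up(k), pad_up(n)
    need_pad = (pad_m, pad_k, pad_n) != (m, k, n)
    return need_pad, (m, k, n), (pad_m, pad_k, pad_n)

print(matmul_pad_dims([30, 20], [20, 50], False, False))
# (True, (30, 20, 50), (32, 32, 64))
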
/third_party/mindspore/mindspore/_extends/graph_kernel/expanders/

matmul.py
   34  self.shape_b = self.inputs[1]['shape']
   41  k_b = self.shape_b[-1] if self.transpose_b else self.shape_b[-2]
   66  shape_b_trans = transpose(self.shape_b)

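The expander derives the reduce dimension of B from the transpose flag (line 41) and swaps the last two dimensions when it needs the transposed shape (line 66). A small sketch of that check; the function names below are illustrative, not the expander's own:

# Hedged sketch of the reduce-dimension check implied by line 41 above.
def transpose(shape):
    # swap the last two dimensions, keeping any batch dimensions in front
    return shape[:-2] + [shape[-1], shape[-2]]

def check_k(shape_a, shape_b, transpose_a, transpose_b):
    k_a = shape_a[-2] if transpose_a else shape_a[-1]
    k_b = shape_b[-1] if transpose_b else shape_b[-2]
    if k_a != k_b:
        raise ValueError(f"MatMul reduce dims differ: {k_a} vs {k_b}")
    return k_a

print(check_k([8, 32], [16, 32], False, True))  # 32
print(transpose([4, 16, 32]))                   # [4, 32, 16]
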
/third_party/mindspore/mindspore/nn/probability/distribution/_utils/

utils.py
  242  def raise_broadcast_error(shape_a, shape_b):

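This helper presumably raises when two shapes cannot be broadcast together. For reference, the standard broadcasting rule it would enforce aligns shapes from the right and requires each dimension pair to match or be 1; the sketch below illustrates that rule and is not the utils.py implementation:

# Hedged illustration of NumPy-style broadcast compatibility.
def broadcast_shape(shape_a, shape_b):
    ndim = max(len(shape_a), len(shape_b))
    a = (1,) * (ndim - len(shape_a)) + tuple(shape_a)
    b = (1,) * (ndim - len(shape_b)) + tuple(shape_b)
    out = []
    for x, y in zip(a, b):
        if x != y and x != 1 and y != 1:
            raise ValueError(f"{shape_a} and {shape_b} cannot be broadcast together")
        out.append(max(x, y))
    return tuple(out)

print(broadcast_shape((4, 1, 3), (2, 3)))  # (4, 2, 3)
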
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/

common_utils.h
   88  bool IsSameShape(const std::vector<size_t> &shape_a, const std::vector<size_t> &shape_b);

common_utils.cc
  545  bool IsSameShape(const std::vector<size_t> &shape_a, const std::vector<size_t> &shape_b) {
  546  if (shape_a.size() != shape_b.size()) {
  550  if (shape_a[i] != shape_b[i]) {

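The truncated body above performs the usual two checks (equal rank, then element-by-element comparison). A trivial Python analogue, for reference only:

# Hedged one-line analogue of IsSameShape; not the C++ code itself.
def is_same_shape(shape_a, shape_b):
    return len(shape_a) == len(shape_b) and all(x == y for x, y in zip(shape_a, shape_b))
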
/third_party/mindspore/mindspore/numpy/

math_ops.py
  2876  shape_b = F.shape(b)
  2879  kron_shape = _seq_prod(shape_a, shape_b)
  2881  a = F.tile(a, _add_unit_axes(shape_b, 2*ndim, False))
  2947  shape_b = F.shape(b)
  2951  b_has_z = shape_b[-1] == 3
  2952  shape_out = _infer_out_shape(shape_a[:-1], shape_b[:-1])
  2968  b_slice_size = shape_b[:-1] + (1,)

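The kron matches (2876–2881) rely on the fact that the output shape is the elementwise product of the rank-aligned input shapes, built here by tiling; the cross matches (2947–2968) broadcast the leading dimensions and check whether the last axis carries a z component. A short NumPy check of those shape relations; this only demonstrates the math, it is not the mindspore.numpy implementation:

# kron: output shape is the elementwise product of the (rank-aligned) input shapes.
import numpy as np

a = np.ones((2, 3))
b = np.ones((4, 5))
kron_shape = tuple(x * y for x, y in zip(a.shape, b.shape))
assert np.kron(a, b).shape == kron_shape == (8, 15)

# cross: the batch part of the output broadcasts shape_a[:-1] with shape_b[:-1],
# and the last axis is 3 whenever at least one operand has a z component.
u = np.ones((4, 1, 3))
v = np.ones((5, 2))          # 2-D vectors: no z component
print(np.cross(u, v).shape)  # (4, 5, 3)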