/third_party/mindspore/tests/ut/python/parallel/ |
D | test_gathernd_further.py |
     48  def __init__(self, w1_shape, indices_shape, strategy1=None, strategy2=None, strategy3=None):  argument
     52  self.indices = Tensor(np.ones(indices_shape), dtype=ms.int32)
     64  def __init__(self, w1_shape, indices_shape, strategy1=None, strategy2=None, strategy3=None):  argument
     68  self.indices = Tensor(np.ones(indices_shape), dtype=ms.int32)
     79  def __init__(self, w1_shape, indices_shape, strategy1=None, strategy2=None, strategy3=None):  argument
     83  self.indices = Tensor(np.ones(indices_shape), dtype=ms.int32)
    114  indices_shape = [8, 4, 2, 1]
    118  net = Net(w1_shape, indices_shape, strategy1, strategy2, strategy3)
    126  indices_shape = [8, 4, 2, 2]
    130  net = Net(w1_shape, indices_shape, strategy1, strategy2, strategy3)
    [all …]
|
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/opencl/ |
D | gather_tests.cc |
     33  std::vector<int> indices_shape = {5};  in TEST_F() local
     44  {indices_shape, indices, CONST_TENSOR, kNumberTypeInt32},  in TEST_F()
     52  std::vector<int> indices_shape = {1};  in TEST_F() local
     63  {indices_shape, indices, CONST_TENSOR, kNumberTypeInt32},  in TEST_F()
     71  std::vector<int> indices_shape = {1};  in TEST_F() local
     82  {indices_shape, indices, VAR, kNumberTypeInt32},  in TEST_F()
     90  std::vector<int> indices_shape = {2};  in TEST_F() local
    112  {indices_shape, indices_datas[i], CONST_TENSOR, data_types[i]},  in TEST_F()
    121  std::vector<int> indices_shape = {2};  in TEST_F() local
    143  {indices_shape, indices_datas[i], VAR, data_types[i]},  in TEST_F()
    [all …]
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/ |
D | scatter_nd_fp32.cc |
     72  auto indices_shape = indices->shape();  in ReSize() local
     79  for (size_t i = 0; i < indices_shape.size() - 1; i++) {  in ReSize()
     80  if (update_shape.at(i) != indices_shape.at(i)) {  in ReSize()
     85  for (size_t i = 0; i < shape->ElementsNum() - (indices_shape.size() - 1); i++) {  in ReSize()
     86  if (update_shape.at(i + indices_shape.size() - 1) != shape_data[i + indices_shape.size() - 1]) {  in ReSize()
     87  MS_LOG(ERROR) << "Value of " << i + indices_shape.size() - 1  in ReSize()
     95  for (int i = indices_shape.size() - 1; i < update_rank; i++) {  in ReSize()
    108  num_unit_ *= update_shape.at(indices_shape.size() - 2);  in ReSize()
    109  for (int i = indices_shape.size() - 3; i >= 0; i--) {  in ReSize()
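The ReSize() checks quoted above enforce the usual ScatterNd consistency rule: the leading dimensions of the updates tensor must match indices_shape[:-1], and its trailing dimensions must match the output shape starting at index depth indices_shape[-1]. A minimal NumPy-free sketch of that rule, as a reading aid only (the helper name check_scatter_nd_shapes is illustrative, not from the MindSpore source):

```python
def check_scatter_nd_shapes(indices_shape, update_shape, out_shape):
    """Illustrative restatement of the ScatterNd shape rule checked in ReSize()."""
    index_depth = indices_shape[-1]
    # updates must look like indices_shape[:-1] + out_shape[index_depth:]
    expected = list(indices_shape[:-1]) + list(out_shape[index_depth:])
    return list(update_shape) == expected

# e.g. scattering 4 rows into a (6, 3) output needs updates of shape (4, 3)
assert check_scatter_nd_shapes((4, 1), (4, 3), (6, 3))
assert not check_scatter_nd_shapes((4, 1), (4, 2), (6, 3))
```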
|
D | gatherNd_fp32.cc |
     54  auto indices_shape = indices_tensor->shape();  in ReSize() local
     55  int indices_rank = indices_shape.size();  in ReSize()
     58  count_ *= indices_shape[i];  in ReSize()
     79  auto indices_shape = indices_tensor->shape();  in InitOffset() local
     81  int indices_rank = indices_shape.size();  in InitOffset()
     83  int idx_lastshape = indices_shape[indices_rank - 1];  in InitOffset()
|
D | scatter_nd_update_fp32.cc |
     70  auto indices_shape = indices->shape();  in ReSize() local
     73  for (int i = indices_shape.size() - 1; i < update_rank; i++) {  in ReSize()
     87  num_unit_ *= update_shape.at(indices_shape.size() - 2);  in ReSize()
     88  for (int i = indices_shape.size() - 3; i >= 0; i--) {  in ReSize()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/arrays/ |
D | scatter_nd_functor_gpu_kernel.h |
     89  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  in Init() local
     91  auto index_depth = indices_shape.back();  in Init()
     95  if (indices_shape.size() < 2) {  in Init()
     98  if (updates_shape.size() != indices_shape.size() - 1 + input_shape.size() - index_depth) {  in Init()
    101  for (size_t i = 0; i < indices_shape.size() - 1; ++i) {  in Init()
    102  if (updates_shape[i] != indices_shape[i]) {  in Init()
    108  for (size_t i = 0; i < indices_shape.size(); i++) {  in Init()
    109  indices_size_ *= indices_shape[i];  in Init()
    122  for (size_t i = indices_shape.size() - 1; i < updates_shape.size(); ++i) {  in Init()
    126  num_units_ *= updates_shape[indices_shape.size() - 2];  in Init()
    [all …]
|
/third_party/mindspore/mindspore/lite/test/ut/src/runtime/kernel/arm/fp32/ |
D | one_hot_fp32_test.cc |
     29  …void Prepare(const std::vector<int> &indices_shape, int *indices_data, int *depth, float *off_on_v…
     57  void TestOneHotFp32::Prepare(const std::vector<int> &indices_shape, int *indices_data, int *depth, …  in Prepare() argument
     61  indices_tensor_.set_shape(indices_shape);  in Prepare()
     84  std::vector<int> indices_shape{3, 3};  in TEST_F() local
     91  Prepare(indices_shape, indices, depth, off_on, -1, output_shape, out_data, 2);  in TEST_F()
    103  std::vector<int> indices_shape{3, 3};  in TEST_F() local
    110  Prepare(indices_shape, indices, depth, off_on, 1, output_shape, out_data, 2);  in TEST_F()
    122  std::vector<int> indices_shape{3, 3};  in TEST_F() local
    129  Prepare(indices_shape, indices, depth, off_on, 0, output_shape, out_data, 2);  in TEST_F()
|
/third_party/mindspore/mindspore/ops/operations/ |
D | sparse_ops.py |
     68  indices_shape = indices['shape']
     69  if len(indices_shape) != 2:
     73  if len(values_shape) != 1 or values_shape[0] != indices_shape[0]:
     82  if len(sparse_shape_v) != indices_shape[1]:
    158  indices_shape = indices['shape']
    159  if len(indices_shape) != 2 or indices_shape[1] != 2:
    163  if len(values_shape) != 1 or values_shape[0] != indices_shape[0]:
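The Python checks above validate a COO-style sparse representation: indices must be 2-D of shape (N, ndims), values must be 1-D with one entry per index row, and the dense shape must have ndims entries (with the extra indices_shape[1] == 2 constraint at line 159 for the 2-D-only op). A hedged NumPy sketch of the same constraints; the helper name validate_coo is illustrative, not MindSpore API:

```python
import numpy as np

def validate_coo(indices, values, sparse_shape):
    """Illustrative version of the sparse_ops.py checks: COO indices/values/shape consistency."""
    if indices.ndim != 2:
        raise ValueError(f"indices must be 2-D, got {indices.ndim}-D")
    if values.ndim != 1 or values.shape[0] != indices.shape[0]:
        raise ValueError("values must be 1-D with one entry per row of indices")
    if len(sparse_shape) != indices.shape[1]:
        raise ValueError("sparse_shape rank must equal indices.shape[1]")

validate_coo(np.array([[0, 1], [2, 3]]), np.array([1.0, 2.0]), (4, 4))
```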
|
D | array_ops.py |
     52  def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):  argument
     53  if indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:
     65  def infer_shape(self, x_shape, indices_shape, updates_shape):  argument
     66  self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)
     86  def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):  argument
     92  if np.any(np.array(indices_shape) == -1) or np.any(np.array(updates_shape) == -1):
     94  … elif indices_shape != [-1] and updates_shape and updates_shape != indices_shape + x_shape[1:]:
    106  def check_shape(self, x_shape, indices_shape, updates_shape):  argument
    107  self._check_scatter_shape(x_shape, indices_shape, updates_shape, self.name)
    120  def _check_scatter_shape(self, x_shape, indices_shape, updates_shape, prim_name):  argument
    [all …]
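The _check_scatter_shape helpers quoted above all encode the same rule for the index-along-axis-0 scatter ops: unless indices_shape is the dynamic placeholder [-1], updates_shape must equal indices_shape + x_shape[1:]. A short standalone restatement (plain Python, not the operator code itself; the function name is hypothetical):

```python
def scatter_updates_shape_ok(x_shape, indices_shape, updates_shape):
    """updates must carry one x[1:]-shaped slice per index, unless indices_shape is dynamic."""
    if indices_shape == [-1] or not updates_shape:
        return True
    return updates_shape == indices_shape + x_shape[1:]

# e.g. scattering into x of shape [8, 16] with 3 indices needs updates of shape [3, 16]
assert scatter_updates_shape_ok([8, 16], [3], [3, 16])
assert not scatter_updates_shape_ok([8, 16], [3], [3, 8])
```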
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ |
D | scatter_nd_cpu_kernel.cc |
     60  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  in InitKernel() local
     62  auto indices_unit_rank = indices_shape.back();  in InitKernel()
     66  if (indices_shape.size() < kMinIndiceRank) {  in InitKernel()
     69  if (updates_shape.size() != indices_shape.size() - 1 + shape.size() - indices_unit_rank) {  in InitKernel()
     72  for (size_t i = 0; i < indices_shape.size() - 1; ++i) {  in InitKernel()
     73  if (updates_shape[i] != indices_shape[i]) {  in InitKernel()
     78  for (size_t i = indices_shape.size() - 1; i < updates_shape.size(); ++i) {  in InitKernel()
     82  num_units_ *= updates_shape[indices_shape.size() - 2];  in InitKernel()
     83  for (int i = SizeToInt(indices_shape.size()) - 3; i >= 0; i--) {  in InitKernel()
|
D | scatter_nd_update_cpu_kernel.cc |
     63  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  in InitKernel() local
     65  auto indices_unit_rank = indices_shape.back();  in InitKernel()
     69  if (indices_shape.size() < kMinIndiceRank) {  in InitKernel()
     72  if (updates_shape.size() != indices_shape.size() - 1 + shape.size() - indices_unit_rank) {  in InitKernel()
     75  for (size_t i = 0; i < indices_shape.size() - 1; ++i) {  in InitKernel()
     76  if (updates_shape[i] != indices_shape[i]) {  in InitKernel()
     82  for (size_t i = indices_shape.size() - 1; i < updates_shape.size(); ++i) {  in InitKernel()
     86  num_units_ *= updates_shape[indices_shape.size() - 2];  in InitKernel()
     87  for (int i = SizeToInt(indices_shape.size()) - 3; i >= 0; i--) {  in InitKernel()
|
D | sparse_to_dense_cpu_kernal.cc |
     33  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  in InitKernel() local
     34  if (indices_shape.size() != kIndicesShapeSize) {  in InitKernel()
     36  << indices_shape.size() << "-D";  in InitKernel()
     39  if (values_shape.size() != 1 || values_shape[0] != indices_shape[0]) {  in InitKernel()
|
D | sparse_tensor_dense_matmul_cpu_kernel.cc |
     37  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, INDICES);  in InitKernel() local
     38  if (indices_shape.size() != kIndicesSizeNum && indices_shape[1] != kIndices2rdDimNum) {  in InitKernel()
     42  << indices_shape;  in InitKernel()
     45  if (values_shape.size() != 1 || values_shape[0] != indices_shape[0]) {  in InitKernel()
|
D | embedding_look_up_cpu_kernel.cc |
     71  std::vector<size_t> indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  in InitKernel() local
     72  for (const auto &shape : indices_shape) {  in InitKernel()
    100  std::vector<size_t> indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 1);  in LaunchKernel() local
    101  for (const auto &shape : indices_shape) {  in LaunchKernel()
|
/third_party/mindspore/mindspore/core/ops/ |
D | gather_nd.cc |
     37  …auto indices_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[…  in InferShape() local
     39  auto indices_rank = indices_shape.size();  in InferShape()
     41  indices_shape[indices_rank - 1], prim_name);  in InferShape()
     44  output_shape.push_back(indices_shape[i]);  in InferShape()
     46  for (size_t i = LongToSize(indices_shape[indices_rank - 1]); i < input_rank; ++i) {  in InferShape()
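The InferShape() logic quoted from gather_nd.cc builds the output shape from the leading indices dimensions followed by the input dimensions beyond the index depth, i.e. indices_shape[:-1] + input_shape[indices_shape[-1]:]. A minimal sketch of that rule (hypothetical helper, not the C++ implementation):

```python
def gather_nd_output_shape(input_shape, indices_shape):
    """Output of GatherNd: indices_shape[:-1] + input_shape[indices_shape[-1]:]."""
    index_depth = indices_shape[-1]
    return list(indices_shape[:-1]) + list(input_shape[index_depth:])

# e.g. gathering 8 two-element index tuples from a (4, 5, 6) input yields shape [8, 6]
assert gather_nd_output_shape((4, 5, 6), (8, 2)) == [8, 6]
```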
|
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/int8/ |
D | gatherNd_int8.cc |
     59  auto indices_shape = indices_tensor->shape();  in ReSize() local
     60  int indices_rank = static_cast<size_t>(indices_shape.size());  in ReSize()
     63  count_ *= indices_shape[i];  in ReSize()
     87  auto indices_shape = indices_tensor->shape();  in InitOffset() local
     88  int indices_rank = static_cast<size_t>(indices_shape.size());  in InitOffset()
     95  int idx_lastshape = indices_shape.at(indices_rank - 1);  in InitOffset()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/ps/ |
D | embedding_look_up_proxy_kernel.cc |
     33  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1);  in InitKernel() local
     55  (void)std::transform(indices_shape.begin(), indices_shape.end(), std::back_inserter(values),  in InitKernel()
     60  << ", indices_shape:" << indices_shape << ", output_shape:" << output_shape;  in InitKernel()
     64  …mindspore::ps::Worker::GetInstance().InitPSEmbeddingTable(key_, input_shape, indices_shape, output…  in InitKernel()
|
D | sparse_apply_adam_ps_kernel.cc |
     40  const std::vector<size_t> &indices_shape = *(shape_vec[10]);  in InitKernel() local
     64  if (indices_shape.size() != 1) {  in InitKernel()
     67  indices_size_ = indices_shape[0];  in InitKernel()
     85  const std::vector<size_t> &indices_shape = shapes[0];  in ReInit() local
     86  indices_size_ = indices_shape[0];  in ReInit()
|
D | sparse_apply_lazy_adam_ps_kernel.cc |
     41  const std::vector<size_t> &indices_shape = *(shape_vec[10]);  in InitKernel() local
     66  if (indices_shape.size() != 1) {  in InitKernel()
     69  indices_size_ = indices_shape[0];  in InitKernel()
     86  const std::vector<size_t> &indices_shape = shapes[0];  in ReInit() local
     87  indices_size_ = indices_shape[0];  in ReInit()
|
D | sparse_apply_ftrl_ps_kernel.cc |
     38  std::vector<size_t> indices_shape = *(shape_vec[4]);  in InitKernel() local
     59  if (indices_shape.size() != 1) {  in InitKernel()
     62  indices_size_ = indices_shape[0];  in InitKernel()
     96  const std::vector<size_t> &indices_shape = shapes[0];  in ReInit() local
     97  indices_size_ = indices_shape[0];  in ReInit()
|
D | embedding_look_up_ps_kernel.cc |
     49  auto indices_shape = *(shape_vec[1]);  in InitKernel() local
     51  for (auto shape : indices_shape) {  in InitKernel()
     74  const auto &indices_shape = shapes[0];  in ReInit() local
     75  indices_lens_ = indices_shape[0];  in ReInit()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nn/ |
D | sparse_ftrl_gpu_kernel.h |
     85  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 4);  in Init() local
     88  CHECK_NULL_INPUT(indices_shape);  in Init()
    113  for (size_t i = 0; i < indices_shape.size(); i++) {  in Init()
    114  indices_size_ *= indices_shape[i];  in Init()
    122  num_index_ = indices_shape[0];  in Init()
|
D | sparse_apply_proximal_adagrad_kernel.h |
     84  auto indices_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 6);  in Init() local
     87  CHECK_NULL_INPUT(indices_shape);  in Init()
    109  for (size_t i = 0; i < indices_shape.size(); i++) {  in Init()
    110  indices_size_ *= indices_shape[i];  in Init()
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/nnacl/infer/ |
D | gather_infer.c |
     53  int indices_shape[MAX_SHAPE_SIZE];  in GatherInferShape() local
     55  ShapeSet(indices_shape, &indices_shape_size, indices->shape_, indices->shape_size_);  in GatherInferShape()
     71  ret = ShapeInsert(out_shape, &out_shape_size, axis, indices_shape[i]);  in GatherInferShape()
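GatherInferShape splices the indices shape into the output shape at the gather axis: the ShapeInsert calls above insert each indices_shape[i] in place of the gathered dimension. A brief Python restatement of the usual Gather shape rule (illustrative helper, not the nnacl code):

```python
def gather_output_shape(input_shape, indices_shape, axis):
    """Output of Gather: input_shape with dimension `axis` replaced by indices_shape."""
    return list(input_shape[:axis]) + list(indices_shape) + list(input_shape[axis + 1:])

# e.g. gathering a (3, 2) index tensor along axis 1 of a (4, 10, 7) input
assert gather_output_shape((4, 10, 7), (3, 2), 1) == [4, 3, 2, 7]
```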
|
/third_party/mindspore/tests/st/pynative/ |
D | test_pynative_embeddinglookup.py |
     56  …def __init__(self, params_shape, indices_shape, offset=0, low=0, high=2, dtype=np.float32, ids_typ…  argument
     59  self.indices_np = np.random.randint(low, high, size=indices_shape).astype(ids_type)
     73  …fact = EmbeddingLookupFactory(params_shape=(2, 4), indices_shape=(2, 3), low=1, high=3, offset=10,…
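The pynative test above draws random indices of indices_shape and exercises EmbeddingLookup with a non-zero offset. As a reading aid, a hedged NumPy sketch of offset-based lookup under the assumption that rows whose adjusted index falls outside the table come back as zeros; the function name embedding_lookup_ref and the example values are illustrative only:

```python
import numpy as np

def embedding_lookup_ref(params, indices, offset=0):
    """Reference lookup: row (i - offset) of params, or zeros when out of range (assumed semantics)."""
    flat = indices.reshape(-1) - offset
    rows = np.zeros((flat.size,) + params.shape[1:], dtype=params.dtype)
    valid = (flat >= 0) & (flat < params.shape[0])
    rows[valid] = params[flat[valid]]
    return rows.reshape(indices.shape + params.shape[1:])

params = np.arange(8, dtype=np.float32).reshape(2, 4)
idx = np.array([[10, 11, 1], [12, 2, 10]])
print(embedding_lookup_ref(params, idx, offset=10).shape)  # (2, 3, 4)
```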
|