/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
D | dynamic_assign_cpu_kernel.cc
    65   auto input_x_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 0);  [in LaunchKernel(), local]
    68   for (size_t i = 0; i < input_x_shape.size(); ++i) {  [in LaunchKernel()]
    69   batch_size_ *= input_x_shape[i];  [in LaunchKernel()]
    72   if (input_x_shape.size() != input_y_shape.size()) {  [in LaunchKernel()]
    75   for (size_t i = 0; i < input_x_shape.size(); ++i) {  [in LaunchKernel()]
    76   if (input_x_shape[i] != input_y_shape[i]) {  [in LaunchKernel()]
    100  …(void)std::transform(input_x_shape.begin(), input_x_shape.end(), std::back_inserter(shape_tmp), Si…  [in LaunchKernel()]
D | assign_cpu_kernel.cc
    41   auto input_x_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);  [in InitKernel(), local]
    43   if (input_x_shape.size() != input_y_shape.size()) {  [in InitKernel()]
    46   for (size_t i = 0; i < input_x_shape.size(); ++i) {  [in InitKernel()]
    47   if (input_x_shape[i] != input_y_shape[i]) {  [in InitKernel()]
    50   batch_size_ *= input_x_shape[i];  [in InitKernel()]
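The two Assign kernels above share one validation pattern: input_y must have the same rank and the same size in every dimension as input_x, and the flat element count is accumulated into batch_size_. A minimal Python sketch of that check, assuming shapes arrive as plain sequences of ints (check_assign_shapes is an illustrative name, not a MindSpore API):

    def check_assign_shapes(input_x_shape, input_y_shape):
        """Sketch of the Assign kernels' shape check: same rank, same dims, return element count."""
        if len(input_x_shape) != len(input_y_shape):
            raise ValueError("x and y must have the same rank")
        batch_size = 1
        for dim_x, dim_y in zip(input_x_shape, input_y_shape):
            if dim_x != dim_y:
                raise ValueError("x and y must have the same shape")
            batch_size *= dim_x  # total number of elements the kernel will copy
        return batch_size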
D | map_uniform_cpu_kernel.cc
    58   auto input_x_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 0);  [in LaunchKernel(), local]
    60   for (size_t i = 0; i < input_x_shape.size(); ++i) {  [in LaunchKernel()]
    61   batch_size_ *= input_x_shape[i];  [in LaunchKernel()]
D | l2normalize_grad_cpu_kernel.cc
    79   auto input_x_shape = input_shape_list_[0];  [in CheckInputShape(), local]
    80   if (input_x_shape.size() != 0) {  [in CheckInputShape()]
    81   if (std::any_of(input_x_shape.begin(), input_x_shape.end(), [](size_t i) { return i == 0; })) {  [in CheckInputShape()]
/third_party/mindspore/mindspore/ops/_op_impl/_custom_op/
D | fused_abs_max1_impl.py
    76   def shape0(tik_instance, input_x_shape, input_x, res):  [argument]
    79   for val in input_x_shape:
    100  def shape1(tik_instance, input_x_shape, ori_shape, input_x, res):  [argument]
    123  for val in input_x_shape:
    146  def shape2(tik_instance, input_x_shape, input_x, res):  [argument]
    149  for val in input_x_shape:
    242  def shape3(tik_instance, input_x_shape, ori_shape, input_x, res):  [argument]
    250  for val in input_x_shape:
    275  def shape4(tik_instance, input_x_shape, input_x, res):  [argument]
    278  for val in input_x_shape:
    [all …]
D | matrix_combine_impl.py
    36   input_x_shape = input_x.get("shape")
    44   input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
    48   matrix_dim = input_x_shape[0] * input_x_shape[1]
    49   if input_x_shape[0] == 1 and input_x_shape[1] == 64:
    62   tik_instance.for_range(0, input_x_shape[0]) as i:
D | transpose02314_impl.py
    42   def _error_feedback(input_x_shape):  [argument]
    56   if input_x_shape not in support_shape:
    57   raise RuntimeError("input_shape %s is not supported" % str(input_x_shape))
    63   input_x_shape = input_x.get("shape")
    65   input_x_shape = tuple(input_x_shape)
    67   _error_feedback(input_x_shape)
    71   input_x = tik_instance.Tensor("float16", input_x_shape, name="input_x", scope=tik.scope_gm)
    75   if tuple(input_x_shape) == (32, 4, 112, 112, 16):
    77   elif tuple(input_x_shape) == (32, 4, 56, 56, 16):
    79   elif tuple(input_x_shape) == (32, 16, 56, 56, 16):
    [all …]
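transpose02314_impl.py gates execution on a fixed whitelist of 5D (N, C1, H, W, C0) shapes and raises RuntimeError for anything else. A hedged sketch of that gate; the whitelist below is only the subset visible in the snippets above, not the kernel's full supported set:

    # Partial whitelist reconstructed from the snippets above; the real kernel supports more shapes.
    SUPPORT_SHAPES = (
        (32, 4, 112, 112, 16),
        (32, 4, 56, 56, 16),
        (32, 16, 56, 56, 16),
    )

    def check_supported_shape(input_x_shape):
        """Reject shapes outside the whitelist, mirroring the _error_feedback pattern."""
        if tuple(input_x_shape) not in SUPPORT_SHAPES:
            raise RuntimeError("input_shape %s is not supported" % str(tuple(input_x_shape)))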
D | cholesky_trsm_impl.py
    36   input_x_shape = input_x.get("shape")
    39   matrix_dim = input_x_shape[0]
    50   input_x = tik_instance.Tensor("float32", input_x_shape, name="input_x", scope=tik.scope_gm)
D | img2col_impl.py
    820  input_x_shape = input_x.get("shape")
    822  n_shape, c1_shape, h_shape, w_shape, c0_shape = input_x_shape
    829  input_shape = (tuple(input_x_shape), input_x_dtype, (filter_h, filter_w), (stride_h, stride_w))
    860  input_x = tik_instance.Tensor("float16", input_x_shape, name="input_x", scope=tik.scope_gm)
/third_party/mindspore/mindspore/core/ops/
D | reduce.cc
    36   std::vector<int64_t> infer_shape_reduce(std::vector<int64_t> input_x_shape, const ValuePtr axis_val…  [in infer_shape_reduce(), argument]
    38   int64_t dim = SizeToLong(input_x_shape.size());  [in infer_shape_reduce()]
    63   (void)out_shape.emplace_back(input_x_shape[LongToSize(i)]);  [in infer_shape_reduce()]
    73   …auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[0]->BuildShape())[…  [in InferShape(), local]
    76   auto out_shape = infer_shape_reduce(input_x_shape, axis_value, keep_dims);  [in InferShape()]
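infer_shape_reduce derives the output shape of a reduction from the input shape, the axis value, and keep_dims. The sketch below shows the standard Reduce rule it implements, with axis given as a sequence of ints; the axis-normalization details are assumptions, not a line-for-line port of the C++:

    def infer_reduce_shape(input_x_shape, axis, keep_dims):
        """Standard Reduce shape rule: drop (or keep as size 1) every reduced dimension."""
        dim = len(input_x_shape)
        if not axis:                     # an empty axis tuple means "reduce all dimensions"
            axis = tuple(range(dim))
        axes = {a % dim for a in axis}   # normalize negative axes into [0, dim)
        out_shape = []
        for i, size in enumerate(input_x_shape):
            if i not in axes:
                out_shape.append(size)
            elif keep_dims:
                out_shape.append(1)      # reduced dimension kept as size 1
        return out_shape

    # e.g. infer_reduce_shape([2, 3, 4], (1,), keep_dims=True) -> [2, 1, 4]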
/third_party/mindspore/mindspore/ops/operations/
D | _embedding_cache_ops.py
    50   def check_shape(self, input_x_shape, indices_shape, update_shape, max_num_shape):  [argument]
    95   def check_shape(self, input_x_shape, max_num_shape, offset_shape):  [argument]
    302  def check_shape(self, input_x_shape, cum_sum_arr_shape, shift_idx_shape):  [argument]
    303  return input_x_shape
D | array_ops.py
    3824  def infer_shape(self, input_x_shape, indices_shape, updates_shape):  [argument]
    3829  if indices_shape[-1] > len(input_x_shape):
    3835  updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
    3843  return input_x_shape
    3909  def infer_shape(self, input_x_shape, indices_shape, updates_shape):  [argument]
    3914  if indices_shape[-1] > len(input_x_shape):
    3920  updates_shape_check = indices_shape[:-1] + input_x_shape[indices_shape[-1]:]
    3928  return input_x_shape
    6236  def infer_shape(self, input_x_shape, indices_shape, updates_shape):  [argument]
    6241  if indices_shape[-1] > len(input_x_shape):
    [all …]
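All three infer_shape methods in array_ops.py enforce the same ScatterNd-style constraint: indices_shape[-1] may not exceed the rank of input_x, updates must have shape indices_shape[:-1] + input_x_shape[indices_shape[-1]:], and the op's output shape is input_x_shape itself. A hedged Python sketch of that rule (the function name is illustrative):

    def check_scatter_nd_update_shapes(input_x_shape, indices_shape, updates_shape):
        """ScatterNd-style shape rule: validate updates against indices and input_x."""
        if indices_shape[-1] > len(input_x_shape):
            raise ValueError("the last dimension of indices must not exceed the rank of input_x")
        expected = list(indices_shape[:-1]) + list(input_x_shape[indices_shape[-1]:])
        if list(updates_shape) != expected:
            raise ValueError("updates shape %s does not match expected shape %s"
                             % (list(updates_shape), expected))
        return list(input_x_shape)  # the update is written in place, so output shape == input shape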
D | _quant_ops.py
    226  def infer_shape(self, input_x_shape, alpha_shape, quant_max_shape):  [argument]
    227  validator.check_int(len(input_x_shape), 1, Rel.GE, "input_x rank", self.name)
    230  return input_x_shape
    383  def infer_shape(self, input_x_shape, alpha_shape, quant_max_shape):  [argument]
    384  if self.is_ascend and len(input_x_shape) not in self.ascend_support_x_rank:
    387  validator.check_int(len(input_x_shape), 1, Rel.GE, "input_x rank", self.name)
    388  if len(input_x_shape) == 1:
    391  …validator.check_equal_int(alpha_shape[0], input_x_shape[self.channel_axis], "alpha rank", self.nam…
    393  return input_x_shape
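The second infer_shape above is the per-channel variant: besides the minimum-rank check, it requires one alpha (min/max parameter) entry per channel along self.channel_axis. A sketch of that constraint, with a hypothetical helper standing in for the validator calls:

    def check_per_channel_alpha(input_x_shape, alpha_shape, channel_axis):
        """Per-channel quantization: alpha must carry one value per channel along channel_axis."""
        if len(input_x_shape) < 1:
            raise ValueError("input_x must have rank >= 1")
        if alpha_shape[0] != input_x_shape[channel_axis]:
            raise ValueError("alpha length %d does not match channel dimension %d"
                             % (alpha_shape[0], input_x_shape[channel_axis]))
        return list(input_x_shape)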
D | nn_ops.py
    3861  def infer_shape(self, input_x_shape, weight_shape):  [argument]
    3862  input_x_dim = len(input_x_shape)
    3869  channel_num = input_x_shape[1]
    3877  return input_x_shape
/third_party/mindspore/mindspore/core/ops/grad/
D | soft_shrink_grad.cc
    39   …auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[…  [in SoftShrinkGradInferShape(), local]
    41   …ils::Check("input_grad_shape", input_grad_shape, kEqual, "input_x_shape", input_x_shape, prim_name,  [in SoftShrinkGradInferShape()]
D | hsigmoid_grad.cc
    43   …auto input_x_shape = CheckAndConvertUtils::ConvertShapePtrToShapeMap(input_args[1]->BuildShape())[…  [in InferShape(), local]
    44   …CheckAndConvertUtils::Check("grads_shape", grads_shape, kEqual, "input_x_shape", input_x_shape, pr…  [in InferShape()]
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/tbe/
D | tbe_adapter.cc
    260  auto input_x_shape = AnfAlgo::GetOutputInferShape(anf_node, 0);  [in GenTopKV2IndicesTensorInfo(), local]
    261  size_t last_dim = input_x_shape[input_x_shape.size() - 1];  [in GenTopKV2IndicesTensorInfo()]
/third_party/mindspore/mindspore/ccsrc/transform/express_ir/
D | onnx_exporter.cc
    1069  auto input_x_shape = dyn_cast<abstract::Shape>(node->input(kOneNum)->Shape());  [in ExportPrimBatchMatMul(), local]
    1079  for (size_t i = 0; i < input_x_shape->shape().size() - kTwoNum; i++) {  [in ExportPrimBatchMatMul()]
    1082  attr_proto->add_ints(SizeToLong(input_x_shape->shape().size()) - IntToLong(kOneNum));  [in ExportPrimBatchMatMul()]
    1083  attr_proto->add_ints(SizeToLong(input_x_shape->shape().size()) - IntToLong(kTwoNum));  [in ExportPrimBatchMatMul()]
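ExportPrimBatchMatMul builds a Transpose permutation from the input rank: the snippets at 1082 and 1083 append rank-1 and rank-2, so the batch axes stay in place and only the last two axes are swapped. Assuming the loop at 1079 appends the running index i for the batch axes (its body is not shown above) and the rank is at least 2, the permutation reduces to this sketch:

    def batch_matmul_transpose_perm(rank):
        """Permutation that swaps the last two axes and leaves the batch axes untouched."""
        perm = list(range(rank - 2))  # batch dimensions keep their positions
        perm.append(rank - 1)         # next-to-last output axis <- last input axis
        perm.append(rank - 2)         # last output axis <- next-to-last input axis
        return perm

    # e.g. batch_matmul_transpose_perm(4) -> [0, 1, 3, 2]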