Searched refs:kDim1 (Results 1 – 13 of 13) sorted by relevance

/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/
avgpool_3d_grad_fusion.cc
88 auto kh = k_size[kDim1]; in IsVectorImpl()
90 bool flag1 = kd >= fd + pad_list[kDim0] + pad_list[kDim1]; in IsVectorImpl()
133 auto pad_d = pad_list[kDim0] + pad_list[kDim1]; in ConstructMultiplier()
140 for (int64_t cc = 0; cc < grad_shape[kDim1]; cc++) { in ConstructMultiplier()
152 … valid_h = start_h + kernel_size[kDim1] <= len_h ? kernel_size[kDim1] : len_h - start_h; in ConstructMultiplier()
157 … valid_h = std::min(start_h + kernel_size[kDim1], pad_list[kDim2] + ori_input_shape[kDim3]) - in ConstructMultiplier()
171 start_h += strides[kDim1]; in ConstructMultiplier()
222 auto kh = kernel_size[kDim1]; in Process()
224 auto fc = origin_input_shape[kDim1]; in Process()
246 …"strides", MakeValue(std::vector<int64_t>{dim_one, dim_one, strides[kDim0], strides[kDim1], stride… in Process()
avgpool_3d_fusion.cc
65 *kh = kernel_size[kDim1]; in GetKernelSize()
93 *sh = kernel_size[kDim1]; in GetStrideSize()
169 auto pad_d = pad_list[kDim0] + pad_list[kDim1]; in ConstructMultiplier()
265 auto fc = SizeToLong(dims_in[kDim1]); in Process()
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/mindir/
avg_pool_grad_unify_mindir.cc
83 std::vector<float> tmp_zero_vector(in_shape_after_padding_2d[kDim1], 0.0); in GetAssistInputMatrix()
84 std::vector<float> tmp_one_vector(in_shape_after_padding_2d[kDim1], 1.0); in GetAssistInputMatrix()
85 for (int64_t i = 0; i < in_shape_after_padding_2d[kDim1]; ++i) { in GetAssistInputMatrix()
86 if (i < pad_left || i >= (in_shape_after_padding_2d[kDim1] - pad_right)) { in GetAssistInputMatrix()
134 std::vector<int64_t> output_shape = {x_shape[kDim0], x_shape[kDim1], h_output, w_output}; in CreateMeanMatrixValueNode()
137 for (int64_t i = 0; i < output_shape[kDim0] * output_shape[kDim1]; ++i) { in CreateMeanMatrixValueNode()
165 std::vector<int64_t> kernel_shape = {1, x_shape[kDim1], k_size[kDim2], k_size[kDim3]}; in CreateKernelMatrixValueNode()
maxpool_with_argmax_unify_mindir.cc
71 argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]); in Process()
106 argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]); in Process()
conv2d_unify_mindir.cc
65 auto in_channel = in_shape[kDim1]; in NeedUpdate()
66 auto out_channel = out_shape[kDim1]; in NeedUpdate()
119 std::swap(out_shape[kDim0], out_shape[kDim1]); in CreateTranspose()
bn_grad_unify_mindir.cc
39 bn_grad_node_inputs[kDim1], in CreateNewBatchNormGrad()
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/
dynamic_rnn_grad_fission_v2.cc
59 kDimMultiNum * (((origin_input9_shape[kDim1] + kCubeSize - 1) / kCubeSize) * kCubeSize)}; in CreateTLoopNode()
60 std::vector<size_t> output1_dims{input_i_shape[kDim1], input_i_shape[kDim2]}; in CreateTLoopNode()
80 …std::vector<size_t> split_v_output0_shape{IntToSize(1), origin_output2_shape[kDim1], origin_output… in CreateTLoopNode()
81 …e_t> split_v_output1_shape{IntToSize(1), origin_output3_shape[kDim0], origin_output3_shape[kDim1]}; in CreateTLoopNode()
88 … SizeToLong((origin_output3_shape[kDim1] + kCubeSize - 1) / kCubeSize * kCubeSize)}), in CreateTLoopNode()
280 …{{origin_input7_shape[kDim0], origin_input7_shape[kDim1], kDimMultiNum * origin_input7_shape[kDim2… in AddLSTMInputGradNode()
303 …std::vector<size_t> shape1 = {origin_input6_shape[kDim0] - 1, origin_input6_shape[kDim1], origin_i… in CreateSplitV()
304 std::vector<size_t> shape2 = {1, origin_input6_shape[kDim1], origin_input6_shape[kDim2]}; in CreateSplitV()
368 std::vector<size_t> shape = {origin_output0_shape[kDim0], origin_output0_shape[kDim1], in CreateConcat()
403 std::vector<size_t> shape = {origin_input0_shape[kDim0], origin_input0_shape[kDim1], in CreateConcatNodeT1()
cdist_fission.cc
110 auto x_shape = AnfAlgo::GetOutputInferShape(cdist_inputs[kDim1], 0); in Process()
113 …auto broadcast_input_x = AddBroadCastToNode(graph, cdist_inputs[kDim1], kInputXDimP, broadcast_to_… in Process()
143 auto broadcast_grad = AddBroadCastToNode(graph, cdist_grad_inputs[kDim1], 0, broadcast_to_shape); in Process()
space_to_depth_split.cc
38 int64_t input_channel = SizeToLong(x_shape[kDim1]); in CreateTensor()
39 int64_t assist_input_channel = SizeToLong(x_shape[kDim1]) * block_size * block_size; in CreateTensor()
dynamic_gru_v2_grad_fission.cc
179 std::vector<size_t> concat_output_shape = {t_size, out_dims[kDim1], out_dims[kDim2]}; in AddTConcatNode()
246 shape_tmp = {{IntToSize(1), ori_shape[kDim0], ori_shape[kDim1]}}; in CreateHReshape()
474 batch_size = AnfAlgo::GetOutputInferShape(input_h, 0)[kDim1]; in Process()
bn_split.cc
59 std::vector<size_t> bn_training_reduce_shape = {bn_shape_i0[kDim1]}; in CreateOutputsOfBNTrainingReduce()
/third_party/mindspore/mindspore/ccsrc/common/
trans.cc
652 auto w1 = (shape[shape.size() - kDim1] - 1) / kCubeSize + 1; in FracNZDeviceShape()
672 int64_t w_shape = shape[shape.size() - kDim1]; in FracNZDeviceDynamicShape()
723 auto dim_last1 = shape[shape.size() - kDim1];
773 device_shape[shape.size() - kDim1] = Shape::SHP_ANY;
779 device_shape[shape.size() - kDim1] = n_num * DivCeil(hidden_size, C0);
1418 hw_shape->push_back(host_shape[size - kDim1]); in TransShapeToNz()
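
Note that the trans.cc hits use kDim1 as an offset from the end of the shape vector rather than as an absolute axis index, so shape[shape.size() - kDim1] reads the last dimension. A minimal sketch of the ceiling division at line 652, assuming kDim1 is 1 and kCubeSize is the 16-element Ascend cube size (neither definition is shown in these results):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum : size_t { kDim0 = 0, kDim1, kDim2 };  // assumption: consecutive 0-based indices
    constexpr int64_t kCubeSize = 16;           // assumption: Ascend cube size

    int main() {
      std::vector<int64_t> shape = {8, 100};    // host shape; last dimension is 100
      // Same pattern as trans.cc:652 — ceiling of the last dimension over kCubeSize.
      auto w1 = (shape[shape.size() - kDim1] - 1) / kCubeSize + 1;
      std::cout << "w1 = " << w1 << std::endl;  // prints w1 = 7, i.e. ceil(100 / 16)
      return 0;
    }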
/third_party/mindspore/mindspore/ccsrc/utils/
utils.h
561 kDim1, enumerator
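
The utils.h hit is the declaration site: kDim1 is an enumerator, and the files above use it as a 0-based axis index into shape and attribute vectors (for example, the channel axis of an NCHW input shape in conv2d_unify_mindir.cc:65). A hedged sketch of that presumed definition and usage; the exact enum in utils.h may differ:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Assumption: consecutive 0-based indices, so kDim1 names the second axis.
    enum : size_t { kDim0 = 0, kDim1, kDim2, kDim3 };

    int main() {
      std::vector<int64_t> in_shape = {32, 16, 224, 224};  // assumed NCHW layout
      auto in_channel = in_shape[kDim1];  // mirrors conv2d_unify_mindir.cc:65
      std::cout << "in_channel = " << in_channel << std::endl;  // prints in_channel = 16
      return 0;
    }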