
Searched for refs:kDim2 (Results 1 – 15 of 15), sorted by relevance

/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/
avgpool_3d_grad_fusion.cc
84 auto fd = fp_shape[kDim2]; in IsVectorImpl()
89 auto kw = k_size[kDim2]; in IsVectorImpl()
91 bool flag2 = kh >= fh + pad_list[kDim2] + pad_list[kDim3]; in IsVectorImpl()
134 auto pad_h = pad_list[kDim2] + pad_list[kDim3]; in ConstructMultiplier()
136 auto len_d = ori_input_shape[kDim2] + pad_d; in ConstructMultiplier()
142 for (int64_t di = 0; di < grad_shape[kDim2]; di++) { in ConstructMultiplier()
153 … valid_w = start_w + kernel_size[kDim2] <= len_w ? kernel_size[kDim2] : len_w - start_w; in ConstructMultiplier()
155 … valid_d = std::min(start_d + kernel_size[kDim0], pad_list[kDim0] + ori_input_shape[kDim2]) - in ConstructMultiplier()
157 … valid_h = std::min(start_h + kernel_size[kDim1], pad_list[kDim2] + ori_input_shape[kDim3]) - in ConstructMultiplier()
158 std::max(pad_list[kDim2], start_h); in ConstructMultiplier()
[all …]
avgpool_3d_fusion.cc
66 *kw = kernel_size[kDim2]; in GetKernelSize()
69 *kd = kernel_size[kDim2]; in GetKernelSize()
94 *sw = kernel_size[kDim2]; in GetStrideSize()
97 *sd = kernel_size[kDim2]; in GetStrideSize()
170 auto pad_h = pad_list[kDim2] + pad_list[kDim3]; in ConstructMultiplier()
187 … auto vaild_h = GetInterSection(start_h, start_h + kh, pad_list[kDim2], pad_list[kDim2] + fh); in ConstructMultiplier()
266 auto fd = SizeToLong(dims_in[kDim2]); in Process()
269 auto dout = SizeToLong(dims_out[kDim2]); in Process()
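The two avgpool fusion passes above build a per-element multiplier by clipping each kernel window against the padded input extent (the valid_h / GetInterSection hits). A minimal sketch of that interval-intersection step; the helper body and the concrete values are assumptions, only the min/max pattern comes from the hits:

#include <algorithm>
#include <cstdint>

// Hypothetical stand-in for the GetInterSection helper referenced above:
// length of the overlap between [start1, end1) and [start2, end2).
int64_t GetInterSection(int64_t start1, int64_t end1, int64_t start2, int64_t end2) {
  return std::max<int64_t>(0, std::min(end1, end2) - std::max(start1, start2));
}

int main() {
  // A kernel window of height kh starting at start_h, clipped against the
  // real (unpadded) input rows [pad_top, pad_top + fh).
  int64_t start_h = 3, kh = 5, pad_top = 2, fh = 4;
  int64_t valid_h = GetInterSection(start_h, start_h + kh, pad_top, pad_top + fh);
  return valid_h == 3 ? 0 : 1;  // overlap of [3, 8) and [2, 6) has length 3
}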
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/mindir/
maxpool_with_argmax_unify_mindir.cc
71 argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]); in Process()
72 …argmax_shape[kDim3] = (output_shape[kDim2] * output_shape[kDim3] + kAlignBytes - 1) / kAlignBytes … in Process()
105 …argmax_shape[kDim3] = (argmax_shape[kDim2] * argmax_shape[kDim3] + kAlignBytes - 1) / kAlignBytes … in Process()
106 argmax_shape[kDim2] = LongToSize(ksize[kDim1] * ksize[kDim2]); in Process()
avg_pool_grad_unify_mindir.cc
81 std::vector<int64_t> in_shape_after_padding_2d = {x_shape[kDim2] + pad_top + pad_bottom, in GetAssistInputMatrix()
112 …windowed_output_size(x_shape[kDim2], k_size[kDim2], stride[kDim2], pad_mode, &pad_top, &pad_bottom… in CreateMeanMatrixValueNode()
122 for (int64_t i = h * stride[kDim2]; i < h * stride[kDim2] + k_size[kDim2]; ++i) { in CreateMeanMatrixValueNode()
139 auto dst_size = LongToSize(output_shape[kDim2]) * LongToSize(output_shape[kDim3]) * kFloat32Len; in CreateMeanMatrixValueNode()
165 std::vector<int64_t> kernel_shape = {1, x_shape[kDim1], k_size[kDim2], k_size[kDim3]}; in CreateKernelMatrixValueNode()
bn_grad_unify_mindir.cc
40 bn_grad_node_inputs[kDim2], in CreateNewBatchNormGrad()
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/ir_fission/
dynamic_rnn_grad_fission_v2.cc
60 std::vector<size_t> output1_dims{input_i_shape[kDim1], input_i_shape[kDim2]}; in CreateTLoopNode()
80 …e_t> split_v_output0_shape{IntToSize(1), origin_output2_shape[kDim1], origin_output2_shape[kDim2]}; in CreateTLoopNode()
87 … SizeToLong((origin_output2_shape[kDim2] + kCubeSize - 1) / kCubeSize * kCubeSize), in CreateTLoopNode()
280 …rigin_input7_shape[kDim0], origin_input7_shape[kDim1], kDimMultiNum * origin_input7_shape[kDim2]}}, in AddLSTMInputGradNode()
303 … shape1 = {origin_input6_shape[kDim0] - 1, origin_input6_shape[kDim1], origin_input6_shape[kDim2]}; in CreateSplitV()
304 std::vector<size_t> shape2 = {1, origin_input6_shape[kDim1], origin_input6_shape[kDim2]}; in CreateSplitV()
369 origin_output0_shape[kDim2] + h_concat_output_shape[kDim2]}; in CreateConcat()
404 origin_input0_shape[kDim2] + shape_tmp[kDim2]}; in CreateConcatNodeT1()
424 …d::vector<size_t> shape = {concat_shape[kDim0], concat_shape[kDim2], lstm_input_grad_shape[kDim2]}; in CreateBatchMatMul()
509 auto out_shape = {AnfAlgo::GetOutputInferShape(lstm_input_grad, 0)[kDim2]}; in CreateDbReduceSum()
[all …]
cdist_fission.cc
111 auto y_shape = AnfAlgo::GetOutputInferShape(cdist_inputs[kDim2], 0); in Process()
114 …auto broadcast_input_y = AddBroadCastToNode(graph, cdist_inputs[kDim2], kInputYDimR, broadcast_to_… in Process()
140 auto x_shape = AnfAlgo::GetOutputInferShape(cdist_grad_inputs[kDim2], 0); in Process()
144 …auto broadcast_input_x = AddBroadCastToNode(graph, cdist_grad_inputs[kDim2], kInputXDimP, broadcas… in Process()
dynamic_gru_v2_grad_fission.cc
179 std::vector<size_t> concat_output_shape = {t_size, out_dims[kDim1], out_dims[kDim2]}; in AddTConcatNode()
326 AnfAlgo::SetNodeAttr("split_dim", MakeValue(SizeToLong(kDim2)), split_vd); in CreateDgateHSplitVDNode()
355 AnfAlgo::SetNodeAttr(kAttrAxis, MakeValue(SizeToLong(kDim2)), concat_op); in CreateDgateXConcatDNode()
475 hidden_size = AnfAlgo::GetOutputInferShape(input_h, 0)[kDim2]; in Process()
476 input_size = AnfAlgo::GetOutputInferShape(input_x, 0)[kDim2]; in Process()
single_batch_norm_fission.cc
43 auto bn_input1 = bn_cnode->input(kDim2); in CreateBNTrainingReduce()
max_pool3d_grad_grad_fission.cc
43 int64_t d = ksize[kDim2]; in CreateTensor()
space_to_depth_split.cc
50 int64_t window_size = assist_input_shape[kDim2] * assist_input_shape[kDim3]; in CreateTensor()
gather_v2_ds_fission.cc
139 …aram_shape.empty() || indice_shape.empty() || AnfAlgo::IsDynamicShape(origin_node->input(kDim2))) { in CheckInputs()
/third_party/mindspore/mindspore/ccsrc/common/
trans.cc
651 auto h1 = (shape[shape.size() - kDim2] - 1) / kCubeSize + 1; in FracNZDeviceShape()
669 (void)std::copy(shape.begin(), shape.end() - kDim2, std::back_inserter(device_shape)); in FracNZDeviceDynamicShape()
671 int64_t h_shape = shape[shape.size() - kDim2]; in FracNZDeviceDynamicShape()
724 auto dim_last2 = shape[shape.size() - kDim2];
738 device_shape[shape.size() - kDim2] = DivCeil(dim_last2, NUM16);
740 device_shape[shape.size() - kDim2] = DivCeil(input_size, NUM16) + DivCeil(hidden_size, NUM16);
764 device_shape[shape.size() - kDim2] = Shape::SHP_ANY;
766 device_shape[shape.size() - kDim2] = DivCeil(dim_last2, NUM16);
768 device_shape[shape.size() - kDim2] = DivCeil(input_size, NUM16) + DivCeil(hidden_size, NUM16);
1408 if (size < kDim2) { in TransShapeToNz()
[all …]
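The trans.cc hits all address the second-to-last axis as shape[shape.size() - kDim2] when deriving FRACTAL_NZ device shapes. A minimal sketch of that tiling arithmetic; the function name CountNzCubes is hypothetical and kCubeSize = 16 is an assumption based on the NUM16 divisors above:

#include <cstdint>
#include <vector>

// Assumed values, matching how the trans.cc hits use them.
constexpr size_t kDim1 = 1;
constexpr size_t kDim2 = 2;
constexpr int64_t kCubeSize = 16;

// Sketch: how many cube-sized tiles cover the last two axes, addressed as
// shape.size() - kDim2 and shape.size() - kDim1.
bool CountNzCubes(const std::vector<int64_t> &shape, int64_t *h1, int64_t *w1) {
  if (shape.size() < kDim2) {
    return false;  // rank < 2, cf. the size check at trans.cc:1408
  }
  int64_t h = shape[shape.size() - kDim2];  // second-to-last axis
  int64_t w = shape[shape.size() - kDim1];  // last axis
  *h1 = (h - 1) / kCubeSize + 1;            // as at trans.cc:651
  *w1 = (w - 1) / kCubeSize + 1;
  return true;
}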
/third_party/mindspore/mindspore/lite/tools/converter/quantizer/
quantize_util.cc
48 constexpr int kDim2 = 2; variable
226 if (weight_shape.size() < kDim2) { // do not quant single dim tensors in CanTensorQuantized()
726 …primitive.value.type == schema::PrimitiveType_MatMul && static_cast<int>(shapes.size()) == kDim2) { in CalQuantAssitInfo()
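quantize_util.cc defines its own file-local kDim2 rather than reusing the shared constant, and uses it as a rank threshold (single-dimension tensors are not quantized) and as the expected MatMul rank. A hypothetical reduction of that rank check; the function name below is illustrative, not the converter's API:

#include <vector>

// File-local constant, as defined at quantize_util.cc:48 above.
constexpr int kDim2 = 2;

// Hypothetical reduction of the rank check seen in CanTensorQuantized:
// tensors with fewer than two dimensions are left unquantized.
bool HasQuantizableRank(const std::vector<int> &weight_shape) {
  return static_cast<int>(weight_shape.size()) >= kDim2;
}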
/third_party/mindspore/mindspore/ccsrc/utils/
utils.h
562 kDim2, enumerator
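utils.h declares kDim2 as an enumerator; together with the indexing seen throughout these results, it serves as the third axis index of an NCHW/NCDHW shape. A minimal, self-contained sketch of that usage; the enumeration layout is assumed here, only kDim2's presence at utils.h:562 is confirmed by the results:

#include <cstddef>
#include <vector>

// Assumed layout of the dimension-index enumeration (kDim2 == 2, as the
// quantize_util.cc constant above also suggests).
enum : size_t { kDim0 = 0, kDim1, kDim2, kDim3 };

int main() {
  std::vector<size_t> shape = {1, 3, 224, 224};  // NCHW
  size_t height = shape[kDim2];                  // the H axis, i.e. 224
  return height == 224 ? 0 : 1;
}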