/third_party/mindspore/mindspore/core/ops/ |
D | conv2d.cc |
    34   if ((shape[i] < 0) && (shape[i] != Shape::SHP_ANY)) {    in CheckShapeAnyAndPositive()
    79   if (x_h != Shape::SHP_ANY) {    in Conv2DPadFunction()
    86   if (x_w != Shape::SHP_ANY) {    in Conv2DPadFunction()
    98   if (x_h == Shape::SHP_ANY) {    in Conv2DPadFunction()
    99   output_hw->push_back(Shape::SHP_ANY);    in Conv2DPadFunction()
   100   pad_list->push_back(Shape::SHP_ANY);    in Conv2DPadFunction()
   101   pad_list->push_back(Shape::SHP_ANY);    in Conv2DPadFunction()
   110   if (x_w == Shape::SHP_ANY) {    in Conv2DPadFunction()
   111   output_hw->push_back(Shape::SHP_ANY);    in Conv2DPadFunction()
   112   pad_list->push_back(Shape::SHP_ANY);    in Conv2DPadFunction()
   [all …]
|
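The conv2d.cc hits above show the dynamic-shape convention used throughout these infer routines: Shape::SHP_ANY (-1) marks a dimension whose size is unknown, and an unknown input H or W makes the corresponding output size and pad entries unknown as well. A minimal stand-alone sketch of that pattern; the helper name, the stride-1 "same"-padding simplification, and kShpAny are assumptions for illustration, not MindSpore's API:

    #include <cstdint>
    #include <vector>

    constexpr int64_t kShpAny = -1;  // stand-in for abstract::Shape::SHP_ANY

    // One spatial dimension of a "same"-padded, stride-1 convolution.
    void PadSameOneDim(int64_t x, int64_t kernel,
                       std::vector<int64_t> *output_hw,
                       std::vector<int64_t> *pad_list) {
      if (x == kShpAny) {               // dynamic input -> dynamic output and pads
        output_hw->push_back(kShpAny);
        pad_list->push_back(kShpAny);
        pad_list->push_back(kShpAny);
        return;
      }
      output_hw->push_back(x);          // SAME with stride 1 keeps the spatial size
      int64_t total_pad = kernel - 1;   // padding needed to keep that size
      pad_list->push_back(total_pad / 2);
      pad_list->push_back(total_pad - total_pad / 2);
    }
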
D | dynamic_broadcast_gradient_args.cc |
    38   if (input_shape[0] == abstract::Shape::SHP_ANY) {    in CheckInputsAndGetShape()
    63   ShapeVector shape{abstract::Shape::SHP_ANY};    in Infer()
|
D | batch_matmul.cc |
    79   …ll_of(x_shp.begin(), x_shp.end(), [](int64_t value) { return value != abstract::Shape::SHP_ANY; });    in BatchMatmulInferShape()
    81   …ll_of(y_shp.begin(), y_shp.end(), [](int64_t value) { return value != abstract::Shape::SHP_ANY; });    in BatchMatmulInferShape()
   102   output.push_back(abstract::Shape::SHP_ANY);    in BatchMatmulInferShape()
|
D | mat_mul.cc |
    67   …ll_of(x_shp.begin(), x_shp.end(), [](int64_t value) { return value != abstract::Shape::SHP_ANY; });    in MatMulInferShape()
    69   …ll_of(y_shp.begin(), y_shp.end(), [](int64_t value) { return value != abstract::Shape::SHP_ANY; });    in MatMulInferShape()
|
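The truncated …ll_of snippets in mat_mul.cc and batch_matmul.cc are std::all_of checks that gate shape validation on every dimension being known; when any dimension is SHP_ANY, the strict inner-dimension check is skipped and the unknowns propagate to the output. A sketch of that check; the helper name and kShpAny are placeholders:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    constexpr int64_t kShpAny = -1;  // stand-in for abstract::Shape::SHP_ANY

    bool AllDimsKnown(const std::vector<int64_t> &shp) {
      return std::all_of(shp.begin(), shp.end(),
                         [](int64_t value) { return value != kShpAny; });
    }

    // Usage: only validate the contraction dims when both shapes are fully known.
    // if (AllDimsKnown(x_shp) && AllDimsKnown(y_shp)) { /* check x_col == y_row */ }
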
D | max_pool.cc |
   106   int64_t out_h = abstract::Shape::SHP_ANY;    in InferShape()
   107   int64_t out_w = abstract::Shape::SHP_ANY;    in InferShape()
|
D | avg_pool.cc |
   111   int64_t out_h = abstract::Shape::SHP_ANY;    in InferShape()
   112   int64_t out_w = abstract::Shape::SHP_ANY;    in InferShape()
|
D | tile.cc | 44 if (infer_shape[i] == abstract::Shape::SHP_ANY) { in GetInferShape()
|
D | bias_add.cc | 65 [](int64_t value) { return value != abstract::Shape::SHP_ANY; }); in InferShape()
|
D | dropout_gen_mask.cc | 135 ShapeVector any_shape{abstract::Shape::SHP_ANY}; in InferShape()
|
/third_party/mindspore/mindspore/core/ops/grad/ |
D | conv2d_backprop_input.cc |
    55   int64_t pad_top = abstract::Shape::SHP_ANY;    in SetPadList()
    56   int64_t pad_bottom = abstract::Shape::SHP_ANY;    in SetPadList()
    57   int64_t pad_left = abstract::Shape::SHP_ANY;    in SetPadList()
    58   int64_t pad_right = abstract::Shape::SHP_ANY;    in SetPadList()
    59   if (dout_shape_norm[kInputIndex2] != abstract::Shape::SHP_ANY &&    in SetPadList()
    60   x_size_v[kInputIndex2] != abstract::Shape::SHP_ANY) {    in SetPadList()
    67   if (dout_shape_norm[kInputIndex3] != abstract::Shape::SHP_ANY &&    in SetPadList()
    68   x_size_v[kInputIndex3] != abstract::Shape::SHP_ANY) {    in SetPadList()
   124   out_shape.push_back(abstract::Shape::SHP_ANY);    in Conv2DBackpropInputInferShape()
|
D | conv2d_backprop_filter.cc | 95 out_shape.push_back(abstract::Shape::SHP_ANY); in Conv2DBackpropFilterInferShape()
|
/third_party/mindspore/mindspore/core/abstract/ |
D | utils.cc |
    62   if (dims[i] != Shape::SHP_ANY) {    in CalculateDynamicShape()
    66   if (shape1->shape()[i] != Shape::SHP_ANY && shape2->shape()[i] != Shape::SHP_ANY) {    in CalculateDynamicShape()
    71   if (shape1->shape()[i] == Shape::SHP_ANY && shape2->shape()[i] != Shape::SHP_ANY) {    in CalculateDynamicShape()
    80   if (shape1->shape()[i] != Shape::SHP_ANY && shape2->shape()[i] == Shape::SHP_ANY) {    in CalculateDynamicShape()
   127   if (shape1->shape()[i] == Shape::SHP_ANY) {    in ShapeJoin()
   131   dims[i] = Shape::SHP_ANY;    in ShapeJoin()
|
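The utils.cc hits pair two related operations on dynamic shapes: CalculateDynamicShape compares two candidate shapes dimension by dimension, and ShapeJoin merges them, falling back to SHP_ANY wherever the sides disagree or either side is already dynamic. A simplified join is sketched below with hypothetical names; the real ShapeJoin operates on Shape objects rather than raw vectors:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr int64_t kShpAny = -1;

    // Precondition: a and b have the same rank (rank mismatch is handled elsewhere).
    std::vector<int64_t> JoinShapes(const std::vector<int64_t> &a,
                                    const std::vector<int64_t> &b) {
      std::vector<int64_t> dims(a.size(), kShpAny);
      for (size_t i = 0; i < a.size(); ++i) {
        if (a[i] == b[i]) {
          dims[i] = a[i];  // both sides agree: keep the concrete size
        }                  // mismatch or SHP_ANY on either side: stay dynamic
      }
      return dims;
    }
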
D | prim_nn.cc |
   167   if ((x_shape[c_axis] != Shape::SHP_ANY) && (arg_shape[0] != x_shape[c_axis])) {    in InferImplBatchNorm()
   274   if ((x_shape[c_axis] != Shape::SHP_ANY) && (w_shape[c_axis] != Shape::SHP_ANY) &&    in InferImplConv2D()
   280   if ((w_shape[n_axis] != Shape::SHP_ANY) && (w_shape[n_axis] != out_channel)) {    in InferImplConv2D()
   286   if ((w_shape[h_axis] != Shape::SHP_ANY) && (w_shape[h_axis] != kernel_size[0])) {    in InferImplConv2D()
   289   if ((w_shape[w_axis] != Shape::SHP_ANY) && (w_shape[w_axis] != kernel_size[1])) {    in InferImplConv2D()
   308   if (x_shape[h_axis] == Shape::SHP_ANY) {    in InferImplConv2D()
   309   output_hw[0] = Shape::SHP_ANY;    in InferImplConv2D()
   311   if (x_shape[w_axis] == Shape::SHP_ANY) {    in InferImplConv2D()
   312   output_hw[1] = Shape::SHP_ANY;    in InferImplConv2D()
   367   …std::all_of(x_shape.begin(), x_shape.end(), [](int64_t value) { return value != Shape::SHP_ANY; });    in InferImplBiasAdd()
   [all …]
|
D | dshape.cc |
    88   …if (shape_[i] == SHP_ANY && min_shape_.size() == shape_.size() && max_shape_.size() == shape_.size…    in DumpText()
   103   const int64_t Shape::SHP_ANY;    member in mindspore::abstract::Shape
   106   shape_[i] = SHP_ANY;    in Broaden()
|
D | prim_arrays.cc |
   146   ShapeVector ids_shape = {Shape::SHP_ANY};    in InferImplUnique()
   192   ShapeVector ids_shape = {Shape::SHP_ANY};    in InferImplPadAndShift()
   263   …e = std::any_of(x_shape.begin(), x_shape.end(), [](int64_t dim) { return dim == Shape::SHP_ANY; });    in InferImplUnsortedSegmentSum()
   265   …ent_ids_shape.begin(), segment_ids_shape.end(), [](int64_t dim) { return dim == Shape::SHP_ANY; });    in InferImplUnsortedSegmentSum()
   316   …e = std::any_of(x_shape.begin(), x_shape.end(), [](int64_t dim) { return dim == Shape::SHP_ANY; });    in InferImplUnsortedSegmentMax()
   318   …ent_ids_shape.begin(), segment_ids_shape.end(), [](int64_t dim) { return dim == Shape::SHP_ANY; });    in InferImplUnsortedSegmentMax()
   368   …e = std::any_of(x_shape.begin(), x_shape.end(), [](int64_t dim) { return dim == Shape::SHP_ANY; });    in InferImplUnsortedSegmentMin()
   370   …ent_ids_shape.begin(), segment_ids_shape.end(), [](int64_t dim) { return dim == Shape::SHP_ANY; });    in InferImplUnsortedSegmentMin()
   450   shape.emplace_back(Shape::SHP_ANY);    in InferImplMapCacheIdx()
   530   shape.emplace_back(Shape::SHP_ANY);    in InferImplSubAndFilter()
   [all …]
|
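Several of the prim_arrays.cc hits (InferImplUnique, InferImplPadAndShift, InferImplMapCacheIdx) and the dropout_gen_mask.cc hit report an output shape of {SHP_ANY} outright: the output length depends on runtime values, so only the rank can be inferred. A minimal illustration with a hypothetical helper name and a plain vector standing in for ShapeVector:

    #include <cstdint>
    #include <vector>

    constexpr int64_t kShpAny = -1;
    using ShapeVector = std::vector<int64_t>;

    // Unique's ids output is 1-D, but how many unique ids exist is only
    // known once the kernel has run, so infer time reports {-1}.
    ShapeVector InferUniqueIdsShape(const ShapeVector & /*x_shape*/) {
      return {kShpAny};
    }
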
D | prim_maths.cc |
   295   …n = std::all_of(x_shp.begin(), x_shp.end(), [](int64_t value) { return value != Shape::SHP_ANY; });    in InferImplMatMul()
   296   …n = std::all_of(y_shp.begin(), y_shp.end(), [](int64_t value) { return value != Shape::SHP_ANY; });    in InferImplMatMul()
   362   …n = std::all_of(x_shp.begin(), x_shp.end(), [](int64_t value) { return value != Shape::SHP_ANY; });    in InferImplBatchMatMul()
   363   …n = std::all_of(y_shp.begin(), y_shp.end(), [](int64_t value) { return value != Shape::SHP_ANY; });    in InferImplBatchMatMul()
   383   output.push_back(Shape::SHP_ANY);    in InferImplBatchMatMul()
|
D | dshape.h | 68 static const int64_t SHP_ANY = -1;
|
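dshape.h is where the sentinel itself lives: SHP_ANY is the constant -1 stored inside a shape vector to mark a dynamic dimension, and the Broaden() hit in dshape.cc shows it being used to forget every concrete size while keeping the rank. A toy stand-in for those two pieces, not the real Shape class:

    #include <cstdint>
    #include <vector>

    struct ShapeSketch {
      static constexpr int64_t kShpAny = -1;  // mirrors Shape::SHP_ANY = -1
      std::vector<int64_t> dims;

      // Broaden: keep the rank, drop every concrete dimension size.
      void Broaden() {
        for (auto &d : dims) {
          d = kShpAny;
        }
      }
    };
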
D | prim_others.cc |
   424   ShapeVector tensor_out_shape = {Shape::SHP_ANY, tensor_in_shape[1]};    in InferImplAllSwap()
   562   ShapeVector inferred_shape(input_rank, Shape::SHP_ANY);    in InferImplGpuConvertToDynamicShape()
|
D | param_validator.cc | 180 if ((shape[i] < 0) && (shape[i] != Shape::SHP_ANY)) { in CheckShapeAnyAndPositive()
|
/third_party/mindspore/mindspore/ccsrc/common/ |
D | trans.cc |
   222   …ny_of(shape_list.begin(), shape_list.end(), [](int64_t shape) { return shape == Shape::SHP_ANY; });    in HasShapeDynamic()
   317   device_shape.push_back(Shape::SHP_ANY);    in FracZDeviceDynamicShape()
   322   if (shape[kN] == Shape::SHP_ANY) {    in FracZDeviceDynamicShape()
   323   device_shape.push_back(Shape::SHP_ANY);    in FracZDeviceDynamicShape()
   354   const int64_t C1 = (shape[kC] == Shape::SHP_ANY) ? Shape::SHP_ANY : (shape[kC] + tmp - 1) / tmp;    in Nc1hwc0DeviceDynamicShape()
   388   const int64_t C1 = (shape[1] == Shape::SHP_ANY) ? Shape::SHP_ANY : (shape[1] + tmp - 1) / tmp;    in Ndc1hwc0DeviceDynamicShape()
   422   device_shape.push_back(Shape::SHP_ANY);    in Fracz3DDeviceDynamicShape()
   428   const int64_t N1 = (shape[0] == Shape::SHP_ANY) ? Shape::SHP_ANY : (shape[0] + tmp - 1) / tmp;    in Fracz3DDeviceDynamicShape()
   455   shape[kC] == Shape::SHP_ANY ? device_shape.push_back(Shape::SHP_ANY)    in C1hwncoc0DeviceDynamicShape()
   489   first_dim = Shape::SHP_ANY;    in FracZc04DeviceDynamicShape()
   [all …]
|
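The trans.cc hits convert host-side shapes to Ascend device formats (FRACTAL_Z, NC1HWC0, NDC1HWC0, ...). The recurring idiom is a guarded ceil division: a blocked dimension such as C1 = ceil(C / C0) is computed only when C is known, otherwise it stays SHP_ANY. A sketch of that idiom; kCube = 16 is an assumed block size for illustration:

    #include <cstdint>

    constexpr int64_t kShpAny = -1;
    constexpr int64_t kCube = 16;  // assumed C0/N0 block size

    // e.g. C1 = ceil(C / 16); an unknown C yields an unknown C1.
    int64_t BlockCount(int64_t dim) {
      return (dim == kShpAny) ? kShpAny : (dim + kCube - 1) / kCube;
    }
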
/third_party/mindspore/tests/ut/cpp/pipeline/static_analysis/ |
D | prim_test.cc |
   338   auto a = UTPrimUtils::ShapeOf({Shape::SHP_ANY, Shape::SHP_ANY});    in TEST_F()
   339   auto b = UTPrimUtils::ShapeOf({Shape::SHP_ANY});    in TEST_F()
   340   std::vector<Any> expected{Shape::SHP_ANY, Shape::SHP_ANY};    in TEST_F()
   347   std::vector<ValuePtr> element_list = {MakeValue(Shape::SHP_ANY), MakeValue(Shape::SHP_ANY)};    in TEST_F()
|
/third_party/mindspore/mindspore/core/ops/fusion/ |
D | avg_pool_fusion.cc |
    80   int64_t out_h = abstract::Shape::SHP_ANY;    in InferShape()
    81   int64_t out_w = abstract::Shape::SHP_ANY;    in InferShape()
|
D | max_pool_fusion.cc |
    77   int64_t out_h = abstract::Shape::SHP_ANY;    in InferShape()
    78   int64_t out_w = abstract::Shape::SHP_ANY;    in InferShape()
|
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/mindir/ |
D | dropout_unify_mindir.cc | 165 ShapeVector mask_shp = {abstract::Shape::SHP_ANY}; in CreateDropoutGenMaskCNode()
|