/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Unit-test helpers for frontend/parallel tensor layouts: generators that
// enumerate candidate (device_arrangement, tensor_map, tensor_shape) triples,
// plus validity checks used by the layout-transfer UT suite. Declarations
// only — behavior notes below are inferred from the signatures/names and
// should be confirmed against the corresponding .cc implementation.
#ifndef TESTS_UT_PARALLEL_TENSOR_LAYOUT_UT_UTIL_LAYOUT_GEN_H_
#define TESTS_UT_PARALLEL_TENSOR_LAYOUT_UT_UTIL_LAYOUT_GEN_H_

#include <map>
#include <tuple>
#include <vector>

#include "frontend/parallel/tensor_layout/tensor_layout.h"
#include "frontend/parallel/step_parallel.h"

namespace mindspore {
namespace parallel {

// Returns candidate shapes derived from `in` and `target`.
// NOTE(review): presumably enumerates combinations/factorizations of `in`
// constrained by `target` — confirm against the implementation.
std::vector<Shape> combine(const Shape &in, int64_t target);

// Fills `out` with valid shapes of dimension `dim` whose total size is
// presumably 2^pow_size ("pow" suggests a power-of-two budget — TODO confirm).
// `out` is an output parameter; must be non-null.
void GenerateValidShapeBySizeAndDim(int64_t pow_size, int64_t dim, std::vector<Shape> *out);

// Same as above but enumerates over all dimensionalities for the given size
// budget. `out` is an output parameter; must be non-null.
void GenerateValidShapeBySize(int64_t pow_size, std::vector<Shape> *out);

// Builds a tensor map of length `map_size` where positions listed in
// `pos_index` take the corresponding values from `pos_value` (remaining
// entries presumably default to "not mapped" — verify in the .cc file).
// NOTE(review): `const int64_t &` for a scalar is unidiomatic (pass by value
// is preferred), but the signature must stay in sync with the definition.
TensorMap GenerateTensorMap(const int64_t &map_size, const Shape &pos_index, const Shape &pos_value);

// Fills `tensor_map_list` with every tensor map valid for the given device
// arrangement and tensor shape. Output parameter; must be non-null.
// NOTE(review): second parameter is named `tensor_shape`-like data but typed
// TensorMap — confirm the intended type against the definition.
void GenerateValidTensorMap(const DeviceArrangement &device_arrangement, const TensorMap &tensor_shape,
                            std::vector<TensorMap> *tensor_map_list);

// Enumerates all valid (device_arrangement, tensor_map, tensor_shape)
// triples for device/tensor size budgets `device_pow_size`/`tensor_pow_size`,
// bounded by `max_device_dim`/`max_shape_dim`. `layout_list` is an output
// parameter; must be non-null.
void GenerateValidLayoutByDeviceSizeAndTensorSize(
  int64_t device_pow_size, int64_t tensor_pow_size, int64_t max_device_dim, int64_t max_shape_dim,
  std::vector<std::tuple<DeviceArrangement, TensorMap, TensorShape>> *layout_list);

// Counts the "None" (unmapped) entries in `tensor_map` — TODO confirm the
// sentinel value used for "None" in the implementation.
size_t ComputeNoneNumber(const TensorMap &tensor_map);

// True iff every mapped dimension of `tensor_shape` is evenly divisible by
// the corresponding device-arrangement factor selected via `tensor_map`.
bool ShapeIsDividedByDevice(const DeviceArrangement &device_arrangement, const TensorMap &tensor_map,
                            const TensorShape &tensor_shape);

// Overall validity predicate for a layout triple (presumably combines
// divisibility and map-range checks — confirm in the .cc file).
bool CheckLayoutValid(const DeviceArrangement &device_arrangement, const TensorMap &tensor_map,
                      const TensorShape &tensor_shape);

// Builds the accumulated-device -> accumulated-shape mapping for a layout.
// `accum_device_to_accum_shape_map` is an output parameter; must be non-null.
// NOTE(review): "TOAccum" capitalization is unconventional but must match the
// out-of-view definition, so it is left as-is.
void ComputeAccumDeviceTOAccumShapeMap(const DeviceArrangement &device_arrangement, const TensorMap &tensor_map,
                                       const TensorShape &tensor_shape,
                                       std::map<int64_t, int64_t> *accum_device_to_accum_shape_map);

// Asserts (gtest-style, presumably) that transferring from the `in_*` layout
// to the `out_*` layout is a valid layout change.
void LayoutTransferValidLayoutChangeCheck(const DeviceArrangement &in_device_arrangement,
                                          const TensorMap &in_tensor_map, const TensorShape &in_tensor_shape,
                                          const DeviceArrangement &out_device_arrangement,
                                          const TensorMap &out_tensor_map, const TensorShape &out_tensor_shape);

// Validity check for an in/out layout pair; relationship to the function
// above is not visible from this header — confirm in the .cc file.
void ValidLayoutChangeCheck(const DeviceArrangement &in_device_arrangement, const TensorMap &in_tensor_map,
                            const TensorShape &in_tensor_shape, const DeviceArrangement &out_device_arrangement,
                            const TensorMap &out_tensor_map, const TensorShape &out_tensor_shape);

}  // namespace parallel
}  // namespace mindspore
#endif  // TESTS_UT_PARALLEL_TENSOR_LAYOUT_UT_UTIL_LAYOUT_GEN_H_