/**
 * Copyright 2024 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_TENSOR_UTILS_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_TENSOR_UTILS_H_
#include <map>
#include <vector>
#include "frontend/parallel/ops_info/operator_info.h"

namespace mindspore::parallel {
Status GetFactors(const TensorLayout &layout, Array *array);
void InitShapeVec(const Shape &src_shape, Shape *tgt_shape);
int64_t GetTensorSize(const Shape &shape);
int64_t GetLeastFactorWithoutConstDims(const Shape &to_shape, const Array &to_factors);
bool UseStrictMode(const Shape &from_shape, const Shape &to_shape);
bool RecordDimsChange(size_t key, int64_t value, std::map<size_t, int64_t> *memo, bool update = false);
void IntroduceConstraints(const Shape &expected_tgt_shape, Shape *tgt_shape);
bool ForwardMatching(const Shape &src_shape, const Shape &expected_tgt_shape, Shape *tgt_shape,
                     const Array &tgt_factors);
bool BackwardMatching(const Shape &expected_tgt_shape, Shape *tgt_shape, const Array &tgt_factors);
bool CheckDynamicShape(const TensorLayout &from_in, const TensorLayout &to_in);
bool SolveCombination(const Shape &src_shape_arr, size_t src_index,
                      const std::vector<std::vector<int64_t>> &enum_numbers, size_t offset, int64_t target,
                      std::vector<int64_t> *candidates_values);
void UnifyFromAndToShape(Shape *new_from_shape, Shape *new_to_shape, const TensorLayout &from_in,
                         const TensorLayout &to_in, ReplacementMemo *from_dims_replace_memo);
}  // namespace mindspore::parallel
#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_TENSOR_UTILS_H_
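// ---------------------------------------------------------------------------
// Illustrative usage sketch (comment only, not part of the interface).
// A minimal, hypothetical driver for the matching helpers declared above:
// seed a target shape from a known source shape, apply the expected-shape
// constraints, then try forward matching and fall back to backward matching.
// The concrete shapes, the pre-initialized `layout` variable, the call
// order, and the fallback policy are assumptions for illustration, not
// behaviour promised by this header.
//
//   TensorLayout layout;                      // assumed initialized elsewhere
//   Array tgt_factors;                        // divisibility constraints per dim
//   if (GetFactors(layout, &tgt_factors) != SUCCESS) {
//     // handle error
//   }
//
//   Shape src_shape = {4, 6, 8};              // fully known source shape
//   Shape expected_tgt_shape = {4, -1, 8};    // -1 denotes a dynamic dimension
//   Shape tgt_shape;
//   InitShapeVec(src_shape, &tgt_shape);      // seed the target shape vector
//   IntroduceConstraints(expected_tgt_shape, &tgt_shape);
//
//   bool matched = ForwardMatching(src_shape, expected_tgt_shape, &tgt_shape, tgt_factors);
//   if (!matched) {
//     matched = BackwardMatching(expected_tgt_shape, &tgt_shape, tgt_factors);
//   }
// ---------------------------------------------------------------------------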