/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_CONSTRUCT_OPERATOR_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_CONSTRUCT_OPERATOR_H_

#include <string>
#include <utility>
#include <vector>

#include "ir/value.h"
#include "frontend/parallel/ops_info/operator_info.h"
#include "frontend/parallel/status.h"

namespace mindspore {
namespace parallel {
using Args = std::vector<std::int64_t>;

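// Builds the individual operators (Reshape, StridedSlice, AllGather, Split, Concat,
// AlltoAll) needed to transform a tensor from one device layout to another. The most
// recently constructed operator is read back through GetOperator().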
class ConstructOperator {
 public:
  const int64_t DEFAULT = 0;
  ConstructOperator() : dev_size_(0) {}
  ~ConstructOperator() = default;
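  // Builders for single layout-transformation steps. Each returns a Status and, on
  // success, leaves the constructed operator available via GetOperator().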
  Status Init(const RankList &dev_list, const Shape &dev_matrix_shape);
  OperatorVector SkipRedisReshapeOP(const Shape &shape);
  Status ReshapeOP(const Shape &shape);
  Status StridedSliceOP(const Args &args);
  Status AllGatherOP(int64_t dev_dim);
  Status SplitOP(int64_t split_count);
  Status ConcatOP(int64_t concat_dim);
  Status AlltoAllOP(const Args &args);
  Operator GetOperator() const { return op_; }
  void UpdateTensorShape(const Shape &tensor_shape) { tensor_shape_ = tensor_shape; }

 private:
  Operator op_;
  size_t dev_size_;
  Shape tensor_shape_;
  RankList dev_list_;
  Shape dev_matrix_shape_;
  Status CreateGroupByDim(size_t axis, std::vector<Group> *group);
};
}  // namespace parallel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_CONSTRUCT_OPERATOR_H_
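Usage sketch (not from the MindSpore sources): the snippet below shows how a caller might build a single Reshape operator with this class. The device list, the shapes, the function name BuildReshapeExample, and the assumption that SUCCESS/FAILED from frontend/parallel/status.h signal success and failure are illustrative, not taken from the repository.

#include "frontend/parallel/tensor_layout/construct_operator.h"

namespace mindspore {
namespace parallel {
// Hypothetical helper: constructs a Reshape operator for an assumed 2x2 device
// matrix over ranks 0..3 and returns it through *reshape_op.
Status BuildReshapeExample(Operator *reshape_op) {
  ConstructOperator builder;
  RankList dev_list = {0, 1, 2, 3};  // assumed device ranks
  Shape dev_matrix_shape = {2, 2};   // assumed device matrix shape
  if (builder.Init(dev_list, dev_matrix_shape) != SUCCESS) {
    return FAILED;
  }
  builder.UpdateTensorShape({4, 8});           // assumed current tensor shape
  if (builder.ReshapeOP({8, 4}) != SUCCESS) {  // assumed target shape
    return FAILED;
  }
  *reshape_op = builder.GetOperator();  // read back the constructed operator
  return SUCCESS;
}
}  // namespace parallel
}  // namespace mindspore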