/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_TENSOR_INFO_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_TENSOR_INFO_H_

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

#include "frontend/parallel/device_matrix.h"
#include "frontend/parallel/tensor_layout/tensor_layout.h"

namespace mindspore {
namespace parallel {
using Shapes = std::vector<Shape>;

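// TensorInfo bundles the layout of a tensor with its full shape and the shape of its
// slice under that layout, plus the reduce dimensions recorded for reduce operators.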
class TensorInfo {
 public:
  TensorInfo(const TensorLayout &tensor_layout, Shape shape, Shape slice_shape)
      : tensor_layout_(tensor_layout), shape_(std::move(shape)), slice_shape_(std::move(slice_shape)) {}
  explicit TensorInfo(const TensorLayout &tensor_layout) : tensor_layout_(tensor_layout) {
    shape_ = tensor_layout.tensor_shape().array();
    slice_shape_ = tensor_layout.slice_shape().array();
  }
  // The trivial default constructor does not initialize members of C built-in types.
  TensorInfo() = default;
  ~TensorInfo() = default;
  TensorLayout tensor_layout() const { return tensor_layout_; }
  Shape slice_shape() const { return slice_shape_; }
  Shape shape() const { return shape_; }
  void set_reduce_dim(const std::vector<int64_t> &dim) { reduce_dim_ = dim; }
  std::vector<int64_t> reduce_dim() const { return reduce_dim_; }
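  // Infer the parallel strategy as the per-dimension split count shape_[i] / slice_shape_[i],
  // e.g. shape {8, 32} with slice shape {4, 8} yields {2, 4}. If a slice dimension is zero or
  // does not evenly divide the full dimension, inference stops and the dimensions inferred so
  // far are returned.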
  Dimensions InferStrategy() const {
    Dimensions stra;
    for (size_t i = 0; i < shape_.size(); ++i) {
      if ((slice_shape_[i] == 0) || (shape_[i] % slice_shape_[i] != 0)) {
        return stra;
      }
      int64_t dim = shape_[i] / slice_shape_[i];
      stra.push_back(dim);
    }
    return stra;
  }
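  // Two TensorInfo objects compare equal when their slice shapes and tensor layouts match;
  // shape_ and reduce_dim_ do not take part in the comparison.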
  bool operator==(const TensorInfo &other) {
    if (this->slice_shape_ != other.slice_shape_) {
      return false;
    }
    if (this->tensor_layout_ != other.tensor_layout_) {
      return false;
    }
    return true;
  }

 private:
  TensorLayout tensor_layout_;
  Shape shape_;
  Shape slice_shape_;
  // reduce method's reduce dim
  std::vector<int64_t> reduce_dim_;
};
}  // namespace parallel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_TENSOR_INFO_H_