/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_SHAPE_UTIL_H_
#define MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_SHAPE_UTIL_H_

#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "frontend/parallel/status.h"
#include "frontend/parallel/device_matrix.h"

namespace mindspore {
namespace parallel {
/*
 * Compute the accumulating product of all the values in shape from left to right;
 * the accumulated results are saved in *shape_accum from left to right.
 *
 * Given shape = [d_n-1, d_n-2, ..., d_0] (d_i > 0, i=0,1,...,n-1, i.e. every element of shape
 * must be larger than zero),
 * then *shape_accum = [d_n-1, d_n-1 * d_n-2, d_n-1 * d_n-2 * d_n-3, ..., d_n-1 * d_n-2 * ... * d_0]
 *
 * example:
 * shape = [2, 8, 32]
 * shape_accum = [2, 2 * 8, 2 * 8 * 32]
 */
Status ShapeToAccumulateProduct(const Shape &shape, Shape *shape_accum);

/*
 * Compute the accumulating product of all the values in shape from right to left;
 * the accumulated results are saved in *shape_accum from right to left.
 *
 * Given shape = [d_n-1, d_n-2, ..., d_0] (d_i > 0, i=0,1,...,n-1, i.e. every element of shape
 * must be larger than zero),
 * then *shape_accum = [d_n-1 * d_n-2 * ... * d_0, d_n-2 * d_n-3 * ... * d_0, ..., d_0]
 *
 * example:
 * shape = [2, 8, 32]
 * shape_accum = [2 * 8 * 32, 8 * 32, 32]
 */
Status ShapeToAccumulateProductReverse(const Shape &shape, Shape *shape_accum);

/*
 * Compute the original shape from the accumulating product shape_accum,
 * whose elements are saved from left to right.
 * Given shape_accum = [accum_n-1, accum_n-2, accum_n-3, ..., accum_0]
 * (accum_i > 0, i=0,1,...,n-1, i.e. every element of shape_accum must be larger than zero,
 * and accum_i-1 % accum_i == 0, i=1,...,n-1),
 * then *shape = [accum_n-1, accum_n-2/accum_n-1, accum_n-3/accum_n-2, ..., accum_0/accum_1]
 *
 * example:
 * shape_accum = [2, 2 * 8, 2 * 8 * 32]
 * shape = [2, 8, 32]
 */
Status AccumulateProductToShape(const Shape &shape_accum, Shape *shape);

/*
 * Compute the original shape from the accumulating product shape_accum_reverse,
 * whose elements are saved from right to left.
 * Given shape_accum_reverse = [accum_n-1, accum_n-2, accum_n-3, ..., accum_0]
 * (accum_i > 0, i=0,1,...,n-1, i.e. every element of shape_accum_reverse must be larger than zero,
 * and accum_i % accum_i-1 == 0, i=1,...,n-1),
 * then *shape = [accum_n-1/accum_n-2, accum_n-2/accum_n-3, ..., accum_1/accum_0, accum_0]
 *
 * example:
 * shape_accum_reverse = [2 * 8 * 32, 8 * 32, 32]
 * shape = [2, 8, 32]
 */
Status AccumulateProductReverseToShape(const Shape &shape_accum_reverse, Shape *shape);
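
/*
 * Usage sketch (illustrative only, not part of the interface), reusing the examples above and
 * assuming Shape is a vector of positive integers and Status defines a SUCCESS value, as
 * suggested by the includes of frontend/parallel/status.h and frontend/parallel/device_matrix.h:
 *
 *   Shape shape = {2, 8, 32};
 *   Shape accum;
 *   if (ShapeToAccumulateProduct(shape, &accum) != Status::SUCCESS) {
 *     // handle the error
 *   }
 *   // accum is now {2, 2 * 8, 2 * 8 * 32} = {2, 16, 512}
 *
 *   Shape restored;
 *   if (AccumulateProductToShape(accum, &restored) != Status::SUCCESS) {
 *     // handle the error
 *   }
 *   // restored is now {2, 8, 32} again
 */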
/*
 * Given two accumulate products in1_accum and in2_accum, compute their union;
 * the result is saved in *out_accum, i.e. *out_accum = in1_accum U in2_accum,
 * and the elements of *out_accum are saved in increasing order.
 *
 * example1:
 * in1_accum = [2, 8]
 * in2_accum = [4, 8]
 * out_accum = [2, 4, 8]
 *
 * example2:
 * in1_accum = [2, 4, 16]
 * in2_accum = [8, 16]
 * out_accum = [2, 4, 8, 16]
 */
Status UnifyAccumulateProduct(const Shape &in1_accum, const Shape &in2_accum, Shape *out_accum);

/*
 * Given two shapes in1 = [din1_n-1, din1_n-2, ..., din1_0] and in2 = [din2_m-1, din2_m-2, ..., din2_0]
 * with size = din1_n-1 * din1_n-2 * ... * din1_0 = din2_m-1 * din2_m-2 * ... * din2_0,
 * find *out = [dout_k-1, dout_k-2, ..., dout_0] s.t. dout_k-1 * dout_k-2 * ... * dout_0 = size, and,
 * with in1_accum, in2_accum, and out_accum denoting the ShapeToAccumulateProduct results of in1, in2,
 * and *out, every element of in1_accum is in out_accum and every element of in2_accum is in out_accum.
 *
 * example:
 * in1 = [8, 4]
 * in2 = [2, 16]
 * out = [2, 4, 4]
 */
Status UnifyShape(const Shape &in1, const Shape &in2, Shape *out);

/*
 * Given two accumulate products in reverse order of shapes in and expand,
 * in_accum_reverse = [din_n-1, din_n-2, ..., din_0] and expand_accum_reverse = [dexp_m-1, dexp_m-2, ..., dexp_0],
 * i.e. in_accum_reverse is the ShapeToAccumulateProductReverse result of a shape in and
 * expand_accum_reverse is the ShapeToAccumulateProductReverse result of a shape expand,
 * compute the accumulate product in reverse order out_accum_reverse = [dout_k-1, dout_k-2, ..., dout_0],
 * s.t. the elements of out_accum_reverse are the union of the elements of in_accum_reverse and
 * expand_accum_reverse (out_accum_reverse = in_accum_reverse U expand_accum_reverse), and
 * out_accum_reverse is itself a valid accumulate product in reverse order,
 * i.e. dout_i > 0, i=0,1,...,k-1 (every element of out_accum_reverse must be larger than zero), and
 * dout_i-1 % dout_i == 0, i=1,...,k-1.
 *
 * example1:
 * in_accum_reverse = [2 * 8 * 32, 8 * 32, 32]
 * expand_accum_reverse = [2 * 8 * 32, 32, 8]
 * out_accum_reverse = [2 * 8 * 4 * 8, 8 * 4 * 8, 4 * 8, 8]
 *
 * example2:
 * in_accum_reverse = [2 * 8 * 32, 8 * 32, 32]
 * expand_accum_reverse = [2 * 4 * 8, 4 * 8, 8]
 * out_accum_reverse = [2 * 4 * 2 * 4 * 8, 4 * 2 * 4 * 8, 2 * 4 * 8, 4 * 8, 8]
 */
Status ExpandAccumulateProduct(const Shape &in_accum_reverse, const Shape &expand_accum_reverse,
                               Shape *out_accum_reverse);

/*
 * Given a shape in = [din_n-1, din_n-2, ..., din_0] and an expand shape expand = [dexp_m-1, dexp_m-2, ..., dexp_0],
 * compute the expanded shape out = [dout_k-1, dout_k-2, ..., dout_0],
 * s.t. dout_k-1 * dout_k-2 * ... * dout_0 = din_n-1 * din_n-2 * ... * din_0.
 * With in_accum_reverse, expand_accum_reverse, and out_accum_reverse denoting the
 * ShapeToAccumulateProductReverse results of in, expand, and out,
 * out_accum_reverse is the union of in_accum_reverse and expand_accum_reverse
 * (out_accum_reverse = in_accum_reverse U expand_accum_reverse).
 *
 * example1:
 * in = [2, 8, 32]
 * expand = [16, 4, 8]
 * out = [2, 8, 4, 8]
 *
 * example2:
 * in = [2, 8, 32]
 * expand = [2, 4, 8]
 * out = [2, 4, 2, 4, 8]
 */
Status ExpandShape(const Shape &in, const Shape &expand, Shape *out);
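
/*
 * Usage sketch (illustrative only, not part of the interface), reusing the examples above and
 * keeping the same assumptions about Shape and Status as in the sketch above:
 *
 *   // Unify: find a common refinement of two factorizations of the same size.
 *   Shape in1 = {8, 4};
 *   Shape in2 = {2, 16};
 *   Shape unified;
 *   if (UnifyShape(in1, in2, &unified) == Status::SUCCESS) {
 *     // unified is {2, 4, 4}: its accumulate product {2, 8, 32} contains
 *     // both {8, 32} (from in1) and {2, 32} (from in2)
 *   }
 *
 *   // Expand: compute a shape whose reverse accumulate product is the union of
 *   // those of `in` and `expand`.
 *   Shape in = {2, 8, 32};
 *   Shape expand = {16, 4, 8};
 *   Shape expanded;
 *   if (ExpandShape(in, expand, &expanded) == Status::SUCCESS) {
 *     // expanded is {2, 8, 4, 8}
 *   }
 */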
}  // namespace parallel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_FRONTEND_PARALLEL_TENSOR_LAYOUT_SHAPE_UTIL_H_