/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/index_util.h"

#include <algorithm>
#include <string>

#include "absl/strings/str_join.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/logging.h"

namespace xla {

/* static */ int64 IndexUtil::MultidimensionalIndexToLinearIndex(
    const Shape& shape, absl::Span<const int64> multi_index) {
  DCHECK_EQ(shape.dimensions_size(), multi_index.size());

  for (size_t i = 0; i < multi_index.size(); ++i) {
    DCHECK_GE(multi_index[i], 0);
    DCHECK_LT(multi_index[i], shape.dimensions(i))
        << "indexing beyond extent in dimension " << i << ":"
        << "\n\tindex: " << absl::StrJoin(multi_index, ",")
        << "\n\tshape: " << ShapeUtil::HumanString(shape);
  }

  // Let the array be sized like so for dimensions i from 0 to n-1:
  //
  //   [D{n-1} x D{n-2} x .. x D{0}]
  //
  // Let the order of the dimensions in the minor_to_major field in
  // Layout be:
  //
  //   L(0), L(1), ... , L(n-1)
  //
  // where L(0) is the most-minor dimension and L(n-1) the most-major. The
  // multidimensional index:
  //
  //   [I{0}, I{1}, ... , I{n-1}]
  //
  // then corresponds to the following linear index:
  //
  // linear_index =
  //   (((  ... + I{L(2)}) * D{L(1)} + I{L(1)}) * D{L(0)} + I{L(0)}
  //
  // or equivalently:
  //
  // linear_index =
  //   I{L(n-1)} * (D{L(n-2)} * D{L(n-3)} * D{L(n-4)} *     ....    D{L(0)}) +
  //   I{L(n-2)} *             (D{L(n-3)} * D{L(n-4)} *     ....    D{L(0)}) +
  //   I{L(n-3)} *                         (D{L(n-4)} *     ....    D{L(0)}) +
  //                                   ...                                   +
  //   I{L(2)} *                                         (D{L(1)} * D{L(0)}) +
  //   I{L(1)} *                                                    D{L(0)}  +
  //   I{L(0)}
  //
  // We compute the linear index value by accumulating the terms above from
  // I{L(0)} up to I{L(n-1)}. Scale accumulates the product term D{L(0)} *
  // D{L(1)} * ...

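  // As a concrete check of the formula above (assuming a rank-2 shape [2,3]
  // with the default row-major layout, i.e. minor_to_major = {1, 0}): for
  // multi_index = {1, 2} the loop below first visits dimension 1
  // (linear_index = 2, scale = 3) and then dimension 0
  // (linear_index = 2 + 3 * 1 = 5), so the function returns 5.
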
  // Scale factor holding the growing product of D{L(i)} terms.
  int64 scale = 1;
  int64 linear_index = 0;
  bool first = true;
  for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
    if (first) {
      // Avoid two multiplies on the first loop iteration
      linear_index = multi_index[dimension];
      scale = shape.dimensions(dimension);
      first = false;
    } else {
      linear_index += scale * multi_index[dimension];
      scale *= shape.dimensions(dimension);
    }
  }
  return linear_index;
}

/* static */ std::vector<int64> IndexUtil::LinearIndexToMultidimensionalIndex(
    const Shape& shape, int64 linear_index) {
  DCHECK_GE(linear_index, 0);
  DCHECK_LT(linear_index, ShapeUtil::ElementsIn(shape));

  // The following formula computes each element of the multidimensional index
  // (See comments in MultidimensionalIndexToLinearIndex for notation):
  //
  // I{L(0)} = linear_index % D{L(0)}
  // I{L(1)} = (linear_index / D{L(0)}) % D{L(1)}
  // I{L(2)} = (linear_index / (D{L(0)} * D{L(1)})) % D{L(2)}
  // ...
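  //
  // As a concrete illustration (assuming a rank-2 shape [2,3] with the
  // default minor_to_major layout {1, 0}): for linear_index = 5 the loop
  // below computes multi_index[1] = (5 / 1) % 3 = 2 and
  // multi_index[0] = (5 / 3) % 2 = 1, recovering the index {1, 2}.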
  std::vector<int64> multi_index(shape.dimensions_size());

  // Accumulated product D{L(0)} * D{L(1)} * ...
  int64 divisor = 1;
  for (auto dimension : LayoutUtil::MinorToMajor(shape)) {
    multi_index[dimension] =
        (linear_index / divisor) % shape.dimensions(dimension);
    divisor *= shape.dimensions(dimension);
  }
  return multi_index;
}

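// Illustration of the "odometer" bumping implemented below, assuming a shape
// with dimensions {2, 3}: starting from indices = {0, 0}, successive calls
// yield {0, 1}, {0, 2}, {1, 0}, {1, 1}, {1, 2}; the next call returns false
// and leaves the indices unchanged. Note that the minor-most position here is
// the last dimension number, independent of the shape's layout.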
/* static */ bool IndexUtil::BumpIndices(const Shape& shape,
                                         absl::Span<int64> indices) {
  for (int64 dimno = indices.size() - 1; dimno >= 0; --dimno) {
    int64 limit = shape.dimensions(dimno);
    if (indices[dimno] + 1 < limit) {
      indices[dimno]++;
      std::fill(indices.begin() + dimno + 1, indices.end(), 0);
      return true;
    }
  }
  return false;
}

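// Sketch of the stride computation below (assuming a shape with dimensions
// {2, 3, 4} and the default minor_to_major layout {2, 1, 0}): dimension 2 has
// stride 1, dimension 1 has stride 4, and dimension 0 has stride 3 * 4 = 12.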
/* static */ int64 IndexUtil::GetDimensionStride(const Shape& shape,
                                                 int64 dimension) {
  int64 stride = 1;
  for (auto dim : LayoutUtil::MinorToMajor(shape)) {
    if (dim == dimension) {
      break;
    }
    stride *= shape.dimensions()[dim];
  }
  return stride;
}

/* static */ bool IndexUtil::IndexInBounds(const Shape& shape,
                                           absl::Span<const int64> index) {
  int64 rank = shape.rank();
  if (rank != index.size()) {
    return false;
  }
  for (int64 d = 0; d < rank; ++d) {
    if (index[d] >= shape.dimensions(d)) {
      return false;
    }
  }
  return true;
}

/* static */ int IndexUtil::CompareIndices(absl::Span<const int64> lhs,
                                           absl::Span<const int64> rhs) {
  int64 rank = lhs.size();
  CHECK_EQ(rhs.size(), rank);
  for (int64 dim = 0; dim < rank; ++dim) {
    if (lhs[dim] < rhs[dim]) {
      return -1;
    } else if (lhs[dim] > rhs[dim]) {
      return 1;
    }
  }
  return 0;
}

}  // namespace xla