/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Utility functions related to layouts of Shapes.

#ifndef TENSORFLOW_COMPILER_XLA_LAYOUT_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_LAYOUT_UTIL_H_

#include <ostream>
#include <string>
#include <vector>

#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/types.h"

namespace xla {

// Namespaced collection of (static) Layout utilities.
class LayoutUtil {
 public:
  // Creates a layout with the given minor-to-major dimension order. (This is a
  // convenience function for protobuf construction.)
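  //
  // Illustrative example (hypothetical values): for a rank-2 array,
  //
  //   Layout layout = LayoutUtil::MakeLayout({1, 0});
  //
  // creates a layout in which dimension 1 is most minor and dimension 0 is
  // most major, i.e. the familiar row-major order.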
  static Layout MakeLayout(tensorflow::gtl::ArraySlice<int64> minor_to_major);

  // Creates a sparse layout with the given maximum number of elements. (This
  // is a convenience function for protobuf construction.)
  static Layout MakeSparseLayout(int64 max_sparse_elements);

  // Returns the default layout for the given shape.
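  // (The default used in this codebase is the descending minor_to_major
  // order; e.g. for a rank-3 shape it is {2, 1, 0}, with dimension 0 the
  // most major.)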
  static Layout GetDefaultLayoutForShape(const Shape& shape);

  // Helper functions that create default layouts for various ranks.
  static Layout GetDefaultLayoutForRank(int64 rank);
  static Layout GetDefaultLayoutForR2();
  static Layout GetDefaultLayoutForR3();
  static Layout GetDefaultLayoutForR4();

  // Sets the default layout on the Shape.
  static void SetToDefaultLayout(Shape* shape);

  // Returns a shape with the same dimensions as `shape` but with the default
  // layout.
  static Shape GetWithDefaultLayout(const Shape& shape);

  // Sets the layouts of all Shapes within the given ProgramShape to the
  // default.
  static void SetToDefaultLayout(ProgramShape* program_shape);

  // Validates that the layout within the given shape is correct.
  static tensorflow::Status ValidateLayoutInShape(const Shape& shape);

  // Validates that the provided layout satisfies invariants for the given
  // shape.
  static tensorflow::Status ValidateLayoutForShape(const Layout& layout,
                                                   const Shape& shape);

  // Clears the layout in the given Shape. After this function is called,
  // HasLayout will return false for the shape.
  static void ClearLayout(Shape* shape);

  // Clears the layout on all Shapes within the given ProgramShape.
  static void ClearLayout(ProgramShape* program_shape);

  // Returns whether the given Shape is an array and has a dense format layout.
  static bool IsDenseArray(const Shape& shape);

  // Returns whether the given Layout has a dense format.
  static bool IsDense(const Layout& layout);

  // Returns whether the layout is monotonic and dim 0 is minor in the layout.
  // * R0 and R1: this is always trivially true.
  // * R2+: equivalent to column-major. Dimension 0 is the minor, dimension 1
  //        is more major, and so on until dimension N-1, which is the major.
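  //
  // For example, a rank-3 layout with minor_to_major {0, 1, 2} satisfies
  // this predicate, while one with minor_to_major {2, 1, 0} does not.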
  static bool IsMonotonicWithDim0Minor(const Layout& layout);

  // Returns whether the layout is monotonic and dim 0 is major in the layout.
  // * R0 and R1: this is always trivially true.
  // * R2+: equivalent to row-major. Dimension 0 is the major, dimension 1 is
  //        more minor, and so on until dimension N-1, which is the minor.
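  //
  // For example, a rank-3 layout with minor_to_major {2, 1, 0} satisfies
  // this predicate, while one with minor_to_major {0, 1, 2} does not.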
  static bool IsMonotonicWithDim0Major(const Layout& layout);

  // Returns whether the layout of the given shape has padding (i.e. a
  // padded_dimensions value in the Layout is greater than the corresponding
  // dimension size).
  static bool IsPadded(const Shape& shape);

  // Returns the padded_dimensions array for the given Shape.  Requires that
  // the shape is an array and has a dense layout.
  static tensorflow::gtl::ArraySlice<int64> PaddedDimensions(
      const Shape& shape);

  // Returns the value at the given index of the padded_dimensions array for
  // the given Shape.  Requires that the shape is an array and has a dense
  // layout.
  static int64 PaddedDimension(const Shape& shape, int64 index);

  // Returns the padding_value for the given Shape.  Requires that the shape is
  // an array and has a dense layout.
  static PaddingValue GetPaddingValue(const Shape& shape);

  // Returns whether the given Shape is an array (i.e. not a tuple) and has a
  // sparse format layout.
  static bool IsSparseArray(const Shape& shape);

  // Returns whether the given Layout has a sparse format.
  static bool IsSparse(const Layout& layout);

  // Returns the maximum number of elements that can be stored in a sparse
  // layout.
  static int64 MaxSparseElements(const Layout& layout);

  // Returns whether the given shape has a layout. For tuple shapes, true is
  // returned only if all elements have layouts.
  static bool HasLayout(const Shape& shape);

  // Returns whether all Shapes within the given ProgramShape have layouts.
  static bool HasLayout(const ProgramShape& program_shape);

  // Returns whether lhs and rhs are identical.
  static bool Equal(const Layout& lhs, const Layout& rhs);

  // Returns the minor_to_major array for the given Shape.  Requires that the
  // shape is an array and has a dense layout.
  static tensorflow::gtl::ArraySlice<int64> MinorToMajor(const Shape& shape);
  static tensorflow::gtl::ArraySlice<int64> MinorToMajor(const Layout& layout);

  // Major(0) is the most major logical dimension number, Major(1) is the
  // second-most-major logical dimension number and so on.
  //
  // This can be used to translate physical dimension numbers to logical
  // dimension numbers. Assume that we are numbering the physical dimensions so
  // that the most major physical dimension has physical dimension number 0 and
  // so on. Then a physical dimension number p corresponds to the logical
  // dimension number Major(p). So this function could also be called
  // PhysicalToLogical().
  //
  // As an example, consider physical dimension number 0, which by definition
  // is the most major. Then Major(0) is the most major logical dimension, so
  // Major maps the physical dimension number 0 to the most major logical
  // dimension number Major(0).
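  //
  // Illustrative example (hypothetical values): for a rank-3 layout with
  // minor_to_major {0, 1, 2}, the most major physical dimension holds
  // logical dimension 2, so Major(layout, 0) == 2, Major(layout, 1) == 1,
  // and Major(layout, 2) == 0.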
  static int64 Major(const Layout& layout, int64 physical_dimension_number);

  // Minor(0) is the most minor logical dimension number, Minor(1) is the
  // second-most-minor logical dimension number and so on.
  static int64 Minor(const Layout& layout, int64 physical_dimension_number);

  // Returns the inverse mapping of the Major() function. More precisely,
  // returns a vector v such that if l == Major(p), then v[l] == p.
  //
  // This can be used to translate logical dimension numbers into physical
  // dimension numbers. Assume that we are numbering the physical dimensions so
  // that the most major physical dimension has physical dimension number 0 and
  // so on. Then a logical dimension number l corresponds to the physical
  // dimension number MakeLogicalToPhysical(layout)[l].
  //
  // As an example, consider physical dimension number 0, which by definition
  // is the most major. Then l := Major(0) is the most major logical dimension.
  // If v is the vector returned from this function, then v[l] == 0. So v maps
  // the most major logical dimension l to the physical dimension number 0.
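  //
  // Illustrative example (hypothetical values): for a rank-3 layout with
  // minor_to_major {0, 1, 2}, Major() maps physical dimensions 0, 1, 2 to
  // logical dimensions 2, 1, 0 respectively, so this function returns the
  // vector {2, 1, 0}: logical dimension 0 lives at physical dimension 2,
  // and logical dimension 2 at physical dimension 0.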
  static std::vector<int64> MakeLogicalToPhysical(const Layout& layout);

  // Returns a human-readable string that represents the given layout.
  static string HumanString(const Layout& layout);

  // Copies the layout from 'src' to 'dst'. Recursively copies layouts of
  // tuples. 'src' and 'dst' need not be compatible, but the two shapes must
  // have the same tuple structure (if any) and arrays within the shapes must
  // have the same rank.
  static tensorflow::Status CopyLayoutBetweenShapes(const Shape& src,
                                                    Shape* dst);

  // Returns true if the layouts of lhs and rhs are equal, false
  // otherwise. Recursively compares layouts of tuples.
  //
  // lhs and rhs need not be compatible to have the same layout but the two
  // shapes must have the same tuple structure (if any) and arrays must have
  // the same rank. Element type is ignored.
  static bool LayoutsInShapesEqual(const Shape& lhs, const Shape& rhs);

  // Returns whether the given dimensions are consecutive in the given layout,
  // not necessarily in the order given.
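  //
  // Illustrative example (hypothetical values): for a rank-4 layout with
  // minor_to_major {3, 1, 2, 0}, dims {1, 2} (or {2, 1}) are consecutive
  // because those dimensions occupy adjacent positions in minor_to_major,
  // while dims {3, 2} are not.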
  static bool AreDimensionsConsecutive(const Layout& layout,
                                       tensorflow::gtl::ArraySlice<int64> dims);

 private:
  TF_DISALLOW_COPY_AND_ASSIGN(LayoutUtil);
};

std::ostream& operator<<(std::ostream& out, const Layout& layout);

}  // namespace xla

#endif  // TENSORFLOW_COMPILER_XLA_LAYOUT_UTIL_H_