/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/layout_util.h"

#include <stddef.h>
#include <algorithm>
#include <functional>
#include <random>
#include <string>
#include <unordered_map>
#include <vector>

#include "tensorflow/compiler/xla/protobuf_util.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/protobuf.h"

namespace xla {
namespace {

// Internal helper for GetDefaultLayoutForShape and SetToDefaultLayout. Sets
// minor_to_major to the value that represents the default layout.
void SetDefaultLayoutToContainer(
    tensorflow::protobuf::RepeatedField<tensorflow::protobuf_int64>*
        minor_to_major) {
  // The default XLA layout is major-to-minor (dim 0 is major).
  // For more information on XLA layouts, see:
  // https://www.tensorflow.org/performance/xla/shapes
  const int64 size = minor_to_major->size();
  for (int64 i = 0; i < size; ++i) {
    minor_to_major->Set(i, size - 1 - i);
  }
}

}  // namespace

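// Creates a dense Layout with the given minor-to-major dimension ordering.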
/* static */ Layout LayoutUtil::MakeLayout(
    tensorflow::gtl::ArraySlice<int64> minor_to_major) {
  Layout layout;
  layout.set_format(DENSE);
  for (int64 dimension_number : minor_to_major) {
    layout.add_minor_to_major(dimension_number);
  }
  return layout;
}

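// Creates a sparse Layout that can hold at most max_sparse_elements elements.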
/* static */ Layout LayoutUtil::MakeSparseLayout(int64 max_sparse_elements) {
  Layout layout;
  layout.set_format(SPARSE);
  layout.set_max_sparse_elements(max_sparse_elements);
  return layout;
}

namespace {

// Internal helper that creates a default layout for an array of the given
// rank.
Layout CreateDefaultLayoutForRank(int64 rank) {
  Layout layout;
  layout.set_format(DENSE);
  tensorflow::protobuf::RepeatedField<tensorflow::protobuf_int64>*
      minor_to_major = layout.mutable_minor_to_major();
  minor_to_major->Resize(rank, 0);
  SetDefaultLayoutToContainer(minor_to_major);
  return layout;
}

}  // namespace

/* static */ Layout LayoutUtil::GetDefaultLayoutForShape(const Shape& shape) {
  // A Layout proto corresponds to a single array, not a tuple.
  DCHECK(!ShapeUtil::IsTuple(shape));
  return CreateDefaultLayoutForRank(shape.dimensions_size());
}

/* static */ Layout LayoutUtil::GetDefaultLayoutForRank(int64 rank) {
  return CreateDefaultLayoutForRank(rank);
}

/* static */ Layout LayoutUtil::GetDefaultLayoutForR2() {
  return CreateDefaultLayoutForRank(2);
}

/* static */ Layout LayoutUtil::GetDefaultLayoutForR3() {
  return CreateDefaultLayoutForRank(3);
}

/* static */ Layout LayoutUtil::GetDefaultLayoutForR4() {
  return CreateDefaultLayoutForRank(4);
}

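// Recursively assigns the default (major-to-minor) layout to every array
// subshape; tuple and opaque shapes themselves carry no layout field.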
/* static */ void LayoutUtil::SetToDefaultLayout(Shape* shape) {
  if (ShapeUtil::IsTuple(*shape)) {
    // Tuple shape.
    for (auto& element_shape : *shape->mutable_tuple_shapes()) {
      SetToDefaultLayout(&element_shape);
    }
    shape->clear_layout();
  } else if (ShapeUtil::IsOpaque(*shape)) {
    shape->clear_layout();
  } else {
    shape->mutable_layout()->set_format(DENSE);
    tensorflow::protobuf::RepeatedField<tensorflow::protobuf_int64>*
        minor_to_major = shape->mutable_layout()->mutable_minor_to_major();
    minor_to_major->Resize(shape->dimensions_size(), 0);
    SetDefaultLayoutToContainer(minor_to_major);
  }
}

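// Returns a copy of the shape with every layout reset to the default.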
/* static */ Shape LayoutUtil::GetWithDefaultLayout(const Shape& shape) {
  Shape copy(shape);
  LayoutUtil::SetToDefaultLayout(&copy);
  return copy;
}

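// Applies the default layout to all parameter shapes and the result shape of
// the program shape.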
/* static */ void LayoutUtil::SetToDefaultLayout(ProgramShape* program_shape) {
  for (auto& parameter_shape : *program_shape->mutable_parameters()) {
    LayoutUtil::SetToDefaultLayout(&parameter_shape);
  }
  LayoutUtil::SetToDefaultLayout(program_shape->mutable_result());
}

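// Validates the layout carried by a shape: array shapes must have a valid
// layout, while tuple and opaque shapes must not have one at all.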
/* static */ tensorflow::Status LayoutUtil::ValidateLayoutInShape(
    const Shape& shape) {
  if (ShapeUtil::IsTuple(shape)) {
    // Tuple shape.
    if (shape.has_layout()) {
      return InvalidArgument("tuple should not have a layout field");
    }
    for (auto& element_shape : shape.tuple_shapes()) {
      TF_RETURN_IF_ERROR(ValidateLayoutInShape(element_shape));
    }
    return tensorflow::Status::OK();
  } else if (ShapeUtil::IsOpaque(shape)) {
    if (shape.has_layout()) {
      return InvalidArgument("opaque should not have a layout field");
    }
    return tensorflow::Status::OK();
  } else {
    // Array shape.
    if (!shape.has_layout()) {
      return InvalidArgument("shape %s does not have a layout",
                             ShapeUtil::HumanString(shape).c_str());
    }
    return ValidateLayoutForShape(shape.layout(), shape);
  }
}

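// Checks that the given layout is usable for the (non-tuple) shape: the
// format must be valid, minor_to_major must be a permutation of the shape's
// dimension numbers, and any padded dimensions must be at least as large as
// the logical dimensions.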
/* static */ tensorflow::Status LayoutUtil::ValidateLayoutForShape(
    const Layout& layout, const Shape& shape) {
  if (ShapeUtil::IsTuple(shape)) {
    return InvalidArgument("a single Layout is not valid for tuple shapes");
  }

  if (ShapeUtil::IsOpaque(shape)) {
    return tensorflow::Status::OK();
  }

  if (layout.format() == INVALID_FORMAT) {
    return InvalidArgument(
        "Layout does not have a valid format: layout {%s}, shape {%s}",
        layout.ShortDebugString().c_str(), shape.ShortDebugString().c_str());
  }

  if (layout.format() == DENSE) {
    if (layout.minor_to_major_size() != ShapeUtil::Rank(shape)) {
      return InvalidArgument(
          "layout minor_to_major field contains %d elements, "
          "but shape is rank %lld: {%s}; shape: %s",
          layout.minor_to_major_size(), ShapeUtil::Rank(shape),
          tensorflow::str_util::Join(layout.minor_to_major(), ", ").c_str(),
          shape.ShortDebugString().c_str());
    }

    std::vector<bool> dimensions_in_layout(ShapeUtil::Rank(shape), false);
    for (int64 i = 0; i < ShapeUtil::Rank(shape); ++i) {
      int64 dim = layout.minor_to_major(i);
      if (dim < 0 || dim >= ShapeUtil::Rank(shape)) {
        return InvalidArgument(
            "layout minor_to_major field has out-of-bounds value: %s",
            HumanString(layout).c_str());
      }
      if (dimensions_in_layout[dim]) {
        return InvalidArgument(
            "layout minor_to_major field has duplicate values: {%s}",
            HumanString(layout).c_str());
      }
      dimensions_in_layout[dim] = true;
    }

    if (layout.padded_dimensions_size() > 0) {
      if (layout.padded_dimensions_size() != ShapeUtil::Rank(shape)) {
        return InvalidArgument(
            "layout has %d padded dimensions, but shape is rank %lld",
            layout.padded_dimensions_size(), ShapeUtil::Rank(shape));
      }
      for (int i = 0; i < layout.padded_dimensions_size(); ++i) {
        if (layout.padded_dimensions(i) < shape.dimensions(i)) {
          return InvalidArgument(
              "for dimension %d, dimension padding (%lld) is smaller than "
              "the dimension size (%lld) of the shape",
              i, layout.padded_dimensions(i), shape.dimensions(i));
        }
      }
    }
  }

  return tensorflow::Status::OK();
}

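// Removes the layout from the shape and, recursively, from all tuple element
// shapes.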
/* static */ void LayoutUtil::ClearLayout(Shape* shape) {
  shape->clear_layout();
  for (auto& element_shape : *shape->mutable_tuple_shapes()) {
    ClearLayout(&element_shape);
  }
}

/* static */ void LayoutUtil::ClearLayout(ProgramShape* program_shape) {
  for (auto& parameter_shape : *program_shape->mutable_parameters()) {
    LayoutUtil::ClearLayout(&parameter_shape);
  }
  LayoutUtil::ClearLayout(program_shape->mutable_result());
}

/* static */ bool LayoutUtil::IsDenseArray(const Shape& shape) {
  return ShapeUtil::IsArray(shape) && shape.has_layout() &&
         IsDense(shape.layout());
}

/* static */ bool LayoutUtil::IsDense(const Layout& layout) {
  return layout.format() == DENSE;
}

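// A layout is monotonic with dim 0 minor when minor_to_major is
// {0, 1, ..., rank - 1}; the Dim0Major variant checks the reverse (default)
// ordering.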
/* static */ bool LayoutUtil::IsMonotonicWithDim0Minor(const Layout& layout) {
  CHECK(layout.format() == DENSE);
  return std::is_sorted(layout.minor_to_major().begin(),
                        layout.minor_to_major().end());
}

/* static */ bool LayoutUtil::IsMonotonicWithDim0Major(const Layout& layout) {
  CHECK(layout.format() == DENSE);
  return std::is_sorted(layout.minor_to_major().begin(),
                        layout.minor_to_major().end(), std::greater<int64>());
}

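// A dense array shape counts as padded only if some padded dimension strictly
// exceeds the corresponding logical dimension size.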
/* static */ bool LayoutUtil::IsPadded(const Shape& shape) {
  if (ShapeUtil::IsTuple(shape) || !HasLayout(shape) ||
      shape.layout().padded_dimensions_size() == 0) {
    return false;
  }
  CHECK(IsDenseArray(shape));
  CHECK_EQ(shape.dimensions_size(), shape.layout().padded_dimensions_size());
  for (int64 i = 0; i < shape.dimensions_size(); ++i) {
    if (shape.layout().padded_dimensions(i) > shape.dimensions(i)) {
      return true;
    }
  }
  return false;
}

/* static */ tensorflow::gtl::ArraySlice<int64> LayoutUtil::PaddedDimensions(
    const Shape& shape) {
  CHECK(IsDenseArray(shape));
  return AsInt64Slice(shape.layout().padded_dimensions());
}

/* static */ int64 LayoutUtil::PaddedDimension(const Shape& shape,
                                               int64 index) {
  CHECK(IsDenseArray(shape));
  return shape.layout().padded_dimensions(index);
}

/* static */ PaddingValue LayoutUtil::GetPaddingValue(const Shape& shape) {
  CHECK(IsDenseArray(shape));
  return shape.layout().padding_value();
}

/* static */ bool LayoutUtil::IsSparseArray(const Shape& shape) {
  return ShapeUtil::IsArray(shape) && shape.has_layout() &&
         IsSparse(shape.layout());
}

/* static */ bool LayoutUtil::IsSparse(const Layout& layout) {
  return layout.format() == SPARSE;
}

/* static */ int64 LayoutUtil::MaxSparseElements(const Layout& layout) {
  CHECK(IsSparse(layout));
  return layout.max_sparse_elements();
}

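// A shape has a layout if it carries one with a valid format; tuples require
// all subshapes to have layouts, and opaque shapes trivially qualify.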
/* static */ bool LayoutUtil::HasLayout(const Shape& shape) {
  if (ShapeUtil::IsTuple(shape)) {
    // Tuple shape: all subshapes must have a layout.
    return std::all_of(shape.tuple_shapes().begin(), shape.tuple_shapes().end(),
                       [](const Shape& s) { return HasLayout(s); });
  } else if (ShapeUtil::IsOpaque(shape)) {
    return true;
  }
  return shape.has_layout() && shape.layout().format() != INVALID_FORMAT;
}

/* static */ bool LayoutUtil::HasLayout(const ProgramShape& program_shape) {
  for (auto& parameter_shape : program_shape.parameters()) {
    if (!LayoutUtil::HasLayout(parameter_shape)) {
      return false;
    }
  }
  return LayoutUtil::HasLayout(program_shape.result());
}

/* static */ bool LayoutUtil::Equal(const Layout& lhs, const Layout& rhs) {
  return protobuf_util::ProtobufEquals(lhs, rhs);
}

/* static */ tensorflow::gtl::ArraySlice<int64> LayoutUtil::MinorToMajor(
    const Shape& shape) {
  CHECK(IsDenseArray(shape));
  return AsInt64Slice(shape.layout().minor_to_major());
}

/* static */ tensorflow::gtl::ArraySlice<int64> LayoutUtil::MinorToMajor(
    const Layout& layout) {
  CHECK(layout.format() == DENSE);
  return AsInt64Slice(layout.minor_to_major());
}

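// Major(layout, k) returns the logical dimension that is k-th most major; it
// is defined in terms of Minor by indexing minor_to_major from the other end.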
/* static */ int64 LayoutUtil::Major(const Layout& layout,
                                     int64 physical_dimension_number) {
  CHECK_LE(0, physical_dimension_number);
  CHECK_LT(physical_dimension_number, layout.minor_to_major_size());
  return Minor(layout,
               layout.minor_to_major_size() - 1 - physical_dimension_number);
}

/* static */ int64 LayoutUtil::Minor(const Layout& layout,
                                     int64 physical_dimension_number) {
  CHECK_EQ(layout.format(), DENSE);
  CHECK_LE(0, physical_dimension_number);
  CHECK_LT(physical_dimension_number, layout.minor_to_major_size());
  return layout.minor_to_major(physical_dimension_number);
}

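// Builds the inverse permutation of minor_to_major:
// logical_to_physical[logical dimension] = physical position, where physical
// position 0 is the most major dimension.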
/* static */ std::vector<int64> LayoutUtil::MakeLogicalToPhysical(
    const Layout& layout) {
  std::vector<int64> logical_to_physical(layout.minor_to_major_size());
  for (int64 physical = 0; physical < logical_to_physical.size(); ++physical) {
    const int64 logical = Major(layout, physical);
    logical_to_physical[logical] = physical;
  }
  return logical_to_physical;
}

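// Renders a layout as "sparse{max_elements}" for sparse layouts or as the
// brace-enclosed minor_to_major list for dense layouts.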
/* static */ string LayoutUtil::HumanString(const Layout& layout) {
  if (IsSparse(layout)) {
    return tensorflow::strings::StrCat("sparse{", layout.max_sparse_elements(),
                                       "}");
  }
  CHECK(IsDense(layout));
  return tensorflow::strings::StrCat(
      "{", tensorflow::str_util::Join(layout.minor_to_major(), ","), "}");
}

namespace {

// Internal helper for recursively copying layouts.
tensorflow::Status CopyLayoutInternal(const Shape& src, Shape* dst) {
  if (ShapeUtil::IsTuple(src) != ShapeUtil::IsTuple(*dst)) {
    return InvalidArgument(
        "cannot copy layout from shape: shape structure differs");
  }
  if (ShapeUtil::IsTuple(src)) {
    if (ShapeUtil::TupleElementCount(src) !=
        ShapeUtil::TupleElementCount(*dst)) {
      return InvalidArgument(
          "cannot copy layout from shape: tuple element count differs");
    }
    for (int64 i = 0; i < ShapeUtil::TupleElementCount(src); ++i) {
      TF_RETURN_IF_ERROR(CopyLayoutInternal(src.tuple_shapes(i),
                                            dst->mutable_tuple_shapes(i)));
    }
  } else {
    if (src.has_layout()) {
      if (ShapeUtil::Rank(src) != ShapeUtil::Rank(*dst)) {
        return InvalidArgument("cannot copy layout from shape: ranks differ");
      }
      TF_RETURN_IF_ERROR(
          LayoutUtil::ValidateLayoutForShape(src.layout(), *dst));
      *dst->mutable_layout() = src.layout();
    } else {
      dst->clear_layout();
    }
  }
  return tensorflow::Status::OK();
}

}  // namespace

/* static */
tensorflow::Status LayoutUtil::CopyLayoutBetweenShapes(const Shape& src,
                                                       Shape* dst) {
  return CopyLayoutInternal(src, dst);
}

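// Two shapes have equal layouts if they share the same tuple structure and
// every corresponding array subshape has the same rank and an identical
// layout proto.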
/* static */ bool LayoutUtil::LayoutsInShapesEqual(const Shape& lhs,
                                                   const Shape& rhs) {
  if (ShapeUtil::IsTuple(lhs) != ShapeUtil::IsTuple(rhs)) {
    return false;
  }
  if (ShapeUtil::IsTuple(lhs)) {
    if (ShapeUtil::TupleElementCount(lhs) !=
        ShapeUtil::TupleElementCount(rhs)) {
      return false;
    }
    for (int i = 0; i < ShapeUtil::TupleElementCount(lhs); ++i) {
      if (!LayoutsInShapesEqual(lhs.tuple_shapes(i), rhs.tuple_shapes(i))) {
        return false;
      }
    }
    return true;
  } else {
    return ShapeUtil::Rank(lhs) == ShapeUtil::Rank(rhs) &&
           LayoutUtil::Equal(lhs.layout(), rhs.layout());
  }
}

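// The given logical dimensions are consecutive if their positions in the
// layout's minor-to-major order form a contiguous run, regardless of their
// relative order within that run.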
/* static */ bool LayoutUtil::AreDimensionsConsecutive(
    const Layout& layout, tensorflow::gtl::ArraySlice<int64> dims) {
  CHECK(IsDense(layout));
  std::vector<int64> positions_in_layout;
  for (int64 dim : dims) {
    positions_in_layout.push_back(
        PositionInContainer(layout.minor_to_major(), dim));
  }
  std::sort(positions_in_layout.begin(), positions_in_layout.end());
  for (size_t i = 1; i < positions_in_layout.size(); ++i) {
    if (1 != positions_in_layout[i] - positions_in_layout[i - 1]) {
      return false;
    }
  }
  return true;
}

std::ostream& operator<<(std::ostream& out, const Layout& layout) {
  out << LayoutUtil::HumanString(layout);
  return out;
}

}  // namespace xla