/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// Generally useful utility functions that are common to (not specific to any
// given part of) the XLA code base.

#ifndef TENSORFLOW_COMPILER_XLA_UTIL_H_
#define TENSORFLOW_COMPILER_XLA_UTIL_H_

#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/types.h"

namespace xla {

// Logs the provided status message with a backtrace.
//
// For use by Status factories: logs a backtrace at the point where the status
// is created, so that --vmodule=util=1 shows all status creation backtraces.
Status WithLogBacktrace(const Status& status);

// Ranks greater than 8 are very rare, so use InlinedVector<int64, 8> to store
// the bounds and indices. In the rare case of a rank greater than 8, the
// InlinedVector simply behaves like a std::vector<> and heap-allocates the
// memory for its values.
static constexpr int kInlineRank = 8;
using DimensionVector = absl::InlinedVector<int64, kInlineRank>;

// RAII timer that logs, with a given label, the elapsed wall-clock time in
// human-readable form. This differs from base's ElapsedTimer primarily in that
// it emits the duration in a human-readable format.
//
// By default, the timing traces are only printed at VLOG(1) and above:
//
//   XLA_SCOPED_LOGGING_TIMER("fooing bar");  // nop if !VLOG_IS_ON(1).
//
// but you can control this via:
//
//   XLA_SCOPED_LOGGING_TIMER_LEVEL("fooing bar", 2);  // nop if !VLOG_IS_ON(2)
//
#define XLA_SCOPED_LOGGING_TIMER(label) \
  XLA_SCOPED_LOGGING_TIMER_HELPER(label, 1, __COUNTER__)
#define XLA_SCOPED_LOGGING_TIMER_LEVEL(label, level) \
  XLA_SCOPED_LOGGING_TIMER_HELPER(label, level, __COUNTER__)

// Helper for implementing the macros above. Do not use directly.
//
// Forces the evaluation of "counter", which we expect to be equal to
// __COUNTER__.
#define XLA_SCOPED_LOGGING_TIMER_HELPER(label, level, counter) \
  XLA_SCOPED_LOGGING_TIMER_HELPER2(label, level, counter)

// Helper for the macros above. Don't use directly.
#define XLA_SCOPED_LOGGING_TIMER_HELPER2(label, level, counter)      \
  ::xla::ScopedLoggingTimer XLA_ScopedLoggingTimerInstance##counter( \
      label, VLOG_IS_ON(level))

// RAII timer for the XLA_SCOPED_LOGGING_TIMER and
// XLA_SCOPED_LOGGING_TIMER_LEVEL macros above. Recommended usage is via the
// macros so you don't have to give the timer a name or worry about calling
// VLOG_IS_ON yourself.
struct ScopedLoggingTimer {
  // The timer does nothing if enabled is false. This lets you pass in your
  // file's VLOG_IS_ON value.
  ScopedLoggingTimer(const string& label, bool enabled);
  ~ScopedLoggingTimer();

  bool enabled;
  string label;
  uint64 start_micros;
};
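
// Illustrative sketch of using the struct directly (the macros above are the
// recommended interface); the function being timed here is hypothetical:
//
//   void CompileModule() {
//     ScopedLoggingTimer timer("compile module", VLOG_IS_ON(1));
//     ...  // Timed work; the labeled duration is logged when `timer` goes
//          // out of scope.
//   }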

// Given a vector<T>, returns a Span<uint8> that points at its
// internals.
//
// Warning: if the vector is updated its storage pointer may change, so use
// this with caution (ideally in limited scopes with temporary lifetimes).
template <typename T>
absl::Span<uint8> MutableByteSlice(std::vector<T>* v) {
  return absl::Span<uint8>(reinterpret_cast<uint8*>(v->data()),
                           v->size() * sizeof(T));
}

// Turns an immutable slice of type T into an immutable slice of bytes with the
// same byte size.
template <typename T>
absl::Span<const uint8> CastToByteSlice(absl::Span<const T> slice) {
  return absl::Span<const uint8>(reinterpret_cast<const uint8*>(slice.data()),
                                 slice.size() * sizeof(T));
}

// Casts a byte slice to a non-byte type T, checking that the original slice
// length is a multiple of sizeof(T).
template <typename T>
absl::Span<const T> CastByteSlice(absl::Span<const uint8> slice) {
  CHECK_EQ(0, slice.size() % sizeof(T));
  return absl::Span<const T>(reinterpret_cast<const T*>(slice.data()),
                             slice.size() / sizeof(T));
}
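
// Illustrative sketch (hypothetical values) of round-tripping a typed slice
// through bytes with the helpers above:
//
//   std::vector<float> values = {1.0f, 2.0f, 3.0f};
//   absl::Span<const uint8> bytes =
//       CastToByteSlice<float>(absl::MakeConstSpan(values));
//   // bytes.size() == 3 * sizeof(float) == 12.
//   absl::Span<const float> back = CastByteSlice<float>(bytes);
//   // back.size() == 3 and back[1] == 2.0f.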

// Convenience function to force a vector to convert to an immutable slice.
template <typename T>
absl::Span<const T> AsSlice(const std::vector<T>& v) {
  return absl::Span<const T>(v);
}

// Converts a mutable vector pointer into a Span of the same
// type.
template <typename T>
absl::Span<T> AsMutableSlice(std::vector<T>* v) {
  return absl::Span<T>(v->data(), v->size());
}

// xla::int64 is not the same type as tensorflow::protobuf_int64 in open-source.
// Wrapper function that gives an int64 array slice view of a repeated int64
// protobuf field.
static inline absl::Span<const int64> AsInt64Slice(
    const tensorflow::protobuf::RepeatedField<tensorflow::protobuf_int64>& v) {
  absl::Span<const tensorflow::protobuf_int64> slice(v);
  return absl::Span<const int64>(reinterpret_cast<const int64*>(slice.data()),
                                 slice.size());
}

// TODO(b/29771030): This nop overload was added to simplify the migration of
// Shape from a proto to a C++ class. Remove after the class has been migrated.
static inline absl::Span<const int64> AsInt64Slice(
    absl::Span<const int64> slice) {
  return slice;
}

// As above, but for uint64 types.
static inline absl::Span<const uint64> AsUInt64Slice(
    const tensorflow::protobuf::RepeatedField<tensorflow::protobuf_uint64>& v) {
  absl::Span<const tensorflow::protobuf_uint64> slice(v);
  return absl::Span<const uint64>(reinterpret_cast<const uint64*>(slice.data()),
                                  slice.size());
}

// Compares two containers for equality. Returns true iff the two containers
// have the same size and all their elements compare equal using their
// operator==. Like std::equal, but forces size equality.
template <typename Container1T, typename Container2T>
bool ContainersEqual(const Container1T& c1, const Container2T& c2) {
  return ((c1.size() == c2.size()) &&
          std::equal(std::begin(c1), std::end(c1), std::begin(c2)));
}

template <typename Container1T,
          typename ElementType = typename Container1T::value_type>
bool ContainersEqual(const Container1T& c1,
                     std::initializer_list<ElementType> il) {
  absl::Span<const ElementType> c2{il};
  return ContainersEqual(c1, c2);
}

// Compares two containers for equality. Returns true iff the two containers
// have the same size and all their elements compare equal using the predicate
// p. Like std::equal, but forces size equality.
template <typename Container1T, typename Container2T, class PredicateT>
bool ContainersEqual(const Container1T& c1, const Container2T& c2,
                     PredicateT p) {
  return ((c1.size() == c2.size()) &&
          std::equal(std::begin(c1), std::end(c1), std::begin(c2), p));
}
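
// Illustrative sketch (hypothetical values):
//
//   std::vector<int64> dims = {2, 3, 5};
//   ContainersEqual(dims, {2, 3, 5});                  // true
//   ContainersEqual(dims, std::vector<int64>{2, 3});   // false: sizes differ
//   ContainersEqual(dims, dims,
//                   [](int64 a, int64 b) { return a <= b; });  // true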

// Performs a copy of count values from src to dest, using different strides
// for source and destination. The source starting index is src_base, while the
// destination one is dest_base.
template <typename D, typename S>
void StridedCopy(absl::Span<D> dest, int64 dest_base, int64 dest_stride,
                 absl::Span<const S> src, int64 src_base, int64 src_stride,
                 int64 count) {
  for (; count > 0; --count, dest_base += dest_stride, src_base += src_stride) {
    dest[dest_base] = static_cast<D>(src[src_base]);
  }
}
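
// Illustrative sketch (hypothetical values): gather every other element of
// `src` into the front of `dest`:
//
//   std::vector<int> src = {10, 20, 30, 40, 50, 60};
//   std::vector<int> dest(3, 0);
//   StridedCopy<int, int>(absl::MakeSpan(dest), /*dest_base=*/0,
//                         /*dest_stride=*/1, absl::MakeConstSpan(src),
//                         /*src_base=*/0, /*src_stride=*/2, /*count=*/3);
//   // dest is now {10, 30, 50}.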

// Adds some context information to the error message in a Status. This is
// useful as Statuses are propagated upwards.
Status AddStatus(Status prior, absl::string_view context);
Status AppendStatus(Status prior, absl::string_view context);

// Status error shorthands -- StrFormat's the arguments to be used as an error
// message and returns a status in the canonical error space.
template <typename... Args>
Status InvalidArgument(const absl::FormatSpec<Args...>& format,
                       const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::InvalidArgument(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status Unimplemented(const absl::FormatSpec<Args...>& format,
                     const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::Unimplemented(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status InternalError(const absl::FormatSpec<Args...>& format,
                     const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::Internal(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status FailedPrecondition(const absl::FormatSpec<Args...>& format,
                          const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::FailedPrecondition(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status Cancelled(const absl::FormatSpec<Args...>& format, const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::Cancelled(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status ResourceExhausted(const absl::FormatSpec<Args...>& format,
                         const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::ResourceExhausted(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status NotFound(const absl::FormatSpec<Args...>& format, const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::NotFound(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status Unavailable(const absl::FormatSpec<Args...>& format,
                   const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::Unavailable(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status Unknown(const absl::FormatSpec<Args...>& format, const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::Unknown(absl::StrFormat(format, args...)));
}
template <typename... Args>
Status Internal(const absl::FormatSpec<Args...>& format, const Args&... args) {
  return WithLogBacktrace(
      tensorflow::errors::Internal(absl::StrFormat(format, args...)));
}

template <typename... Args>
Status InvalidArgumentStrCat(Args&&... concat) {
  return InvalidArgument("%s", absl::StrCat(std::forward<Args>(concat)...));
}

template <typename... Args>
Status UnimplementedStrCat(Args&&... concat) {
  return Unimplemented("%s", absl::StrCat(std::forward<Args>(concat)...));
}

template <typename... Args>
Status InternalErrorStrCat(Args&&... concat) {
  return InternalError("%s", absl::StrCat(std::forward<Args>(concat)...));
}

template <typename... Args>
Status ResourceExhaustedStrCat(Args&&... concat) {
  return ResourceExhausted("%s", absl::StrCat(std::forward<Args>(concat)...));
}
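
// Illustrative sketch (the variables are hypothetical):
//
//   return InvalidArgument("expected rank %d, got %d", expected_rank, rank);
//   return UnimplementedStrCat("unsupported opcode: ", opcode_name);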

// Splits the lines of the original, replaces leading whitespace with the
// prefix given by "indentation", and returns the string joined by newlines
// again. As a side effect, any additional trailing whitespace is removed.
//
// Note: even different amounts of leading whitespace on different lines will
// be uniformly replaced with "indentation".
string Reindent(absl::string_view original, absl::string_view indentation);

// Checks whether permutation is a permutation of the [0, rank) integer range.
bool IsPermutation(absl::Span<const int64> permutation, int64 rank);

// Applies `permutation` on `input` and returns the permuted array.
// For each i, output[permutation[i]] = input[i].
//
// Precondition:
// 1. `permutation` is a permutation of 0..permutation.size()-1.
// 2. permutation.size() == input.size().
template <typename Container>
std::vector<typename Container::value_type> Permute(
    absl::Span<const int64> permutation, const Container& input) {
  using T = typename Container::value_type;
  absl::Span<const T> data(input);
  CHECK(IsPermutation(permutation, data.size()));
  std::vector<T> output(data.size());
  for (size_t i = 0; i < permutation.size(); ++i) {
    output[permutation[i]] = data[i];
  }
  return output;
}
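
// Illustrative sketch (hypothetical values): with permutation {2, 0, 1},
// element i of the input moves to position permutation[i] of the output:
//
//   std::vector<int64> dims = {10, 20, 30};
//   std::vector<int64> permuted = Permute({2, 0, 1}, dims);
//   // permuted is {20, 30, 10}: dims[0] went to index 2, dims[1] to index 0,
//   // and dims[2] to index 1.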

// Inverts a permutation, i.e., output_permutation[input_permutation[i]] = i.
std::vector<int64> InversePermutation(
    absl::Span<const int64> input_permutation);

// Composes two permutations: output[i] = p1[p2[i]].
std::vector<int64> ComposePermutations(absl::Span<const int64> p1,
                                       absl::Span<const int64> p2);

// Returns true iff permutation == {0, 1, 2, ...}.
bool IsIdentityPermutation(absl::Span<const int64> permutation);
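
// Illustrative sketch (hypothetical values):
//
//   InversePermutation({2, 0, 1});               // {1, 2, 0}
//   ComposePermutations({2, 0, 1}, {1, 2, 0});   // {0, 1, 2}
//   IsIdentityPermutation({0, 1, 2});            // true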

template <typename Container>
int64 PositionInContainer(const Container& container, int64 value) {
  return std::distance(container.begin(), absl::c_find(container, value));
}

// Formats the container as a comma-separated string. StrAppend must support
// appending the elements of the container. Prefix is prepended and suffix is
// appended to the returned string.
template <typename Container>
string CommaSeparatedString(const Container& c, const char* prefix = "",
                            const char* suffix = "") {
  // Not using Join() since the implementation here is simple anyway and this
  // avoids copying the string to append prefix.
  string comma_separated = prefix;
  const char* separator = "";
  for (const auto& entry : c) {
    absl::StrAppend(&comma_separated, separator, entry);
    separator = ", ";
  }
  comma_separated += suffix;
  return comma_separated;
}

// Overload needed to allow the container to be an initializer list. The
// default type for T makes an empty initializer list work as well.
template <typename T = int>
string CommaSeparatedString(const std::initializer_list<T>& c,
                            const char* prefix = "", const char* suffix = "") {
  return CommaSeparatedString<std::initializer_list<T>>(c, prefix, suffix);
}

// Formats the container in the mathematical notation for a vector, e.g. (1, 3,
// 7). StrAppend must support appending the elements of c.
template <typename Container>
string VectorString(const Container& c) {
  return CommaSeparatedString(c, "(", ")");
}

// Overload needed to allow the container to be an initializer list. The
// default type for T makes an empty initializer list work as well.
template <typename T = int>
string VectorString(const std::initializer_list<T>& c) {
  return VectorString<std::initializer_list<T>>(c);
}
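
// Illustrative sketch (hypothetical values):
//
//   CommaSeparatedString(std::vector<int64>{1, 3, 7}, "[", "]");  // "[1, 3, 7]"
//   VectorString({1, 3, 7});                                      // "(1, 3, 7)"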

// Returns a PaddingConfig object that represents no padding for the given
// rank.
PaddingConfig MakeNoPaddingConfig(int64 rank);

// Returns a PaddingConfig object where 'padding' contains
// (low edge padding, high edge padding) pairs for each dimension.
PaddingConfig MakeEdgePaddingConfig(
    absl::Span<const std::pair<int64, int64>> padding);
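
// Illustrative sketch (hypothetical values): pad dimension 0 by 1 low / 2 high
// and dimension 1 by 0 low / 3 high, with no interior padding:
//
//   PaddingConfig config = MakeEdgePaddingConfig({{1, 2}, {0, 3}});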

// Returns true if the padding configuration has at least one dimension with
// non-zero interior padding.
bool HasInteriorPadding(const PaddingConfig& config);

// Imports the templated FloorOfRatio math function from the TensorFlow
// namespace, as it is very commonly used.
template <typename T>
T FloorOfRatio(T dividend, T divisor) {
  return tensorflow::MathUtil::FloorOfRatio<T>(dividend, divisor);
}

// Imports the templated CeilOfRatio math function from the TensorFlow
// namespace, as it is very commonly used.
template <typename T>
T CeilOfRatio(T dividend, T divisor) {
  return tensorflow::MathUtil::CeilOfRatio<T>(dividend, divisor);
}

template <typename T>
std::vector<T> ElementWiseCeilOfRatio(absl::Span<const T> dividends,
                                      absl::Span<const T> divisors) {
  std::vector<T> ceil_of_ratios;
  CHECK_EQ(dividends.size(), divisors.size());
  ceil_of_ratios.reserve(dividends.size());
  absl::c_transform(dividends, divisors, std::back_inserter(ceil_of_ratios),
                    [](const T dividend, const T divisor) {
                      return CeilOfRatio<T>(dividend, divisor);
                    });
  return ceil_of_ratios;
}
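
// Illustrative sketch (hypothetical values):
//
//   CeilOfRatio<int64>(13, 8);                         // 2
//   FloorOfRatio<int64>(13, 8);                        // 1
//   ElementWiseCeilOfRatio<int64>({13, 16}, {8, 8});   // {2, 2}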

// Rounds the value up to a multiple of the divisor by first calling
// CeilOfRatio then multiplying by the divisor. For example:
// RoundUpToNearest(13, 8) => 16
template <typename T>
T RoundUpToNearest(T value, T divisor) {
  return CeilOfRatio(value, divisor) * divisor;
}

// Rounds the value down to a multiple of the divisor by first calling
// FloorOfRatio then multiplying by the divisor. For example:
// RoundDownToNearest(13, 8) => 8
template <typename T>
T RoundDownToNearest(T value, T divisor) {
  return FloorOfRatio(value, divisor) * divisor;
}

// Given a number of flops executed in an amount of time, produces a string
// that represents the throughput;
// e.g. HumanReadableNumFlops(1e9, 1e9) => 1.00GFLOP/s.
string HumanReadableNumFlops(double flops, double nanoseconds);

// Given a number of transcendental ops executed in an amount of time, produces
// a string that represents the throughput;
// e.g. HumanReadableNumTranscendentalOps(1e9, 1e9) => 1.00GTROP/s.
string HumanReadableNumTranscendentalOps(double trops, double nanoseconds);

// Splits the text into multiple lines and logs each line with the given
// severity, filename, and line number.
void LogLines(int sev, absl::string_view text, const char* fname, int lineno);

template <typename T>
inline bool IsPowerOfTwo(T x) {
  static_assert(!std::numeric_limits<T>::is_signed, "unsigned types only");
  return x != 0 && (x & (x - 1)) == 0;
}

// Returns a mask with "bits" number of least significant bits set.
inline uint32 LsbMaskU32(int bits) {
  CHECK_GE(bits, 0);
  return (1U << bits) - 1;
}
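
// Illustrative sketch (hypothetical values):
//
//   IsPowerOfTwo(64u);   // true
//   IsPowerOfTwo(65u);   // false
//   LsbMaskU32(3);       // 0b111 == 7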

// Utility for performing a static_cast<> on a std::unique_ptr<>.
template <typename Derived, typename Base>
std::unique_ptr<Derived> unique_ptr_static_cast(std::unique_ptr<Base> ptr) {
  return std::unique_ptr<Derived>(static_cast<Derived*>(ptr.release()));
}

int64 Product(absl::Span<const int64> xs);

// Returns the start indices of consecutive non-overlapping subsequences of `a`
// and `b` with the same product, i.e. `(i, j)` so
// • a = {a[0 = i_0], ..., a[i_1 - 1], a[i_1], ... , a[i_2 - 1], ...}
// • b = {b[0 = j_0], ..., b[j_1 - 1], b[j_1], ... , b[j_2 - 1], ...}
// • ∀ k . 0 <= k < CommonFactors(a, b).size - 1 =>
//         a[i_k] × a[i_k + 1] × ... × a[i_(k+1) - 1] =
//         b[j_k] × b[j_k + 1] × ... × b[j_(k+1) - 1]
// where `CommonFactors(a, b)[CommonFactors(a, b).size - 1] = (a.size, b.size)`
//
// If the given shapes have non-zero size, returns the bounds of the shortest
// possible such subsequences; else, returns `{(0, 0), (a.size, b.size)}`.
std::vector<std::pair<int64, int64>> CommonFactors(absl::Span<const int64> a,
                                                   absl::Span<const int64> b);
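
// Illustrative sketch (hypothetical values): for a = {2, 3, 4} and b = {6, 4},
// the prefix products match after a's first two dimensions (2 * 3 == 6) and
// again at the end (4 == 4), so:
//
//   CommonFactors({2, 3, 4}, {6, 4});  // {{0, 0}, {2, 1}, {3, 2}}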

// Removes illegal characters from filenames.
string SanitizeFileName(string file_name);

template <typename C, typename Value>
int64 FindIndex(const C& c, Value&& value) {
  auto it = absl::c_find(c, std::forward<Value>(value));
  return std::distance(c.begin(), it);
}

template <typename C, typename Value>
void InsertAt(C* c, int64 index, Value&& value) {
  c->insert(c->begin() + index, std::forward<Value>(value));
}

template <typename C>
void EraseAt(C* c, int64 index) {
  c->erase(c->begin() + index);
}
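
// Illustrative sketch (hypothetical values):
//
//   std::vector<int64> dims = {2, 3, 5};
//   FindIndex(dims, int64{5});     // 2 (== dims.size() if not found)
//   InsertAt(&dims, 1, int64{7});  // dims is now {2, 7, 3, 5}
//   EraseAt(&dims, 0);             // dims is now {7, 3, 5}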

template <typename T>
std::vector<T> ArraySliceToVector(absl::Span<const T> slice) {
  return std::vector<T>(slice.begin(), slice.end());
}

template <typename T, size_t N>
std::vector<T> InlinedVectorToVector(
    const absl::InlinedVector<T, N>& inlined_vector) {
  return std::vector<T>(inlined_vector.begin(), inlined_vector.end());
}

// Returns true if `x` fits in 32 bits.
template <typename T>
bool IsInt32(T x) {
  // Following conversion rules: "the value is unchanged if it can be
  // represented in the destination type (and bit-field width); otherwise, the
  // value is implementation-defined."
  return static_cast<int32>(x) == x;
}

template <typename T>
Status EraseElementFromVector(std::vector<T>* container, const T& value) {
  // absl::c_find returns a const_iterator which does not seem to work on
  // gcc 4.8.4, and this breaks the ubuntu/xla_gpu build bot.
  auto it = std::find(container->begin(), container->end(), value);
  TF_RET_CHECK(it != container->end());
  container->erase(it);
  return Status::OK();
}
}  // namespace xla

#define XLA_LOG_LINES(SEV, STRING) \
  ::xla::LogLines(SEV, STRING, __FILE__, __LINE__)

#define XLA_VLOG_LINES(LEVEL, STRING)                                 \
  do {                                                                \
    if (VLOG_IS_ON(LEVEL)) XLA_LOG_LINES(::tensorflow::INFO, STRING); \
  } while (false);

// Utility macro that performs the equivalent of what one would expect
// LOG_LINES(FATAL, X) to do but can be used at the end of a function that
// returns a value without getting a compiler warning that no value is
// returned.
#define XLA_FATAL_LOG(X)                 \
  XLA_LOG_LINES(::tensorflow::ERROR, X); \
  LOG(FATAL) << "Aborting in " << __FUNCTION__ << " due to previous errors.";

#endif  // TENSORFLOW_COMPILER_XLA_UTIL_H_