/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_
#define TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_

#include <cstdint>
#include <type_traits>

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

// Forward declarations. In particular, we forward declare protos so that their
// symbols can be removed from .so exports.
class AllocationDescription;
class Allocator;
class OpKernelContext;
class Tensor;
class TensorBuffer;
class TensorCApi;
class TensorCord;
class TensorDescription;
class TensorProto;
class Var;

namespace batch_util {
Status CopyElementToSlice(Tensor element, Tensor* parent, int64 index);
Status CopySliceToElement(const Tensor& parent, Tensor* element, int64 index);
Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64 index);
Status CopyContiguousSlices(const Tensor& src, int64 src_offset,
                            int64 dst_offset, int64 num_slices, Tensor* dst);
}  // namespace batch_util

/// @ingroup core

/// Interface to access the raw ref-counted data buffer.
class TensorBuffer : public core::RefCounted {
 public:
  explicit TensorBuffer(void* data_ptr) : data_(data_ptr) {}
  ~TensorBuffer() override {}

  /// \brief data() points to a memory region of size() bytes.
  ///
  /// NOTE(mrry): The `data()` method is not virtual for performance reasons.
  /// It can be called multiple times when the contents of a `Tensor` are
  /// accessed, and so making it non-virtual allows the body to be inlined.
  void* data() const { return data_; }

  /// \brief Size (in bytes) of the buffer.
  virtual size_t size() const = 0;

  /// \brief If this TensorBuffer is a sub-buffer of another TensorBuffer,
  /// returns that TensorBuffer. Otherwise, returns this.
  virtual TensorBuffer* root_buffer() = 0;

  /// \brief Fills metadata about the allocation into the proto.
  virtual void FillAllocationDescription(
      AllocationDescription* proto) const = 0;

  virtual bool GetAllocatedBytes(size_t* out_bytes) const;

  /// \brief Helper method to reinterpret the buffer as an array of `T`.
  template <typename T>
  T* base() const {
    return reinterpret_cast<T*>(data());
  }

  /// \brief Whether this TensorBuffer owns the underlying memory.
  virtual bool OwnsMemory() const { return true; }

 private:
  void* const data_;
};
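// The sketch below (not part of the original header) shows one way the
// TensorBuffer interface can be implemented for caller-owned memory. The class
// name and the wrapping-external-storage scenario are illustrative assumptions,
// not an API defined in this file.
//
//   class WrappedExternalBuffer : public TensorBuffer {
//    public:
//     WrappedExternalBuffer(void* data, size_t len)
//         : TensorBuffer(data), len_(len) {}
//     size_t size() const override { return len_; }
//     TensorBuffer* root_buffer() override { return this; }
//     void FillAllocationDescription(
//         AllocationDescription* proto) const override {}
//     // The wrapped memory stays owned by the caller, so deallocation is the
//     // caller's responsibility.
//     bool OwnsMemory() const override { return false; }
//
//    private:
//     const size_t len_;
//   };
//
// A Tensor constructed from such a buffer acquires its own reference on it
// (see the Tensor(DataType, const TensorShape&, TensorBuffer*) constructor).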

/// Represents an n-dimensional array of values.
class Tensor {
 public:
  /// \brief Creates a 1-dimensional, 0-element float tensor.
  ///
  /// The returned Tensor is not a scalar (shape {}), but is instead
  /// an empty one-dimensional Tensor (shape {0}, NumElements() ==
  /// 0). Since it has no elements, it does not need to be assigned a
  /// value and is initialized by default (IsInitialized() is
  /// true). If this is undesirable, consider creating a one-element
  /// scalar which does require initialization:
  ///
  /// ```c++
  ///
  ///     Tensor(DT_FLOAT, TensorShape({}))
  ///
  /// ```
  Tensor();

  /// \brief Creates a Tensor of the given `type` and `shape`. If
  /// LogMemory::IsEnabled() the allocation is logged as coming from
  /// an unknown kernel and step. Calling the Tensor constructor
  /// directly from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// The underlying buffer is allocated using a `CPUAllocator`.
  Tensor(DataType type, const TensorShape& shape);
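
  // A brief usage sketch (illustrative, not part of the original header); the
  // dtype and shape below are arbitrary example values:
  //
  //   Tensor t(DT_FLOAT, TensorShape({2, 3}));
  //   CHECK_EQ(t.NumElements(), 6);
  //   t.flat<float>().setZero();  // Eigen API on the mapped data.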

  /// \brief Creates a tensor with the input `type` and `shape`, using
  /// the allocator `a` to allocate the underlying buffer. If
  /// LogMemory::IsEnabled() the allocation is logged as coming from
  /// an unknown kernel and step. Calling the Tensor constructor
  /// directly from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// `a` must outlive the lifetime of this Tensor.
  Tensor(Allocator* a, DataType type, const TensorShape& shape);

  /// \brief Creates a tensor with the input `type` and `shape`, using
  /// the allocator `a` and the specified "allocation_attr" to
  /// allocate the underlying buffer. If the kernel and step are known,
  /// allocation_attr.allocation_will_be_logged should be set to true
  /// and LogMemory::RecordTensorAllocation should be called after the
  /// tensor is constructed. Calling the Tensor constructor directly
  /// from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// `a` must outlive the lifetime of this Tensor.
  Tensor(Allocator* a, DataType type, const TensorShape& shape,
         const AllocationAttributes& allocation_attr);

  /// \brief Creates a tensor with the input datatype, shape and buf.
  ///
  /// Acquires a ref on buf that belongs to this Tensor.
  Tensor(DataType type, const TensorShape& shape, TensorBuffer* buf);

  /// \brief Creates a tensor with the input datatype, shape and buf.
  ///
  /// Takes ownership of the buffer from the reference-counted pointer.
  Tensor(DataType type, const TensorShape& shape,
         core::RefCountPtr<TensorBuffer> buf);

  /// \brief Creates an empty Tensor of the given data type.
  ///
  /// Like Tensor(), returns a 1-dimensional, 0-element Tensor with
  /// IsInitialized() returning True. See the Tensor() documentation
  /// for details.
  explicit Tensor(DataType type);

 private:
  // A tag type for selecting the `Tensor` constructor overload that creates a
  // scalar tensor in host memory.
  struct host_scalar_tag {};

  class HostScalarTensorBufferBase;
  template <typename T>
  struct ValueAndTensorBuffer;

  // Creates a tensor with the given scalar `value` in CPU memory.
  template <typename T>
  Tensor(T value, host_scalar_tag tag);

 public:
  // A series of specialized constructors for scalar tensors in host memory.
  //
  // NOTE: The `Variant` host-scalar constructor is not defined, because Variant
  // is implicitly constructible from many different types, and this causes
  // ambiguities with some compilers.
  explicit Tensor(float scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(double scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int32 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint32 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(tstring scalar_value)
      : Tensor(std::move(scalar_value), host_scalar_tag{}) {}
  explicit Tensor(complex64 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(complex128 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int64 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint64 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(bool scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(qint8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(quint8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(qint16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(quint16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(qint32 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(bfloat16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(Eigen::half scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(ResourceHandle scalar_value)
      : Tensor(std::move(scalar_value), host_scalar_tag{}) {}

  // NOTE: The `const char*` host-scalar constructor is provided as a
  // convenience because otherwise passing a string literal would surprisingly
  // construct a DT_BOOL tensor.
  explicit Tensor(const char* scalar_value)
      : Tensor(tstring(scalar_value), host_scalar_tag{}) {}
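
  // Illustrative use of the host-scalar constructors (not part of the original
  // header); the values are arbitrary:
  //
  //   Tensor f(3.5f);     // DT_FLOAT scalar.
  //   Tensor s("hello");  // DT_STRING scalar, thanks to the const char*
  //                       // overload above; without it the literal would
  //                       // convert to bool and create a DT_BOOL scalar.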

  /// Copy constructor.
  Tensor(const Tensor& other);

  /// \brief Move constructor. After this call, <other> is safely destructible,
  /// can be assigned to, and IsInitialized() can be called and will return
  /// false. Other calls on <other> (e.g. shape manipulation) are not valid.
  Tensor(Tensor&& other);

  // Explicitly delete constructors that take a pointer (except char*)
  // so that the pointer doesn't get implicitly cast to bool.
  template <typename T, typename std::enable_if<!std::is_same<T, char>::value,
                                                T>::type* = nullptr>
  explicit Tensor(T* t) = delete;

  ~Tensor();

  /// Returns the data type.
  DataType dtype() const { return shape_.data_type(); }

  /// Returns the shape of the tensor.
  const TensorShape& shape() const { return shape_; }

  /// \brief Convenience accessor for the tensor shape.
  ///
  /// For all shape accessors, see comments for relevant methods of
  /// `TensorShape` in `tensor_shape.h`.
  int dims() const { return shape().dims(); }

  /// Convenience accessor for the tensor shape.
  int64 dim_size(int d) const { return shape().dim_size(d); }

  /// Convenience accessor for the tensor shape.
  int64 NumElements() const { return shape().num_elements(); }

  bool IsSameSize(const Tensor& b) const {
    return shape().IsSameSize(b.shape());
  }

  // True iff the two tensors use the same underlying refcounted storage
  bool SharesBufferWith(const Tensor& b) const;

  /// \brief Has this Tensor been initialized?
  ///
  /// Zero-element Tensors are always considered initialized, even if they
  /// have never been assigned to and do not have any memory allocated.
  bool IsInitialized() const;

  /// Returns the estimated memory usage of this tensor.
  size_t TotalBytes() const;

  // Returns the size of allocated memory for this tensor.
  size_t AllocatedBytes() const;

  /// Returns true iff this tensor is aligned.
  bool IsAligned() const {
#if EIGEN_MAX_ALIGN_BYTES == 0
    return true;
#else
    void* ptr = base<void>();
    return dtype() == DT_STRING ||
           (reinterpret_cast<intptr_t>(ptr) % EIGEN_MAX_ALIGN_BYTES == 0);
#endif
  }

  /// Assign operator. This tensor shares other's underlying storage.
  Tensor& operator=(const Tensor& other) {
    CopyFromInternal(other, other.shape());
    return *this;
  }

  /// Move operator. See move constructor for details.
  Tensor& operator=(Tensor&& other);

  /// \brief Copy the other tensor into this tensor and reshape it.
  ///
  /// This tensor shares other's underlying storage. Returns `true`
  /// iff `other.shape()` has the same number of elements as the given
  /// `shape`.
  bool CopyFrom(const Tensor& other,
                const TensorShape& shape) TF_MUST_USE_RESULT {
    if (other.NumElements() != shape.num_elements()) return false;
    CopyFromInternal(other, shape);
    return true;
  }
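
  // Illustrative use of CopyFrom (not part of the original header); both
  // shapes have 6 elements, so the copy succeeds and shares storage:
  //
  //   Tensor src(DT_FLOAT, TensorShape({2, 3}));
  //   Tensor dst;
  //   bool ok = dst.CopyFrom(src, TensorShape({3, 2}));  // ok == true
  //   CHECK(dst.SharesBufferWith(src));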

  /// \brief Slice this tensor along the 1st dimension.

  /// I.e., the returned tensor satisfies
  ///     returned[i, ...] == this[dim0_start + i, ...].
  /// The returned tensor shares the underlying tensor buffer with this
  /// tensor.
  ///
  /// NOTE: The returned tensor may not satisfy the same alignment
  /// requirement as this tensor depending on the shape. The caller
  /// must check the returned tensor's alignment before calling certain
  /// methods that have alignment requirement (e.g., `flat()`, `tensor()`).
  ///
  /// NOTE: When fed with an N-dimensional tensor, this method returns a tensor
  /// also with N dimensions. If you want to select a sub tensor, see SubSlice.
  ///
  /// REQUIRES: `dims()` >= 1
  /// REQUIRES: `0 <= dim0_start <= dim0_limit <= dim_size(0)`
  Tensor Slice(int64 dim0_start, int64 dim0_limit) const;

  /// \brief Select a subslice from this tensor along the 1st dimension.
  ///
  /// When fed with an N-dimensional tensor, this method returns a tensor with
  /// N-1 dimensions, where the returned tensor is a subslice of the input
  /// tensor along the first dimension. The N-1 dimensions of the returned
  /// tensor are the last N-1 dimensions of the input tensor.
  ///
  /// NOTE: The returned tensor may not satisfy the same alignment
  /// requirement as this tensor depending on the shape. The caller
  /// must check the returned tensor's alignment before calling certain
  /// methods that have alignment requirement (e.g., `flat()`, `tensor()`).
  ///
  /// REQUIRES: `dims()` >= 1
  /// REQUIRES: `0 <= index < dim_size(0)`
  Tensor SubSlice(int64 index) const;
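
  // Illustrative comparison of Slice and SubSlice (not part of the original
  // header), using an arbitrary {4, 3, 5} tensor:
  //
  //   Tensor t(DT_FLOAT, TensorShape({4, 3, 5}));
  //   Tensor s1 = t.Slice(1, 3);  // shape {2, 3, 5}; still 3 dimensions.
  //   Tensor s2 = t.SubSlice(1);  // shape {3, 5}; one fewer dimension.
  //   // Check alignment before calling flat()/tensor() on the results:
  //   if (s2.IsAligned()) { auto m = s2.matrix<float>(); }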

  /// \brief Parse `other` and construct the tensor.

  /// Returns `true` iff the parsing succeeds. If the parsing fails,
  /// the state of `*this` is unchanged.
  bool FromProto(const TensorProto& other) TF_MUST_USE_RESULT;
  bool FromProto(Allocator* a, const TensorProto& other) TF_MUST_USE_RESULT;

  /// \brief Fills in `proto` with `*this` tensor's content.
  ///
  /// `AsProtoField()` fills in the repeated field for `proto.dtype()`, while
  /// `AsProtoTensorContent()` encodes the content in `proto.tensor_content()`
  /// in a compact form.
  void AsProtoField(TensorProto* proto) const;
  void AsProtoTensorContent(TensorProto* proto) const;
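
  // Illustrative round trip through TensorProto (not part of the original
  // header):
  //
  //   TensorProto proto;
  //   t.AsProtoTensorContent(&proto);        // compact serialization
  //   Tensor restored;
  //   bool ok = restored.FromProto(proto);   // true iff parsing succeeded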

  /// \brief Return the tensor data as an `Eigen::Tensor` with the type and
  /// sizes of this `Tensor`.
  ///
  /// Use these methods when you know the data type and the number of
  /// dimensions of the Tensor and you want an `Eigen::Tensor`
  /// automatically sized to the `Tensor` sizes. The implementation check
  /// fails if either the type or the sizes mismatch.
  ///
  /// Example:
  ///
  /// ```c++
  ///
  ///     typedef float T;
  ///     Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
  ///     auto mat = my_mat.matrix<T>();    // 2D Eigen::Tensor, 3 x 5.
  ///     auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
  ///     auto vec = my_mat.vec<T>();       // CHECK fails as my_mat is 2D.
  ///     auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
  ///     auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
  ///
  /// ```
  template <typename T>
  typename TTypes<T>::Vec vec() {
    return tensor<T, 1>();
  }

  template <typename T>
  typename TTypes<T>::Matrix matrix() {
    return tensor<T, 2>();
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor tensor();

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// same size but a bitwise cast to the specified dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// NOTE: this is the same as `tensor()` except a bitcast is allowed.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor bit_casted_tensor();

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// last dimension elements converted into single elements of a larger type.
  ///
  /// For example, this is useful for kernels that can treat NCHW_VECT_C int8
  /// tensors as NCHW int32 tensors. The sizeof(T) should equal the size of
  /// the original element type * num elements in the original last dimension.
  /// NDIMS should be 1 less than the original number of dimensions.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor reinterpret_last_dimension();
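
  // Illustrative use of reinterpret_last_dimension (not part of the original
  // header): an NCHW_VECT_C-style qint8 tensor whose last dimension of four
  // one-byte elements is viewed as a single int32 per position. The shape is
  // an arbitrary example value.
  //
  //   Tensor packed(DT_QINT8, TensorShape({2, 3, 4, 5, 4}));
  //   // 4 (last dim) * 1 byte (qint8) == sizeof(int32); NDIMS == 5 - 1 == 4.
  //   auto as_int32 = packed.reinterpret_last_dimension<int32, 4>();
  //   // as_int32 has shape {2, 3, 4, 5}.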

  /// \brief Return the tensor data as an `Eigen::Tensor` of the data type and a
  /// specified shape.
  ///
  /// These methods allow you to access the data with the dimensions
  /// and sizes of your choice. You do not need to know the number of
  /// dimensions of the Tensor to call them. However, they `CHECK` that
  /// the type matches and that the requested dimensions create an
  /// `Eigen::Tensor` with the same number of elements as the tensor.
  ///
  /// Example:
  ///
  /// ```c++
  ///
  ///     typedef float T;
  ///     Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
  ///     // 1D Eigen::Tensor, size 60:
  ///     auto flat = my_ten.flat<T>();
  ///     // 2D Eigen::Tensor 12 x 5:
  ///     auto inner = my_ten.flat_inner_dims<T>();
  ///     // 2D Eigen::Tensor 4 x 15:
  ///     auto outer = my_ten.shaped<T, 2>({4, 15});
  ///     // CHECK fails, bad num elements:
  ///     auto outer = my_ten.shaped<T, 2>({4, 8});
  ///     // 3D Eigen::Tensor 6 x 5 x 2:
  ///     auto weird = my_ten.shaped<T, 3>({6, 5, 2});
  ///     // CHECK fails, type mismatch:
  ///     auto bad = my_ten.flat<int32>();
  ///
  /// ```
  template <typename T>
  typename TTypes<T>::Flat flat() {
    return shaped<T, 1>({NumElements()});
  }

  template <typename T>
  typename TTypes<T>::UnalignedFlat unaligned_flat() {
    return unaligned_shaped<T, 1>({NumElements()});
  }

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
  /// Tensor dimensions but the last NDIMS-1 into the first dimension of the
  /// result. If NDIMS > dims() then leading dimensions of size 1 will be
  /// added to make the output rank NDIMS.
  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::Tensor flat_inner_dims();

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
  /// Tensor dimensions but the first NDIMS-1 into the last dimension of the
  /// result. If NDIMS > dims() then trailing dimensions of size 1 will be
  /// added to make the output rank NDIMS.
  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::Tensor flat_outer_dims();

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing the
  /// first 'begin' Tensor dimensions into the first dimension of the result and
  /// the Tensor dimensions of the last dims() - 'begin' - NDIMS into the last
  /// dimension of the result. If 'begin' < 0 then the |'begin'| leading
  /// dimensions of size 1 will be added. If 'begin' + NDIMS > dims() then
  /// 'begin' + NDIMS - dims() trailing dimensions of size 1 will be added.
  template <typename T, size_t NDIMS = 3>
  typename TTypes<T, NDIMS>::Tensor flat_inner_outer_dims(int64 begin);

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor shaped(gtl::ArraySlice<int64> new_sizes);

  /// \brief Return the tensor data to an `Eigen::Tensor` with the new
  /// shape specified in `new_sizes` and cast to a new dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// The allowed bitcast is the only difference from `shaped()`.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor bit_casted_shaped(
      gtl::ArraySlice<int64> new_sizes);

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::UnalignedTensor unaligned_shaped(
      gtl::ArraySlice<int64> new_sizes);

  /// \brief Return the Tensor data as a `TensorMap` of fixed size 1:
  /// `TensorMap<TensorFixedSize<T, 1>>`.

  /// Using `scalar()` allows the compiler to perform optimizations as
  /// the size of the tensor is known at compile time.
  template <typename T>
  typename TTypes<T>::Scalar scalar();
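
  // Illustrative use of scalar() (not part of the original header):
  //
  //   Tensor t(3.0f);                  // DT_FLOAT host scalar.
  //   float v = t.scalar<float>()();   // read the single value
  //   t.scalar<float>()() = 4.0f;      // write it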

  /// Const versions of all the methods above.
  template <typename T>
  typename TTypes<T>::ConstVec vec() const {
    return tensor<T, 1>();
  }

  template <typename T>
  typename TTypes<T>::ConstMatrix matrix() const {
    return tensor<T, 2>();
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor tensor() const;

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// same size but a bitwise cast to the specified dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// NOTE: this is the same as `tensor()` except a bitcast is allowed.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor bit_casted_tensor() const;

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// last dimension elements converted into single elements of a larger type.
  ///
  /// For example, this is useful for kernels that can treat NCHW_VECT_C int8
  /// tensors as NCHW int32 tensors. The sizeof(T) should equal the size of
  /// the original element type * num elements in the original last dimension.
  /// NDIMS should be 1 less than the original number of dimensions.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor reinterpret_last_dimension() const;

  template <typename T>
  typename TTypes<T>::ConstFlat flat() const {
    return shaped<T, 1>({NumElements()});
  }

  template <typename T>
  typename TTypes<T>::UnalignedConstFlat unaligned_flat() const {
    return unaligned_shaped<T, 1>({NumElements()});
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  /// \brief Return the tensor data to an `Eigen::Tensor` with the new
  /// shape specified in `new_sizes` and cast to a new dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// The allowed bitcast is the only difference from `shaped()`.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor bit_casted_shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::UnalignedConstTensor unaligned_shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  template <typename T>
  typename TTypes<T>::ConstScalar scalar() const;

  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::ConstTensor flat_inner_dims() const;

  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::ConstTensor flat_outer_dims() const;

  template <typename T, size_t NDIMS = 3>
  typename TTypes<T, NDIMS>::ConstTensor flat_inner_outer_dims(
      int64 begin) const;

  /// Render the first `max_entries` values in `*this` into a string.
  std::string SummarizeValue(int64 max_entries, bool print_v2 = false) const;

  /// A human-readable summary of the tensor suitable for debugging.
  // `num_values` is the number of actual data values in the tensor
  // included in the message. If the tensor might be resident in
  // GPU/TPU memory use DeviceSafeDebugString instead.
  std::string DebugString(int num_values) const;
  std::string DebugString() const { return DebugString(3); }

  // Variant of DebugString() that should be used for possibly non-CPU tensors.
  // If the tensor is not resident on CPU, we can't read its values as
  // DebugString() does.
  std::string DeviceSafeDebugString() const;

  /// Fill in the `TensorDescription` proto with metadata about the
  /// tensor that is useful for monitoring and debugging.
  void FillDescription(TensorDescription* description) const;

  /// \brief Returns a `StringPiece` mapping the current tensor's buffer.
  ///
  /// The returned `StringPiece` may point to a memory location on devices
  /// that the CPU cannot address directly.
  ///
  /// NOTE: The underlying tensor buffer is refcounted, so the lifetime
  /// of the contents mapped by the `StringPiece` matches the lifetime of
  /// the buffer; callers should arrange to make sure the buffer does
  /// not get destroyed while the `StringPiece` is still used.
  ///
  /// REQUIRES: `DataTypeCanUseMemcpy(dtype())`.
  StringPiece tensor_data() const;
  void* data() const;
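
  // Illustrative use of tensor_data() (not part of the original header); valid
  // only for memcpy-able dtypes:
  //
  //   Tensor t(DT_INT32, TensorShape({4}));
  //   StringPiece bytes = t.tensor_data();
  //   // bytes.size() should equal 4 * sizeof(int32); keep `t` alive while
  //   // `bytes` is in use.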

  /// Copy the other tensor into this tensor, reshape it and reinterpret the
  /// buffer's datatype. If Status::OK() is returned, the two tensors now share
  /// the same underlying storage.
  ///
  /// This call requires that the `other` tensor and the given type and shape
  /// are "compatible" (i.e. they occupy the same number of bytes).
  ///
  /// Specifically:
  ///
  ///     shape.num_elements() * DataTypeSize(type)
  ///
  /// must equal
  ///
  ///     other.num_elements() * DataTypeSize(other.dtype())
  ///
  /// In addition, this function requires:
  ///   * DataTypeSize(other.dtype()) != 0
  ///   * DataTypeSize(type) != 0
  ///
  /// If any of the requirements are not met, errors::InvalidArgument is
  /// returned.
  Status BitcastFrom(const Tensor& other, DataType dtype,
                     const TensorShape& shape);
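
  // Illustrative use of BitcastFrom (not part of the original header): both
  // sides occupy 6 * 4 bytes, so the bitcast succeeds and shares storage.
  //
  //   Tensor floats(DT_FLOAT, TensorShape({2, 3}));
  //   Tensor ints;
  //   Status s = ints.BitcastFrom(floats, DT_INT32, TensorShape({6}));
  //   // s.ok(); `ints` now views the same bytes as 6 int32 values.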

  /// Like BitcastFrom, but CHECK fails if any preconditions are not met.
  ///
  /// Deprecated. Use BitcastFrom instead and check the returned Status.
  void UnsafeCopyFromInternal(const Tensor& other, DataType dtype,
                              const TensorShape& shape) {
    TF_CHECK_OK(BitcastFrom(other, dtype, shape));
  }

  // Returns true if the refcount on buf_ and any possible underlying root
  // buffer is one.
  bool RefCountIsOne() const;

 private:
  void CheckType(DataType expected_dtype) const;
  void CheckTypeAndIsAligned(DataType expected_dtype) const;
  void CheckIsAlignedAndSingleElement() const;
  void set_dtype(DataType t) { shape_.set_data_type(t); }

  // TensorShape's InlineVector.
  static gtl::InlinedVector<int64, 4> ComputeFlatInnerDims(
      gtl::ArraySlice<int64> orig, int64 num_out_dims);
  static gtl::InlinedVector<int64, 4> ComputeFlatOuterDims(
      gtl::ArraySlice<int64> orig, int64 num_out_dims);

  TensorShape shape_;
  TensorBuffer* buf_;

  friend class DMAHelper;             // For access to buf_.
  friend class TensorCApi;            // For access to buf_.
  friend class TensorCord;            // For access to buf_.
  friend class TensorReference;       // For access to buf_.
  friend class VariableOp;            // For access to set_shape.
  friend class AutoReloadVariableOp;  // For access to set_shape.
  friend class TensorTestHelper;      // For access to set_shape.
  friend class CastOpBase;            // For access to set_dtype.
  friend class ScopedAllocator;       // For access to buf_.
  friend Status batch_util::CopyElementToSlice(
      Tensor element, Tensor* parent,
      int64 index);  // For access to base<T>().
  friend Status batch_util::CopySliceToElement(
      const Tensor& parent, Tensor* element,
      int64 index);  // For access to base<T>().
  friend Status batch_util::MaybeMoveSliceToElement(
      Tensor* parent, Tensor* element,
      int64 index);  // For access to base<T>().
  friend Status batch_util::CopyContiguousSlices(
      const Tensor& src, int64 src_offset, int64 dst_offset, int64 num_slices,
      Tensor* dst);  // For access to base<T>().

  bool CanUseDMA() const;

  // Only needed by variable op to set the shape of an uninitialized
  // Tensor.
  // TODO: Remove this when we have a better story for detecting
  // uninitialized tensors.
  void set_shape(const TensorShape& shape) {
    DataType dt = dtype();
    shape_ = shape;
    set_dtype(dt);
  }

  inline void CopyFromInternal(const Tensor& other, const TensorShape& shape) {
    DCHECK_EQ(shape.num_elements(), other.NumElements());
    // Data type will be overwritten if this == &other, since dtype is part of
    // shape.
    DataType other_dtype = other.dtype();
    shape_ = shape;
    set_dtype(other_dtype);
    if (buf_ != other.buf_) {
      if (buf_) buf_->Unref();
      buf_ = other.buf_;
      if (buf_) buf_->Ref();
    }
  }

  template <typename T>
  T* base() const;

  template <size_t NDIMS>
  void FillDimsAndValidateCompatibleShape(
      gtl::ArraySlice<int64> new_sizes,
      Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const;

  template <typename T, size_t NDIMS>
  void FillDimsAndValidateCompatibleShape(
      gtl::ArraySlice<int64> new_sizes,
      Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const;
};

// Implementation details

// START_SKIP_DOXYGEN

template <typename T>
T* Tensor::base() const {
  return buf_ == nullptr ? nullptr : buf_->base<T>();
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::tensor() {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
                                           shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::tensor() const {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
                                                shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::bit_casted_tensor() {
  CHECK(IsAligned());
  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
                                           shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::bit_casted_tensor() const {
  CHECK(IsAligned());
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
                                                shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::reinterpret_last_dimension() {
  if (NDIMS == dims()) {
    return tensor<T, NDIMS>();
  }
  CHECK(IsAligned());
  CHECK_EQ(NDIMS, dims() - 1);
  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  for (int d = 0; d < NDIMS; ++d) {
    dims[d] = shape_.dim_sizes()[d];
  }
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::reinterpret_last_dimension()
    const {
  if (NDIMS == dims()) {
    return tensor<T, NDIMS>();
  }
  CHECK(IsAligned());
  CHECK_EQ(NDIMS, dims() - 1);
  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  for (int d = 0; d < NDIMS; ++d) {
    dims[d] = shape_.dim_sizes()[d];
  }
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(), dims);
}

template <size_t NDIMS>
void Tensor::FillDimsAndValidateCompatibleShape(
    gtl::ArraySlice<int64> new_sizes,
    Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const {
  CHECK_EQ(NDIMS, new_sizes.size());
  int64 new_num_elements = 1;
  for (size_t d = 0; d < NDIMS; d++) {
    new_num_elements *= new_sizes[d];
    (*dims)[d] = new_sizes[d];
  }
  CHECK_EQ(new_num_elements, NumElements());
}

template <typename T, size_t NDIMS>
void Tensor::FillDimsAndValidateCompatibleShape(
    gtl::ArraySlice<int64> new_sizes,
    Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const {
  CHECK_EQ(NDIMS, new_sizes.size());
  int64 new_num_elements = 1;
  for (size_t d = 0; d < NDIMS; d++) {
    new_num_elements *= new_sizes[d];
    (*dims)[d] = new_sizes[d];
  }
  const int element_size = DataTypeSize(BaseType(dtype()));
  if (element_size > 0) {
    CHECK_EQ(new_num_elements * sizeof(T), NumElements() * element_size);
  } else {
    // DataTypeSize() returns 0 for some data types. In this case, assume that
    // T has the same size as the buffer type.
    // NOTE: If we can be sure that DataTypeSize() does not return 0 for all
    // POD types, then we should check DataTypeToEnum<T>::v() == dtype(). Or
    // simply check if `element_size > 0` to err when bit cast is attempted on
    // Tensor of unknown data type size.
    CHECK_EQ(new_num_elements, NumElements());
  }
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::bit_casted_shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape<T>(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::UnalignedTensor Tensor::unaligned_shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CheckType(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::UnalignedTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CheckType(DataTypeToEnum<T>::v());
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::bit_casted_shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape<T>(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::UnalignedConstTensor Tensor::unaligned_shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CheckType(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::UnalignedConstTensor(base<T>(), dims);
}

template <typename T>
typename TTypes<T>::Scalar Tensor::scalar() {
  static_assert(
      !std::is_same<T, std::string>::value,
      "std::string is no longer a scalar type, use tensorflow::tstring");
  CheckIsAlignedAndSingleElement();
  return typename TTypes<T>::Scalar(base<T>());
}

template <typename T>
typename TTypes<T>::ConstScalar Tensor::scalar() const {
  static_assert(
      !std::is_same<T, std::string>::value,
      "std::string is no longer a scalar type, use tensorflow::tstring");
  CheckIsAlignedAndSingleElement();
  return typename TTypes<T>::ConstScalar(base<T>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_inner_dims() {
  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_outer_dims() {
  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_inner_outer_dims(int64 begin) {
  gtl::InlinedVector<int64, 4> flat_outer =
      ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);
  return shaped<T, NDIMS>(ComputeFlatInnerDims(flat_outer, NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_inner_dims() const {
  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_outer_dims() const {
  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_inner_outer_dims(
    int64 begin) const {
  gtl::InlinedVector<int64, 4> flat_outer =
      ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);
  return shaped<T, NDIMS>(ComputeFlatInnerDims(flat_outer, NDIMS));
}

inline Tensor::Tensor(const Tensor& other)
    : shape_(other.shape()), buf_(other.buf_) {
  if (buf_) buf_->Ref();
}

inline Tensor::Tensor(Tensor&& other)
    : shape_(std::move(other.shape_)), buf_(other.buf_) {
  other.buf_ = nullptr;
}

class Tensor::HostScalarTensorBufferBase : public TensorBuffer {
 public:
  using TensorBuffer::TensorBuffer;
  bool GetAllocatedBytes(size_t* out_bytes) const final;
  void FillAllocationDescription(AllocationDescription* proto) const final;
};

// A packed representation for a single scalar value of type `T`, and a
// `TensorBuffer` implementation that describes (and manages the lifetime of)
// that value.
template <typename T>
struct Tensor::ValueAndTensorBuffer {
  class HostScalarTensorBuffer : public Tensor::HostScalarTensorBufferBase {
   public:
    explicit HostScalarTensorBuffer(void* data)
        : HostScalarTensorBufferBase(data) {}
    size_t size() const final { return sizeof(T); }
    TensorBuffer* root_buffer() final { return this; }

    // Override `operator delete` so that calling `delete this` in
    // `core::Refcounted::Unref()` for an object of this type will free
    // the enclosing `ValueAndTensorBuffer` for the tensor buffer.
    //
    // NOTE(mrry): The definition of this method must be outside the class
    // definition in order to satisfy some compilers.
    static void operator delete(void* ptr);

    static void operator delete(void*, void*) {
      // Some compilers require an overridden class-specific deallocation
      // function, which will be called if placement `new` throws an
      // exception.
    }

   private:
    ~HostScalarTensorBuffer() override { static_cast<T*>(data())->~T(); }
  };

  T value;
  HostScalarTensorBuffer tensor_buffer;
};

/* static */
template <typename T>
void Tensor::ValueAndTensorBuffer<T>::HostScalarTensorBuffer::operator delete(
    void* ptr) {
  // Use a dummy object to compute the offset of
  // `ValueAndTensorBuffer::tensor_buffer`, because `offsetof()` is not
  // necessarily defined on this non-POD type (until C++17).
  //
  // NOTE(mrry): Using `sizeof(Tensor::ValueAndTensorBuffer<T>)` here requires
  // us to define this method outside the class definition, so that it is not
  // considered an incomplete type.
  typename std::aligned_storage<sizeof(Tensor::ValueAndTensorBuffer<T>),
                                alignof(Tensor::ValueAndTensorBuffer<T>)>::type
      dummy_storage_;
  Tensor::ValueAndTensorBuffer<T>* dummy_object =
      reinterpret_cast<Tensor::ValueAndTensorBuffer<T>*>(&dummy_storage_);
  intptr_t offset = reinterpret_cast<intptr_t>(&dummy_object->tensor_buffer) -
                    reinterpret_cast<intptr_t>(dummy_object);

  port::AlignedFree(static_cast<char*>(ptr) - offset);
}

template <typename T>
Tensor::Tensor(T value, host_scalar_tag tag) {
  auto* value_and_buf = static_cast<Tensor::ValueAndTensorBuffer<T>*>(
      port::AlignedMalloc(sizeof(typename Tensor::ValueAndTensorBuffer<T>),
                          EIGEN_MAX_ALIGN_BYTES));
  new (&value_and_buf->value) T(std::move(value));
  new (&value_and_buf->tensor_buffer)
      typename Tensor::ValueAndTensorBuffer<T>::HostScalarTensorBuffer(
          value_and_buf);
  buf_ = &value_and_buf->tensor_buffer;
  set_dtype(DataTypeToEnum<T>::value);
}

inline Tensor& Tensor::operator=(Tensor&& other) {
  // Avoid self-assignment, since we might destroy our underlying buffer.
  if (&other != this) {
    shape_ = std::move(other.shape_);
    if (buf_) buf_->Unref();
    buf_ = other.buf_;
    other.buf_ = nullptr;
  }
  return *this;
}

// END_SKIP_DOXYGEN

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_