/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_
#define TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_

#include <cstdint>
#include <type_traits>

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/stringpiece.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"

namespace tensorflow {

// Forward declarations. In particular, we forward declare protos so that their
// symbols can be removed from .so exports.
class AllocationDescription;
class Allocator;
class OpKernelContext;
class Tensor;
class TensorBuffer;
class TensorCApi;
class TensorCord;
class TensorDescription;
class TensorProto;
class Var;

namespace batch_util {
Status CopyElementToSlice(Tensor element, Tensor* parent, int64_t index);
Status CopySliceToElement(const Tensor& parent, Tensor* element, int64_t index);
Status MaybeMoveSliceToElement(Tensor* parent, Tensor* element, int64_t index);
Status CopyContiguousSlices(const Tensor& src, int64_t src_offset,
                            int64_t dst_offset, int64_t num_slices,
                            Tensor* dst);
}  // namespace batch_util

/// @ingroup core

/// Interface to access the raw ref-counted data buffer.
class TensorBuffer : public core::RefCounted {
 public:
  explicit TensorBuffer(void* data_ptr) : data_(data_ptr) {}
  ~TensorBuffer() override {}

  /// \brief data() points to a memory region of size() bytes.
  ///
  /// NOTE(mrry): The `data()` method is not virtual for performance reasons.
  /// It can be called multiple times when the contents of a `Tensor` are
  /// accessed, and so making it non-virtual allows the body to be inlined.
  void* data() const { return data_; }

  /// \brief Size (in bytes) of the buffer.
  virtual size_t size() const = 0;

  /// \brief If this TensorBuffer is a sub-buffer of another TensorBuffer,
  /// returns that TensorBuffer. Otherwise, returns this.
  virtual TensorBuffer* root_buffer() = 0;

  /// \brief Fills metadata about the allocation into the proto.
  virtual void FillAllocationDescription(
      AllocationDescription* proto) const = 0;

  virtual bool GetAllocatedBytes(size_t* out_bytes) const;

  /// \brief Helper method to reinterpret the buffer as an array of `T`.
  template <typename T>
  T* base() const {
    return reinterpret_cast<T*>(data());
  }

  /// \brief Whether this TensorBuffer owns the underlying memory.
  virtual bool OwnsMemory() const { return true; }

 private:
  void* const data_;
};
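
// Example: a minimal sketch of a TensorBuffer subclass that wraps memory the
// caller keeps alive for the lifetime of the tensor. The names `UnownedBuffer`,
// `ptr`, and `len` are illustrative and not part of this API.
//
//   class UnownedBuffer : public TensorBuffer {
//    public:
//     UnownedBuffer(void* ptr, size_t len) : TensorBuffer(ptr), len_(len) {}
//     size_t size() const override { return len_; }
//     TensorBuffer* root_buffer() override { return this; }
//     void FillAllocationDescription(
//         AllocationDescription* proto) const override {}
//     bool OwnsMemory() const override { return false; }
//
//    private:
//     const size_t len_;
//   };
//
// A Tensor built on top of such a buffer (e.g. via the
// `Tensor(DataType, const TensorShape&, TensorBuffer*)` constructor below)
// shares the wrapped memory rather than copying it.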

/// Represents an n-dimensional array of values.
class Tensor {
 public:
  /// \brief Creates a 1-dimensional, 0-element float tensor.
  ///
  /// The returned Tensor is not a scalar (shape {}), but is instead
  /// an empty one-dimensional Tensor (shape {0}, NumElements() ==
  /// 0). Since it has no elements, it does not need to be assigned a
  /// value and is initialized by default (IsInitialized() is
  /// true). If this is undesirable, consider creating a one-element
  /// scalar which does require initialization:
  ///
  /// ```c++
  ///
  /// Tensor(DT_FLOAT, TensorShape({}))
  ///
  /// ```
  Tensor();

  /// \brief Creates a Tensor of the given `type` and `shape`. If
  /// LogMemory::IsEnabled() the allocation is logged as coming from
  /// an unknown kernel and step. Calling the Tensor constructor
  /// directly from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// The underlying buffer is allocated using a `CPUAllocator`.
  Tensor(DataType type, const TensorShape& shape);

  /// \brief Creates a tensor with the input `type` and `shape`, using
  /// the allocator `a` to allocate the underlying buffer. If
  /// LogMemory::IsEnabled() the allocation is logged as coming from
  /// an unknown kernel and step. Calling the Tensor constructor
  /// directly from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// `a` must outlive the lifetime of this Tensor.
  Tensor(Allocator* a, DataType type, const TensorShape& shape);

  /// \brief Creates a tensor with the input `type` and `shape`, using
  /// the allocator `a` and the specified "allocation_attr" to
  /// allocate the underlying buffer. If the kernel and step are known,
  /// allocation_attr.allocation_will_be_logged should be set to true
  /// and LogMemory::RecordTensorAllocation should be called after the
  /// tensor is constructed. Calling the Tensor constructor directly
  /// from within an Op is deprecated: use the
  /// OpKernelConstruction/OpKernelContext allocate_* methods to
  /// allocate a new tensor, which record the kernel and step.
  ///
  /// `a` must outlive the lifetime of this Tensor.
  Tensor(Allocator* a, DataType type, const TensorShape& shape,
         const AllocationAttributes& allocation_attr);

  /// \brief Creates a tensor with the input datatype, shape and buf.
  ///
  /// Acquires a ref on buf that belongs to this Tensor.
  Tensor(DataType type, const TensorShape& shape, TensorBuffer* buf);

  /// \brief Creates a tensor with the input datatype, shape and buf.
  ///
  /// Takes ownership of the buffer from the reference-counted pointer.
  Tensor(DataType type, TensorShape shape, core::RefCountPtr<TensorBuffer> buf);

  /// \brief Creates an empty Tensor of the given data type.
  ///
  /// Like Tensor(), returns a 1-dimensional, 0-element Tensor with
  /// IsInitialized() returning True. See the Tensor() documentation
  /// for details.
  explicit Tensor(DataType type);

 private:
  // A tag type for selecting the `Tensor` constructor overload that creates a
  // scalar tensor in host memory.
  struct host_scalar_tag {};

  class HostScalarTensorBufferBase;
  template <typename T>
  struct ValueAndTensorBuffer;

  // Creates a tensor with the given scalar `value` in CPU memory.
  template <typename T>
  Tensor(T value, host_scalar_tag tag);

 public:
  // A series of specialized constructors for scalar tensors in host memory.
  //
  // NOTE: The `Variant` host-scalar constructor is not defined, because Variant
  // is implicitly constructible from many different types, and this causes
  // ambiguities with some compilers.
  explicit Tensor(float scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(double scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int32_t scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint32 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int16_t scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int8_t scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(tstring scalar_value)
      : Tensor(std::move(scalar_value), host_scalar_tag{}) {}
  explicit Tensor(complex64 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(complex128 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(int64_t scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(uint64 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(bool scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(qint8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(quint8 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(qint16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(quint16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(qint32 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(bfloat16 scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(Eigen::half scalar_value)
      : Tensor(scalar_value, host_scalar_tag{}) {}
  explicit Tensor(ResourceHandle scalar_value)
      : Tensor(std::move(scalar_value), host_scalar_tag{}) {}

  // NOTE: The `const char*` host-scalar constructor is provided as a
  // convenience because otherwise passing a string literal would surprisingly
  // construct a DT_BOOL tensor.
  explicit Tensor(const char* scalar_value)
      : Tensor(tstring(scalar_value), host_scalar_tag{}) {}
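
  // For example (a minimal illustrative sketch; the variable names are not
  // part of this API):
  //
  //   Tensor one(1.0f);      // DT_FLOAT scalar holding 1.0.
  //   Tensor name("hello");  // DT_STRING scalar, via the const char* overload.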

  /// Copy constructor.
  Tensor(const Tensor& other);

  /// \brief Move constructor. After this call, <other> is safely destructible,
  /// can be assigned to, and IsInitialized() can be called and will return
  /// false. Other calls on <other> (e.g. shape manipulation) are not valid.
  Tensor(Tensor&& other);

  // Explicitly delete constructors that take a pointer (except char*)
  // so that the pointer doesn't get implicitly cast to bool.
  template <typename T, typename std::enable_if<!std::is_same<T, char>::value,
                                                T>::type* = nullptr>
  explicit Tensor(T* t) = delete;

  ~Tensor();

  /// Returns the data type.
  DataType dtype() const { return shape_.data_type(); }

  /// Returns the shape of the tensor.
  const TensorShape& shape() const { return shape_; }

  /// \brief Convenience accessor for the tensor shape.
  ///
  /// For all shape accessors, see comments for relevant methods of
  /// `TensorShape` in `tensor_shape.h`.
  int dims() const { return shape().dims(); }

  /// Convenience accessor for the tensor shape.
  int64 dim_size(int d) const { return shape().dim_size(d); }

  /// Convenience accessor for the tensor shape.
  int64 NumElements() const { return shape().num_elements(); }

  bool IsSameSize(const Tensor& b) const {
    return shape().IsSameSize(b.shape());
  }

  // True iff the two tensors use the same underlying refcounted storage
  bool SharesBufferWith(const Tensor& b) const;

  /// \brief Returns true if this Tensor has been initialized, or if it does
  /// not require initialization.
  ///
  /// Zero-element Tensors are always considered initialized, even if they
  /// have never been assigned to and do not have any memory allocated.
  bool IsInitialized() const;

  /// Returns the estimated memory usage of this tensor.
  size_t TotalBytes() const;

  // Returns the size of allocated memory for this tensor.
  size_t AllocatedBytes() const;

  /// Returns true iff this tensor is aligned.
  bool IsAligned() const {
#if EIGEN_MAX_ALIGN_BYTES == 0
    return true;
#else
    void* ptr = base<void>();
    return dtype() == DT_STRING || NumElements() == 0 ||
           (reinterpret_cast<intptr_t>(ptr) % EIGEN_MAX_ALIGN_BYTES == 0);
#endif
  }

  /// Assign operator. This tensor shares other's underlying storage.
  Tensor& operator=(const Tensor& other) {
    CopyFromInternal(other, other.shape());
    return *this;
  }

  /// Move operator. See move constructor for details.
  Tensor& operator=(Tensor&& other);

  /// \brief Copy the other tensor into this tensor and reshape it.
  ///
  /// This tensor shares other's underlying storage. Returns `true`
  /// iff `other.shape()` has the same number of elements as the given
  /// `shape`.
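  ///
  /// For example (a minimal sketch; `src` and `dst` are illustrative names):
  ///
  /// ```c++
  ///
  /// Tensor src(DT_FLOAT, TensorShape({6}));
  /// Tensor dst;
  /// if (dst.CopyFrom(src, TensorShape({2, 3}))) {
  ///   // dst now shares src's buffer and has shape {2, 3}.
  /// }
  ///
  /// ```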
  bool CopyFrom(const Tensor& other,
                const TensorShape& shape) TF_MUST_USE_RESULT {
    if (other.NumElements() != shape.num_elements()) return false;
    CopyFromInternal(other, shape);
    return true;
  }

  /// \brief Slice this tensor along the 1st dimension.

  /// I.e., the returned tensor satisfies
  ///     returned[i, ...] == this[dim0_start + i, ...].
  /// The returned tensor shares the underlying tensor buffer with this
  /// tensor.
  ///
  /// NOTE: The returned tensor may not satisfy the same alignment
  /// requirement as this tensor depending on the shape. The caller
  /// must check the returned tensor's alignment before calling certain
  /// methods that have alignment requirement (e.g., `flat()`, `tensor()`).
  ///
  /// NOTE: When fed with an N-dimensional tensor, this method returns a tensor
  /// also with N dimensions. If you want to select a sub tensor, see SubSlice.
  ///
  /// REQUIRES: `dims()` >= 1
  /// REQUIRES: `0 <= dim0_start <= dim0_limit <= dim_size(0)`
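  ///
  /// For example (a minimal sketch; `input` is an illustrative 2-D tensor):
  ///
  /// ```c++
  ///
  /// Tensor input(DT_FLOAT, TensorShape({4, 5}));
  /// Tensor rows = input.Slice(1, 3);  // shape {2, 5}, shares input's buffer.
  /// if (rows.IsAligned()) {
  ///   auto mat = rows.matrix<float>();
  /// }
  ///
  /// ```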
  Tensor Slice(int64_t dim0_start, int64_t dim0_limit) const;

  /// \brief Select a subslice from this tensor along the 1st dimension.
  ///
  /// When fed with an N-dimensional tensor, this method returns a tensor with
  /// N-1 dimensions, where the returned tensor is a subslice of the input
  /// tensor along the first dimension. The N-1 dimensions of the returned
  /// tensor are the last N-1 dimensions of the input tensor.
  ///
  /// NOTE: The returned tensor may not satisfy the same alignment
  /// requirement as this tensor depending on the shape. The caller
  /// must check the returned tensor's alignment before calling certain
  /// methods that have alignment requirement (e.g., `flat()`, `tensor()`).
  ///
  /// REQUIRES: `dims()` >= 1
  /// REQUIRES: `0 <= index < dim_size(0)`
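  ///
  /// For example (a minimal sketch; `batch` is an illustrative 3-D tensor):
  ///
  /// ```c++
  ///
  /// Tensor batch(DT_FLOAT, TensorShape({8, 3, 5}));
  /// Tensor element = batch.SubSlice(2);  // shape {3, 5}, shares the buffer.
  ///
  /// ```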
  Tensor SubSlice(int64_t index) const;

  /// \brief Parse `other` and construct the tensor.

  /// Returns `true` iff the parsing succeeds. If the parsing fails,
  /// the state of `*this` is unchanged.
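  ///
  /// A minimal round-trip sketch (`src` and `parsed` are illustrative names):
  ///
  /// ```c++
  ///
  /// TensorProto proto;
  /// src.AsProtoField(&proto);
  /// Tensor parsed;
  /// if (!parsed.FromProto(proto)) {
  ///   // Handle the parse failure.
  /// }
  ///
  /// ```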
  bool FromProto(const TensorProto& other) TF_MUST_USE_RESULT;
  bool FromProto(Allocator* a, const TensorProto& other) TF_MUST_USE_RESULT;

  /// \brief Fills in `proto` with `*this` tensor's content.
  ///
  /// `AsProtoField()` fills in the repeated field for `proto.dtype()`, while
  /// `AsProtoTensorContent()` encodes the content in `proto.tensor_content()`
  /// in a compact form.
  void AsProtoField(TensorProto* proto) const;
  void AsProtoTensorContent(TensorProto* proto) const;

  /// \brief Return the tensor data as an `Eigen::Tensor` with the type and
  /// sizes of this `Tensor`.
  ///
  /// Use these methods when you know the data type and the number of
  /// dimensions of the Tensor and you want an `Eigen::Tensor`
  /// automatically sized to the `Tensor` sizes. The implementation CHECK-fails
  /// if either the type or the sizes mismatch.
  ///
  /// Example:
  ///
  /// ```c++
  ///
  /// typedef float T;
  /// Tensor my_mat(...built with Shape{rows: 3, cols: 5}...);
  /// auto mat = my_mat.matrix<T>();    // 2D Eigen::Tensor, 3 x 5.
  /// auto mat = my_mat.tensor<T, 2>(); // 2D Eigen::Tensor, 3 x 5.
  /// auto vec = my_mat.vec<T>();       // CHECK fails as my_mat is 2D.
  /// auto vec = my_mat.tensor<T, 3>(); // CHECK fails as my_mat is 2D.
  /// auto mat = my_mat.matrix<int32>();// CHECK fails as type mismatch.
  ///
  /// ```
  template <typename T>
  typename TTypes<T>::Vec vec() {
    return tensor<T, 1>();
  }

  template <typename T>
  typename TTypes<T>::Matrix matrix() {
    return tensor<T, 2>();
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor tensor();

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// same size but a bitwise cast to the specified dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// NOTE: this is the same as `tensor()` except a bitcast is allowed.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor bit_casted_tensor();

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// last dimension elements converted into single elements of a larger type.
  ///
  /// For example, this is useful for kernels that can treat NCHW_VECT_C int8
  /// tensors as NCHW int32 tensors. The sizeof(T) should equal the size of
  /// the original element type * num elements in the original last dimension.
  /// NDIMS should be 1 less than the original number of dimensions.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor reinterpret_last_dimension();

  /// \brief Return the tensor data as an `Eigen::Tensor` of the data type and a
  /// specified shape.
  ///
  /// These methods allow you to access the data with the dimensions
  /// and sizes of your choice. You do not need to know the number of
  /// dimensions of the Tensor to call them. However, they `CHECK` that
  /// the type matches and the dimensions requested create an
  /// `Eigen::Tensor` with the same number of elements as the tensor.
  ///
  /// Example:
  ///
  /// ```c++
  ///
  /// typedef float T;
  /// Tensor my_ten(...built with Shape{planes: 4, rows: 3, cols: 5}...);
  /// // 1D Eigen::Tensor, size 60:
  /// auto flat = my_ten.flat<T>();
  /// // 2D Eigen::Tensor 12 x 5:
  /// auto inner = my_ten.flat_inner_dims<T>();
  /// // 2D Eigen::Tensor 4 x 15:
  /// auto outer = my_ten.shaped<T, 2>({4, 15});
  /// // CHECK fails, bad num elements:
  /// auto outer = my_ten.shaped<T, 2>({4, 8});
  /// // 3D Eigen::Tensor 6 x 5 x 2:
  /// auto weird = my_ten.shaped<T, 3>({6, 5, 2});
  /// // CHECK fails, type mismatch:
  /// auto bad = my_ten.flat<int32>();
  ///
  /// ```
  template <typename T>
  typename TTypes<T>::Flat flat() {
    return shaped<T, 1>({NumElements()});
  }

  template <typename T>
  typename TTypes<T>::UnalignedFlat unaligned_flat() {
    return unaligned_shaped<T, 1>({NumElements()});
  }

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
  /// Tensor dimensions but the last NDIMS-1 into the first dimension of the
  /// result. If NDIMS > dims() then leading dimensions of size 1 will be
  /// added to make the output rank NDIMS.
  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::Tensor flat_inner_dims();

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing all
  /// Tensor dimensions but the first NDIMS-1 into the last dimension of the
  /// result. If NDIMS > dims() then trailing dimensions of size 1 will be
  /// added to make the output rank NDIMS.
  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::Tensor flat_outer_dims();

  /// Returns the data as an Eigen::Tensor with NDIMS dimensions, collapsing the
  /// first 'begin' Tensor dimensions into the first dimension of the result and
  /// the Tensor dimensions of the last dims() - 'begin' - NDIMS into the last
  /// dimension of the result. If 'begin' < 0 then the |'begin'| leading
  /// dimensions of size 1 will be added. If 'begin' + NDIMS > dims() then
  /// 'begin' + NDIMS - dims() trailing dimensions of size 1 will be added.
  template <typename T, size_t NDIMS = 3>
  typename TTypes<T, NDIMS>::Tensor flat_inner_outer_dims(int64_t begin);

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor shaped(gtl::ArraySlice<int64> new_sizes);

  /// \brief Return the tensor data to an `Eigen::Tensor` with the new
  /// shape specified in `new_sizes` and cast to a new dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// The allowed bitcast is the only difference from `shaped()`.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::Tensor bit_casted_shaped(
      gtl::ArraySlice<int64> new_sizes);

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::UnalignedTensor unaligned_shaped(
      gtl::ArraySlice<int64> new_sizes);

  /// \brief Return the Tensor data as a `TensorMap` of fixed size 1:
  /// `TensorMap<TensorFixedSize<T, 1>>`.

  /// Using `scalar()` allows the compiler to perform optimizations as
  /// the size of the tensor is known at compile time.
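  ///
  /// For example (a minimal sketch):
  ///
  /// ```c++
  ///
  /// Tensor t(3.0f);              // DT_FLOAT host scalar.
  /// auto s = t.scalar<float>();
  /// float value = s();           // value == 3.0f.
  ///
  /// ```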
  template <typename T>
  typename TTypes<T>::Scalar scalar();

  /// Const versions of all the methods above.
  template <typename T>
  typename TTypes<T>::ConstVec vec() const {
    return tensor<T, 1>();
  }

  template <typename T>
  typename TTypes<T>::ConstMatrix matrix() const {
    return tensor<T, 2>();
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor tensor() const;

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// same size but a bitwise cast to the specified dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// NOTE: this is the same as `tensor()` except a bitcast is allowed.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor bit_casted_tensor() const;

  /// \brief Return the tensor data to an `Eigen::Tensor` with the
  /// last dimension elements converted into single elements of a larger type.
  ///
  /// For example, this is useful for kernels that can treat NCHW_VECT_C int8
  /// tensors as NCHW int32 tensors. The sizeof(T) should equal the size of
  /// the original element type * num elements in the original last dimension.
  /// NDIMS should be 1 less than the original number of dimensions.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor reinterpret_last_dimension() const;

  template <typename T>
  typename TTypes<T>::ConstFlat flat() const {
    return shaped<T, 1>({NumElements()});
  }

  template <typename T>
  typename TTypes<T>::UnalignedConstFlat unaligned_flat() const {
    return unaligned_shaped<T, 1>({NumElements()});
  }

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  /// \brief Return the tensor data to an `Eigen::Tensor` with the new
  /// shape specified in `new_sizes` and cast to a new dtype `T`.
  ///
  /// Using a bitcast is useful for move and copy operations.
  /// The allowed bitcast is the only difference from `shaped()`.
  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::ConstTensor bit_casted_shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  template <typename T, size_t NDIMS>
  typename TTypes<T, NDIMS>::UnalignedConstTensor unaligned_shaped(
      gtl::ArraySlice<int64> new_sizes) const;

  template <typename T>
  typename TTypes<T>::ConstScalar scalar() const;

  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::ConstTensor flat_inner_dims() const;

  template <typename T, size_t NDIMS = 2>
  typename TTypes<T, NDIMS>::ConstTensor flat_outer_dims() const;

  template <typename T, size_t NDIMS = 3>
  typename TTypes<T, NDIMS>::ConstTensor flat_inner_outer_dims(
      int64_t begin) const;

  /// Render the first `max_entries` values in `*this` into a string.
  std::string SummarizeValue(int64_t max_entries, bool print_v2 = false) const;

  /// A human-readable summary of the tensor suitable for debugging.
  // `num_values` is the number of actual data values in the tensor
  // included in the message. If the tensor might be resident in
  // GPU/TPU memory use DeviceSafeDebugString instead.
  std::string DebugString(int num_values) const;
  std::string DebugString() const { return DebugString(3); }

  // Variant of DebugString() that should be used for possibly non-CPU tensors.
  // If the tensor is not resident on CPU, we can't read its values as
  // DebugString() does.
  std::string DeviceSafeDebugString() const;

  /// Fill in the `TensorDescription` proto with metadata about the
  /// tensor that is useful for monitoring and debugging.
  void FillDescription(TensorDescription* description) const;

  /// \brief Returns a `StringPiece` mapping the current tensor's buffer.
  ///
  /// The returned `StringPiece` may point to memory locations on devices
  /// that the CPU cannot address directly.
  ///
  /// NOTE: The underlying tensor buffer is refcounted, so the lifetime
  /// of the contents mapped by the `StringPiece` matches the lifetime of
  /// the buffer; callers should arrange to make sure the buffer does
  /// not get destroyed while the `StringPiece` is still used.
  ///
  /// REQUIRES: `DataTypeCanUseMemcpy(dtype())`.
  StringPiece tensor_data() const;
  void* data() const;

  /// Copy the other tensor into this tensor, reshape it and reinterpret the
  /// buffer's datatype. If Status::OK() is returned, the two tensors now share
  /// the same underlying storage.
  ///
  /// This call requires that the `other` tensor and the given type and shape
  /// are "compatible" (i.e. they occupy the same number of bytes).
  ///
  /// Specifically:
  ///
  /// shape.num_elements() * DataTypeSize(type)
  ///
  /// must equal
  ///
  /// other.num_elements() * DataTypeSize(other.dtype())
  ///
  /// In addition, this function requires:
  ///   * DataTypeSize(other.dtype()) != 0
  ///   * DataTypeSize(type) != 0
  ///
  /// If any of the requirements are not met, errors::InvalidArgument is
  /// returned.
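  ///
  /// For example (a minimal sketch; both dtypes have a 4-byte element size):
  ///
  /// ```c++
  ///
  /// Tensor ints(DT_INT32, TensorShape({2, 3}));
  /// Tensor floats;
  /// Status s = floats.BitcastFrom(ints, DT_FLOAT, TensorShape({2, 3}));
  /// if (s.ok()) {
  ///   // floats reinterprets ints' buffer as 6 float values.
  /// }
  ///
  /// ```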
  Status BitcastFrom(const Tensor& other, DataType dtype,
                     const TensorShape& shape);

  /// Like BitcastFrom, but CHECK fails if any preconditions are not met.
  ///
  /// Deprecated. Use BitcastFrom instead and check the returned Status.
  void UnsafeCopyFromInternal(const Tensor& other, DataType dtype,
                              const TensorShape& shape) {
    TF_CHECK_OK(BitcastFrom(other, dtype, shape));
  }

  // Returns true if the refcount on buf_ and any possible underlying root
  // buffer is one.
  bool RefCountIsOne() const;

 private:
  void CheckType(DataType expected_dtype) const;
  void CheckTypeAndIsAligned(DataType expected_dtype) const;
  void CheckIsAlignedAndSingleElement() const;
  void set_dtype(DataType t) { shape_.set_data_type(t); }

  // TensorShape's InlineVector.
  static gtl::InlinedVector<int64, 4> ComputeFlatInnerDims(
      gtl::ArraySlice<int64> orig, int64_t num_out_dims);
  static gtl::InlinedVector<int64, 4> ComputeFlatOuterDims(
      gtl::ArraySlice<int64> orig, int64_t num_out_dims);

  TensorShape shape_;
  TensorBuffer* buf_;

  friend class DMAHelper;             // For access to buf_.
  friend class TensorCApi;            // For access to buf_.
  friend class TensorCord;            // For access to buf_.
  friend class TensorReference;       // For access to buf_.
  friend class VariableOp;            // For access to set_shape.
  friend class AutoReloadVariableOp;  // For access to set_shape.
  friend class TensorTestHelper;      // For access to set_shape.
  friend class CastOpBase;            // For access to set_dtype.
  friend class ScopedAllocator;       // For access to buf_.
  friend Status batch_util::CopyElementToSlice(
      Tensor element, Tensor* parent,
      int64_t index);  // For access to base<T>().
  friend Status batch_util::CopySliceToElement(
      const Tensor& parent, Tensor* element,
      int64_t index);  // For access to base<T>().
  friend Status batch_util::MaybeMoveSliceToElement(
      Tensor* parent, Tensor* element,
      int64_t index);  // For access to base<T>().
  friend Status batch_util::CopyContiguousSlices(
      const Tensor& src, int64_t src_offset, int64_t dst_offset,
      int64_t num_slices,
      Tensor* dst);  // For access to base<T>().

  bool CanUseDMA() const;

  // Only needed by variable op to set the shape of an uninitialized
  // Tensor.
  // TODO: Remove this when we have a better story for detecting
  // uninitialized tensors.
  void set_shape(const TensorShape& shape) {
    DataType dt = dtype();
    shape_ = shape;
    set_dtype(dt);
  }

  inline void CopyFromInternal(const Tensor& other, const TensorShape& shape) {
    DCHECK_EQ(shape.num_elements(), other.NumElements());
    // Data type will be overwritten if this == &other, since dtype is part of
    // shape.
    DataType other_dtype = other.dtype();
    shape_ = shape;
    set_dtype(other_dtype);
    if (buf_ != other.buf_) {
      if (buf_) buf_->Unref();
      buf_ = other.buf_;
      if (buf_) buf_->Ref();
    }
  }

  template <typename T>
  T* base() const;

  template <size_t NDIMS>
  void FillDimsAndValidateCompatibleShape(
      gtl::ArraySlice<int64> new_sizes,
      Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const;

  template <typename T, size_t NDIMS>
  void FillDimsAndValidateCompatibleShape(
      gtl::ArraySlice<int64> new_sizes,
      Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const;
};

// Implementation details

// START_SKIP_DOXYGEN

template <typename T>
T* Tensor::base() const {
  return buf_ == nullptr ? nullptr : buf_->base<T>();
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::tensor() {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
                                           shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::tensor() const {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
                                                shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::bit_casted_tensor() {
  CHECK(IsAligned());
  return typename TTypes<T, NDIMS>::Tensor(base<T>(),
                                           shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::bit_casted_tensor() const {
  CHECK(IsAligned());
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(),
                                                shape().AsEigenDSizes<NDIMS>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::reinterpret_last_dimension() {
  if (NDIMS == dims()) {
    return tensor<T, NDIMS>();
  }
  CHECK(IsAligned());
  CHECK_EQ(NDIMS, dims() - 1);
  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  for (int d = 0; d < NDIMS; ++d) {
    dims[d] = shape_.dim_sizes()[d];
  }
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::reinterpret_last_dimension()
    const {
  if (NDIMS == dims()) {
    return tensor<T, NDIMS>();
  }
  CHECK(IsAligned());
  CHECK_EQ(NDIMS, dims() - 1);
  CHECK_EQ(sizeof(T), shape_.dim_sizes()[NDIMS] * DataTypeSize(dtype()));
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  for (int d = 0; d < NDIMS; ++d) {
    dims[d] = shape_.dim_sizes()[d];
  }
  return typename TTypes<T, NDIMS>::ConstTensor(base<const T>(), dims);
}

template <size_t NDIMS>
void Tensor::FillDimsAndValidateCompatibleShape(
    gtl::ArraySlice<int64> new_sizes,
    Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const {
  CHECK_EQ(NDIMS, new_sizes.size());
  int64_t new_num_elements = 1;
  for (size_t d = 0; d < NDIMS; d++) {
    new_num_elements *= new_sizes[d];
    (*dims)[d] = new_sizes[d];
  }
  CHECK_EQ(new_num_elements, NumElements());
}

template <typename T, size_t NDIMS>
void Tensor::FillDimsAndValidateCompatibleShape(
    gtl::ArraySlice<int64> new_sizes,
    Eigen::array<Eigen::DenseIndex, NDIMS>* dims) const {
  CHECK_EQ(NDIMS, new_sizes.size());
  int64_t new_num_elements = 1;
  for (size_t d = 0; d < NDIMS; d++) {
    new_num_elements *= new_sizes[d];
    (*dims)[d] = new_sizes[d];
  }
  const int element_size = DataTypeSize(BaseType(dtype()));
  if (element_size > 0) {
    CHECK_EQ(new_num_elements * sizeof(T), NumElements() * element_size);
  } else {
    // DataTypeSize() returns 0 for some data types. In this case, assume that T
    // has the same size as the buffer type.
    // NOTE: If we can be sure that DataTypeSize() does not return 0 for all POD
    // types, then we should check DataTypeToEnum<T>::v() == dtype(). Or simply
    // check if `element_size > 0` to err when bit cast is attempted on Tensor
    // of unknown data type size.
    CHECK_EQ(new_num_elements, NumElements());
  }
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CheckTypeAndIsAligned(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::bit_casted_shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape<T>(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::Tensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::UnalignedTensor Tensor::unaligned_shaped(
    gtl::ArraySlice<int64> new_sizes) {
  CheckType(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::UnalignedTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CheckType(DataTypeToEnum<T>::v());
  CHECK(IsAligned()) << "ptr = " << base<void>();
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::bit_casted_shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CHECK(IsAligned());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape<T>(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::ConstTensor(base<T>(), dims);
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::UnalignedConstTensor Tensor::unaligned_shaped(
    gtl::ArraySlice<int64> new_sizes) const {
  CheckType(DataTypeToEnum<T>::v());
  Eigen::array<Eigen::DenseIndex, NDIMS> dims;
  FillDimsAndValidateCompatibleShape(new_sizes, &dims);
  return typename TTypes<T, NDIMS>::UnalignedConstTensor(base<T>(), dims);
}

template <typename T>
typename TTypes<T>::Scalar Tensor::scalar() {
  static_assert(
      !std::is_same<T, std::string>::value,
      "std::string is no longer a scalar type, use tensorflow::tstring");
  CheckIsAlignedAndSingleElement();
  return typename TTypes<T>::Scalar(base<T>());
}

template <typename T>
typename TTypes<T>::ConstScalar Tensor::scalar() const {
  static_assert(
      !std::is_same<T, std::string>::value,
      "std::string is no longer a scalar type, use tensorflow::tstring");
  CheckIsAlignedAndSingleElement();
  return typename TTypes<T>::ConstScalar(base<T>());
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_inner_dims() {
  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_outer_dims() {
  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::Tensor Tensor::flat_inner_outer_dims(int64_t begin) {
  gtl::InlinedVector<int64, 4> flat_outer =
      ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);
  return shaped<T, NDIMS>(ComputeFlatInnerDims(flat_outer, NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_inner_dims() const {
  return shaped<T, NDIMS>(ComputeFlatInnerDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_outer_dims() const {
  return shaped<T, NDIMS>(ComputeFlatOuterDims(shape_.dim_sizes(), NDIMS));
}

template <typename T, size_t NDIMS>
typename TTypes<T, NDIMS>::ConstTensor Tensor::flat_inner_outer_dims(
    int64_t begin) const {
  gtl::InlinedVector<int64, 4> flat_outer =
      ComputeFlatOuterDims(shape_.dim_sizes(), begin + NDIMS);
  return shaped<T, NDIMS>(ComputeFlatInnerDims(flat_outer, NDIMS));
}

inline Tensor::Tensor(const Tensor& other)
    : shape_(other.shape()), buf_(other.buf_) {
  if (buf_) buf_->Ref();
}

inline Tensor::Tensor(Tensor&& other)
    : shape_(std::move(other.shape_)), buf_(other.buf_) {
  other.buf_ = nullptr;
}

class Tensor::HostScalarTensorBufferBase : public TensorBuffer {
 public:
  using TensorBuffer::TensorBuffer;
  bool GetAllocatedBytes(size_t* out_bytes) const final;
  void FillAllocationDescription(AllocationDescription* proto) const final;
};

// A packed representation for a single scalar value of type `T`, and a
// `TensorBuffer` implementation that describes (and manages the lifetime of)
// that value.
template <typename T>
struct Tensor::ValueAndTensorBuffer {
  class HostScalarTensorBuffer : public Tensor::HostScalarTensorBufferBase {
   public:
    explicit HostScalarTensorBuffer(void* data)
        : HostScalarTensorBufferBase(data) {}
    size_t size() const final { return sizeof(T); }
    TensorBuffer* root_buffer() final { return this; }

    // Override `operator delete` so that calling `delete this` in
    // `core::Refcounted::Unref()` for an object of this type will free
    // the enclosing `ValueAndTensorBuffer` for the tensor buffer.
    //
    // NOTE(mrry): The definition of this method must be outside the class
    // definition in order to satisfy some compilers.
    static void operator delete(void* ptr);

    static void operator delete(void*, void*) {
      // Some compilers require an overridden class-specific deallocation
      // function, which will be called if placement `new` throws an
      // exception.
    }

   private:
    ~HostScalarTensorBuffer() override { static_cast<T*>(data())->~T(); }
  };

  T value;
  HostScalarTensorBuffer tensor_buffer;
};

/* static */
template <typename T>
void Tensor::ValueAndTensorBuffer<T>::HostScalarTensorBuffer::operator delete(
    void* ptr) {
  // Use a dummy object to compute the offset of
  // `ValueAndTensorBuffer::tensor_buffer`, because `offsetof()` is not
  // necessarily defined on this non-POD type (until C++17).
  //
  // NOTE(mrry): Using `sizeof(Tensor::ValueAndTensorBuffer<T>)` here requires
  // us to define this method outside the class definition, so that it is not
  // considered an incomplete type.
  typename std::aligned_storage<sizeof(Tensor::ValueAndTensorBuffer<T>),
                                alignof(Tensor::ValueAndTensorBuffer<T>)>::type
      dummy_storage_;
  Tensor::ValueAndTensorBuffer<T>* dummy_object =
      reinterpret_cast<Tensor::ValueAndTensorBuffer<T>*>(&dummy_storage_);
  intptr_t offset = reinterpret_cast<intptr_t>(&dummy_object->tensor_buffer) -
                    reinterpret_cast<intptr_t>(dummy_object);

  port::AlignedFree(static_cast<char*>(ptr) - offset);
}

template <typename T>
Tensor::Tensor(T value, host_scalar_tag tag) {
  auto* value_and_buf = static_cast<Tensor::ValueAndTensorBuffer<T>*>(
      port::AlignedMalloc(sizeof(typename Tensor::ValueAndTensorBuffer<T>),
                          EIGEN_MAX_ALIGN_BYTES));
  new (&value_and_buf->value) T(std::move(value));
  new (&value_and_buf->tensor_buffer)
      typename Tensor::ValueAndTensorBuffer<T>::HostScalarTensorBuffer(
          value_and_buf);
  buf_ = &value_and_buf->tensor_buffer;
  set_dtype(DataTypeToEnum<T>::value);
}

inline Tensor& Tensor::operator=(Tensor&& other) {
  // Avoid self-assignment, since we might destroy our underlying buffer.
  if (&other != this) {
    shape_ = std::move(other.shape_);
    if (buf_) buf_->Unref();
    buf_ = other.buf_;
    other.buf_ = nullptr;
  }
  return *this;
}

// END_SKIP_DOXYGEN

}  // namespace tensorflow

#endif  // TENSORFLOW_CORE_FRAMEWORK_TENSOR_H_