1 /* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
2
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6
7 http://www.apache.org/licenses/LICENSE-2.0
8
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #include "tensorflow/lite/delegates/flex/buffer_map_util.h"
16
17 #include "tensorflow/core/framework/log_memory.h"
18 #include "tensorflow/core/framework/tensor.h"
19 #include "tensorflow/core/framework/typed_allocator.h"
20 #include "tensorflow/lite/delegates/flex/util.h"
21 #include "tensorflow/lite/string_util.h"
22
23 namespace tflite {
24 namespace flex {
25
FillAllocationDescription(tensorflow::AllocationDescription * proto) const26 void BaseTfLiteTensorBuffer::FillAllocationDescription(
27 tensorflow::AllocationDescription* proto) const {
28 int64_t rb = size();
29 proto->set_requested_bytes(rb);
30 proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
31 }
32
LogAllocation()33 void BaseTfLiteTensorBuffer::LogAllocation() {
34 if (tensorflow::LogMemory::IsEnabled() && data() != nullptr) {
35 tensorflow::LogMemory::RecordRawAllocation(
36 "TfLiteTensorBuffer_New",
37 tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, size(),
38 data(), tensorflow::cpu_allocator());
39 }
40 }
LogDeallocation()41 void BaseTfLiteTensorBuffer::LogDeallocation() {
42 if (tensorflow::LogMemory::IsEnabled() && data() != nullptr) {
43 tensorflow::LogMemory::RecordRawDeallocation(
44 "TfLiteTensorBuffer_Delete",
45 tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data(),
46 tensorflow::cpu_allocator(), false);
47 }
48 }
49
TfLiteTensorBuffer(const TfLiteTensor * tensor)50 TfLiteTensorBuffer::TfLiteTensorBuffer(const TfLiteTensor* tensor)
51 : BaseTfLiteTensorBuffer(tensorflow::cpu_allocator()->AllocateRaw(
52 EIGEN_MAX_ALIGN_BYTES, tensor->bytes)) {
53 // TODO(ahentz): if we can guarantee that TF Lite allocated tensors with
54 // the same alignment as TensorFlow (EIGEN_MAX_ALIGN_BYTES), then we can
55 // potentially eliminate the copy below.
56 len_ = tensor->bytes;
57
58 LogAllocation();
59
60 if (data()) {
61 std::memcpy(data(), tensor->data.raw, tensor->bytes);
62 }
63 }
64
~TfLiteTensorBuffer()65 TfLiteTensorBuffer::~TfLiteTensorBuffer() {
66 LogDeallocation();
67 tensorflow::cpu_allocator()->DeallocateRaw(data());
68 }
69
StringTfLiteTensorBuffer(const TfLiteTensor * tensor)70 StringTfLiteTensorBuffer::StringTfLiteTensorBuffer(const TfLiteTensor* tensor)
71 : StringTfLiteTensorBuffer(
72 tensor, tensor->data.raw != nullptr ? GetStringCount(tensor) : 0) {}
73
~StringTfLiteTensorBuffer()74 StringTfLiteTensorBuffer::~StringTfLiteTensorBuffer() {
75 LogDeallocation();
76 tensorflow::TypedAllocator::Deallocate<tensorflow::tstring>(
77 tensorflow::cpu_allocator(), static_cast<tensorflow::tstring*>(data()),
78 num_strings_);
79 }
80
StringTfLiteTensorBuffer(const TfLiteTensor * tensor,int num_strings)81 StringTfLiteTensorBuffer::StringTfLiteTensorBuffer(const TfLiteTensor* tensor,
82 int num_strings)
83 : BaseTfLiteTensorBuffer(
84 num_strings != 0
85 ? tensorflow::TypedAllocator::Allocate<tensorflow::tstring>(
86 tensorflow::cpu_allocator(), num_strings,
87 tensorflow::AllocationAttributes())
88 : nullptr),
89 num_strings_(num_strings) {
90 LogAllocation();
91
92 if (data()) {
93 tensorflow::tstring* p = static_cast<tensorflow::tstring*>(data());
94 for (size_t i = 0; i < num_strings_; ++p, ++i) {
95 auto ref = GetString(tensor, i);
96 p->assign(ref.str, ref.len);
97 }
98 }
99 }
100
// Converts the contents of a TF Lite tensor into `*tf_tensor`. Resource and
// variant tensors are shared by pointer; all other types are deep-copied into
// a freshly allocated buffer. Always returns OK.
tensorflow::Status SetTfTensorFromTfLite(const TfLiteTensor* tensor,
                                         tensorflow::Tensor* tf_tensor) {
  // TODO(b/179094265): This is an experimental implementation, subject to
  // change. This can be re-implemented with life cycle management mechanism
  // like reference counting.
  // In a different subgraph, it can load the TensorFlow tensor pointer of the
  // given TensorFlow Lite tensor, which is stored in the `data` field. The
  // memory management cycle of the shared TensorFlow's tensor will be managed
  // by the buffer maps since the loaded tensors always will be kept in the
  // buffer map.
  //
  // The life cycle of the pointer will be managed by the reference counting in
  // the TensorFlow world and the pointer will be freed when all the buffer
  // maps, who own it, are gone.
  if (tensor->type == kTfLiteResource || tensor->type == kTfLiteVariant) {
    // NOTE(review): assumes `tensor->data.raw` holds a valid
    // `const tensorflow::Tensor*` written by the producing subgraph; there is
    // no null check here — confirm callers guarantee this.
    const tensorflow::Tensor** tf_tensor_ptr =
        reinterpret_cast<const tensorflow::Tensor**>(tensor->data.raw);
    // Copy-assigning the Tensor shares its underlying buffer via TF's
    // internal reference counting; no data is copied.
    *tf_tensor = **tf_tensor_ptr;
    return tensorflow::Status::OK();
  }

  // Rebuild the shape from the TF Lite dims array.
  tensorflow::TensorShape shape;
  int num_dims = tensor->dims->size;
  for (int i = 0; i < num_dims; ++i) {
    shape.AddDim(tensor->dims->data[i]);
  }
  // TODO(b/152916533): We assume this is a new tensor and allocate a new buffer
  // for it. This is not always the best approach. For example, this might
  // be a reallocation after resizing tensors. In that case it would be
  // preferable to somehow reuse the buffer.
  // String tensors need per-element tstring construction; everything else is
  // a flat byte copy.
  BaseTfLiteTensorBuffer* buf;
  if (tensor->type == kTfLiteString) {
    buf = new StringTfLiteTensorBuffer(tensor);
  } else {
    buf = new TfLiteTensorBuffer(tensor);
  }
  tensorflow::Tensor t = tensorflow::TensorCApi::MakeTensor(
      GetTensorFlowDataType(tensor->type), shape, buf);
  // MakeTensor took its own reference on `buf`; drop ours so the buffer is
  // freed when the last tensorflow::Tensor referencing it goes away.
  buf->Unref();

  *tf_tensor = std::move(t);
  return tensorflow::Status::OK();
}
144
145 } // namespace flex
146 } // namespace tflite
147