/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/delegates/flex/buffer_map.h"

#include <cstring>  // for std::memcpy

#include "tensorflow/c/c_api_internal.h"
#include "tensorflow/lite/delegates/flex/util.h"
#include "tensorflow/lite/string.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/core/framework/allocation_description.pb.h"
#include "tensorflow/core/framework/log_memory.h"

namespace tflite {
namespace flex {
namespace {
// A tensor buffer that is allocated, deallocated and populated by TF Lite.
class BaseTfLiteTensorBuffer : public tensorflow::TensorBuffer {
  using tensorflow::TensorBuffer::TensorBuffer;

  TensorBuffer* root_buffer() override { return this; }
  void FillAllocationDescription(
      tensorflow::AllocationDescription* proto) const override {
    tensorflow::int64 rb = size();
    proto->set_requested_bytes(rb);
    proto->set_allocator_name(tensorflow::cpu_allocator()->Name());
  }

  // Prevents input forwarding from mutating this buffer.
  bool OwnsMemory() const override { return false; }

 protected:
  void LogAllocation() {
    if (tensorflow::LogMemory::IsEnabled() && data() != nullptr) {
      tensorflow::LogMemory::RecordRawAllocation(
          "TfLiteTensorBuffer_New",
          tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, size(),
          data(), tensorflow::cpu_allocator());
    }
  }
  void LogDeallocation() {
    if (tensorflow::LogMemory::IsEnabled() && data() != nullptr) {
      tensorflow::LogMemory::RecordRawDeallocation(
          "TfLiteTensorBuffer_Delete",
          tensorflow::LogMemory::EXTERNAL_TENSOR_ALLOCATION_STEP_ID, data(),
          tensorflow::cpu_allocator(), false);
    }
  }
};

// A tensor buffer for most data types. Numeric types have exactly the same
// representation in TF Lite and TF, so we just need to use memcpy().
class TfLiteTensorBuffer : public BaseTfLiteTensorBuffer {
 public:
  explicit TfLiteTensorBuffer(const TfLiteTensor* tensor)
      : BaseTfLiteTensorBuffer(tensorflow::cpu_allocator()->AllocateRaw(
            EIGEN_MAX_ALIGN_BYTES, tensor->bytes)) {
    // TODO(ahentz): if we can guarantee that TF Lite allocated tensors with
    // the same alignment as TensorFlow (EIGEN_MAX_ALIGN_BYTES), then we can
    // potentially eliminate the copy below.
    len_ = tensor->bytes;

    LogAllocation();

    if (data()) {
      std::memcpy(data(), tensor->data.raw, tensor->bytes);
    }
  }

  ~TfLiteTensorBuffer() override {
    LogDeallocation();
    tensorflow::cpu_allocator()->DeallocateRaw(data());
  }

  size_t size() const override { return len_; }

 private:
  size_t len_;
};

// A string buffer. The TF Lite string tensor format is different from TF's,
// so we need to perform the conversion here.
class StringTfLiteTensorBuffer : public BaseTfLiteTensorBuffer {
 public:
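  // Counts the strings in the TF Lite tensor first, then delegates to the
  // private constructor below so the allocation size is known up front.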
  explicit StringTfLiteTensorBuffer(const TfLiteTensor* tensor)
      : StringTfLiteTensorBuffer(tensor, tensor->data.raw != nullptr
                                             ? GetStringCount(tensor->data.raw)
                                             : 0) {}

  ~StringTfLiteTensorBuffer() override {
    LogDeallocation();
    tensorflow::cpu_allocator()->Deallocate<string>(
        static_cast<string*>(data()), num_strings_);
  }

  size_t size() const override { return num_strings_ * sizeof(string); }

 private:
  StringTfLiteTensorBuffer(const TfLiteTensor* tensor, int num_strings)
      : BaseTfLiteTensorBuffer(
            num_strings != 0
                ? tensorflow::cpu_allocator()->Allocate<string>(num_strings)
                : nullptr),
        num_strings_(num_strings) {
    LogAllocation();

    if (data()) {
      string* p = static_cast<string*>(data());
      for (size_t i = 0; i < num_strings_; ++p, ++i) {
        auto ref = GetString(tensor->data.raw, i);
        p->assign(ref.str, ref.len);
      }
    }
  }

  int num_strings_;
};

}  // namespace

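// A minimal usage sketch (hypothetical tensor index, TfLiteTensor*, and TF
// result; in practice the surrounding flex delegate is assumed to drive these
// calls):
//
//   BufferMap buffer_map;
//   buffer_map.SetFromTfLite(/*tensor_index=*/0, tflite_tensor);
//   if (buffer_map.HasTensor(0)) {
//     tensorflow::Tensor t = buffer_map.GetTensor(0);
//   }
//   buffer_map.SetFromTensorFlow(/*tensor_index=*/0, std::move(tf_result));
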
BufferMap::BufferMap() {}

BufferMap::~BufferMap() {}

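// Returns true if a tensor is associated with the given TF Lite tensor index.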
bool BufferMap::HasTensor(int tensor_index) const {
  return id_to_tensor_.count(tensor_index) != 0;
}

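// Returns true if the entry at the given index was last set from a TensorFlow
// tensor (via SetFromTensorFlow) rather than copied from a TF Lite tensor.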
bool BufferMap::IsTensorFlowTensor(int tensor_index) const {
  return HasTensor(tensor_index) && owned_by_tf_.count(tensor_index) > 0;
}

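// Returns the tensorflow::Tensor stored at the given index. The index must
// already be present in the map (see HasTensor); otherwise at() throws.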
tensorflow::Tensor BufferMap::GetTensor(int tensor_index) const {
  return id_to_tensor_.at(tensor_index);
}

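// Copies the TF Lite tensor's shape and data into a newly allocated
// tensorflow::Tensor, stores it at the given index, and marks the entry as
// originating from TF Lite.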
void BufferMap::SetFromTfLite(int tensor_index, const TfLiteTensor* tensor) {
  tensorflow::TensorShape shape;
  int num_dims = tensor->dims->size;
  for (int i = 0; i < num_dims; ++i) {
    shape.AddDim(tensor->dims->data[i]);
  }
  // TODO(ahentz): we assume this is a new tensor and allocate a new buffer
  // for it. This is not always the best approach. For example, this might
  // be a reallocation after resizing tensors. In that case it would be
  // preferable to somehow reuse the buffer.
  BaseTfLiteTensorBuffer* buf;
  if (tensor->type == kTfLiteString) {
    buf = new StringTfLiteTensorBuffer(tensor);
  } else {
    buf = new TfLiteTensorBuffer(tensor);
  }
  tensorflow::Tensor t = tensorflow::TensorCApi::MakeTensor(
      GetTensorFlowDataType(tensor->type), shape, buf);
  buf->Unref();

  id_to_tensor_[tensor_index] = std::move(t);
  owned_by_tf_.erase(tensor_index);
}

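// Stores an existing TensorFlow tensor at the given index and marks the entry
// as owned by TensorFlow.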
void BufferMap::SetFromTensorFlow(int tensor_index, tensorflow::Tensor tensor) {
  id_to_tensor_[tensor_index] = std::move(tensor);
  owned_by_tf_.insert(tensor_index);
}

}  // namespace flex
}  // namespace tflite