/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/simple_planner.h"

#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/graph_info.h"

namespace tflite {

namespace {
constexpr int32_t kNodeNotAssigned = std::numeric_limits<int32_t>::max();

}  // namespace

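// Illustrative usage (a sketch only, not the actual call sites; in practice
// the planner is driven through TfLite's memory-planner interface and the
// exact wiring may differ):
//
//   SimplePlanner planner(context, std::move(graph_info));
//   TF_LITE_ENSURE_STATUS(planner.PlanAllocations());
//   // Prepare memory for nodes [0, last_node].
//   TF_LITE_ENSURE_STATUS(planner.ExecuteAllocations(0, last_node));
//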
SimplePlanner::SimplePlanner(TfLiteContext* context,
                             std::unique_ptr<GraphInfo> graph_info)
    : context_(context), graph_info_(std::move(graph_info)) {}

SimplePlanner::~SimplePlanner() { FreeAllAllocations(); }

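// Frees the memory backing every allocation record in `allocs_`.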
void SimplePlanner::FreeAllAllocations() {
  for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
    allocs_[i].free();
  }
}

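// Drops all existing allocations and starts over with one empty allocation
// record per tensor in the graph.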
TfLiteStatus SimplePlanner::ResetAllocations() {
  FreeAllAllocations();
  allocs_.clear();
  allocs_.resize(graph_info_->num_tensors());
  return kTfLiteOk;
}

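// Frees the allocation of every kTfLiteArenaRw tensor that was allocated at a
// node executing after `node`, and clears its data pointer so it can be
// re-planned.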
TfLiteStatus SimplePlanner::ResetAllocationsAfter(int node) {
  for (int i = 0; i < static_cast<int>(allocs_.size()); ++i) {
    if (allocs_[i].node > node && allocs_[i].size > 0) {
      TfLiteTensor& tensor = *graph_info_->tensor(i);
      if (tensor.allocation_type == kTfLiteArenaRw) {
        allocs_[i].free();
        tensor.data.raw = nullptr;
      }
    }
  }

  return kTfLiteOk;
}

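// Determines, for every tensor, the first node that needs it allocated
// (`alloc_node_`) and the node after which it can be freed (`dealloc_node_`).
// This is done with a simple reference count over the execution order: a
// tensor becomes allocatable when its producing node runs and deallocatable
// once its last consumer has run. Graph inputs, outputs, and variable tensors
// get an extra reference so they are never scheduled for deallocation.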
TfLiteStatus SimplePlanner::PlanAllocations() {
  // Invalidate any existing data.
  TF_LITE_ENSURE_STATUS(ResetAllocations());
  // Mark every tensor as not yet assigned an allocation or deallocation node.
  alloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);
  dealloc_node_.assign(graph_info_->num_tensors(), kNodeNotAssigned);

  // Tracks the number of outstanding references to each tensor.
  std::vector<int> refcounts(graph_info_->num_tensors(), 0);

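  // Records the first node at which a tensor must be allocated; calling it
  // again for an already-allocated tensor is a no-op.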
  auto allocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] != kNodeNotAssigned) {
      // Tensor has already been allocated.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    alloc_node_[tensor] = node;
    return kTfLiteOk;
  };

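  // Records the node after which a tensor is no longer needed and may be
  // deallocated.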
  auto deallocate = [this](int node, int tensor) -> TfLiteStatus {
    if (alloc_node_[tensor] == kNodeNotAssigned) {
      // There is nothing to deallocate for a tensor that was never allocated.
      // This happens with constant tensors.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, dealloc_node_[tensor] == kNodeNotAssigned);
    dealloc_node_[tensor] = node;
    return kTfLiteOk;
  };

  // We must make sure the output tensors are never overwritten. We do that by
  // artificially adding one to their ref-counts so they are never selected
  // for deallocation.
  for (int tensor_index : graph_info_->outputs()) {
    refcounts[tensor_index]++;
  }

  // Variable tensors must also never be overwritten and need to stay alive
  // for the lifetime of the graph.
  for (int tensor_index : graph_info_->variables()) {
    // Increase the reference count for variable tensors by one, so they are
    // never deallocated.
    refcounts[tensor_index]++;
    // `variables` is a subgraph-level list and it should never be
    // kTfLiteOptionalTensor.
    TF_LITE_ENSURE(context_, tensor_index != kTfLiteOptionalTensor);
    // Variable tensors should be allocated at the very beginning.
    TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
  }

  // Queue all graph inputs for allocation and make sure they are never
  // overwritten.
  for (int tensor_index : graph_info_->inputs()) {
    if (tensor_index != kTfLiteOptionalTensor) {
      refcounts[tensor_index]++;
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    }
  }

  // Count references to node input tensors.
  for (size_t i = 0; i < graph_info_->num_execution_nodes(); ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        refcounts[tensor_index]++;
      }
    }
  }

  // Go through the graph in execution order.
  for (size_t i = 0; i < graph_info_->num_execution_nodes(); ++i) {
    const TfLiteNode& node = graph_info_->node(i);

    // First queue output tensors for allocation.
    TfLiteIntArray* node_outputs = node.outputs;
    for (int j = 0; j < node_outputs->size; ++j) {
      int tensor_index = node_outputs->data[j];
      TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
    }

    // Then update the ref-counts of the node's inputs, and if necessary queue
    // them for deallocation.
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kTfLiteOptionalTensor) {
        refcounts[tensor_index]--;
        if (refcounts[tensor_index] == 0) {
          TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
        }
      }
    }
  }

  // Note that graph outputs will never be scheduled for deallocation. We
  // could do that here for completeness, but it won't have any effect.
  return kTfLiteOk;
}

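// Performs the planned allocations for the nodes in [first_node, last_node].
// Temporary tensors are both allocated and deallocated within their owning
// node. kTfLiteArenaRw tensors whose allocation node falls in the range are
// (re)allocated; kTfLiteArenaRwPersistent tensors are allocated only once.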
TfLiteStatus SimplePlanner::ExecuteAllocations(int first_node, int last_node) {
  alloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
  dealloc_node_.resize(graph_info_->num_tensors(), kNodeNotAssigned);
  allocs_.resize(graph_info_->num_tensors());
  // Set allocation and deallocation for temporary tensors.
  for (size_t i = first_node; i <= static_cast<size_t>(last_node) &&
                              i < graph_info_->num_execution_nodes();
       ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_temporaries = node.temporaries;
    for (int j = 0; j < node_temporaries->size; ++j) {
      int tensor_index = node_temporaries->data[j];
      alloc_node_[tensor_index] = i;
      dealloc_node_[tensor_index] = i;
    }
  }

  // Conduct the planned allocations.
  for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
    bool allocated = false;
    if (alloc_node_[i] >= first_node && alloc_node_[i] <= last_node) {
      TfLiteTensor& tensor = *graph_info_->tensor(i);
      if (tensor.allocation_type == kTfLiteArenaRw) {
        if (allocs_[i].size != 0) {
          allocs_[i].free();
        }
        allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
      } else if (tensor.allocation_type == kTfLiteArenaRwPersistent &&
                 allocs_[i].size == 0) {
        allocated = allocs_[i].alloc(tensor.bytes, alloc_node_[i]);
      }
    }
    if (allocated) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
    }
  }
  // TODO(b/191631156): Dealloc node if it's not needed.

  return kTfLiteOk;
}

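// Frees the allocations of all kTfLiteArenaRw (non-persistent) tensors and
// nulls out their data pointers. Persistent tensors are left intact.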
TfLiteStatus SimplePlanner::ReleaseNonPersistentMemory() {
  // Set data pointers for all non-persistent tensors to nullptr.
  for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
    TfLiteTensor& tensor = *graph_info_->tensor(i);
    if (tensor.allocation_type == kTfLiteArenaRw) {
      allocs_[i].free();
      tensor.data.raw = nullptr;
    }
  }
  return kTfLiteOk;
}

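// Re-resolves the data pointers of all kTfLiteArenaRw (non-persistent)
// tensors from their allocation records; it does not allocate new memory.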
TfLiteStatus SimplePlanner::AcquireNonPersistentMemory() {
  // Resolve allocations for all tensors not on the persistent arena.
  for (int i = 0; i < static_cast<int>(graph_info_->num_tensors()); ++i) {
    TfLiteTensor& tensor = *graph_info_->tensor(i);
    if (tensor.allocation_type == kTfLiteArenaRw) {
      TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
    }
  }
  return kTfLiteOk;
}

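// Points the tensor's data at the buffer recorded for it in `allocs_`, if any.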
TfLiteStatus SimplePlanner::ResolveTensorAllocation(int tensor_index) {
  TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
  if (tensor.allocation_type == kTfLiteArenaRw) {
    // Skip resolution if the tensor is zero-sized; its data pointer is left
    // as nullptr.
    if (allocs_[tensor_index].size != 0) {
      tensor.data.raw = allocs_[tensor_index].ptr;
    }
  }
  if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
    tensor.data.raw = allocs_[tensor_index].ptr;
  }
  return kTfLiteOk;
}

}  // namespace tflite