/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/arena_planner.h"

#include <utility>

namespace tflite {

struct AllocationInfo {
  // The node index requesting this allocation.
  int node;
  // The tensor index to be allocated or deallocated.
  int tensor;
  // Whether to allocate or deallocate.
  enum Type { ALLOC, DEALLOC } type;
};
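
// Illustrative sketch (not from the original source): assuming a graph whose
// single node reads graph input t0 and writes graph output t1, and assuming
// preserve_inputs is false, PlanAllocations() below would build a queue like:
//
//   alloc_queue_ = {
//       {/*node=*/0, /*tensor=*/0, AllocationInfo::ALLOC},    // graph input
//       {/*node=*/0, /*tensor=*/1, AllocationInfo::ALLOC},    // node output
//       {/*node=*/0, /*tensor=*/0, AllocationInfo::DEALLOC},  // last use of t0
//   };
//
// Graph output t1 never gets a DEALLOC entry; its ref-count is artificially
// padded so it survives until inference ends.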

ArenaPlanner::ArenaPlanner(TfLiteContext* context,
                           std::unique_ptr<GraphInfo> graph_info,
                           bool preserve_inputs, bool preserve_intermediates,
                           int tensor_alignment)
    : context_(context),
      graph_info_(std::move(graph_info)),
      arena_(kDefaultArenaAlignment),
      persistent_arena_(kDefaultArenaAlignment),
      preserve_inputs_(preserve_inputs),
      preserve_intermediates_(preserve_intermediates),
      tensor_alignment_(tensor_alignment) {}
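
// Usage sketch (illustrative, not from the original source). GraphInfo is an
// abstract interface, so a caller needs a concrete implementation; the
// `MyGraphInfo` class and the alignment value below are hypothetical, and in
// practice the interpreter constructs and drives the planner itself:
//
//   auto planner = std::make_unique<ArenaPlanner>(
//       &context, std::unique_ptr<GraphInfo>(new MyGraphInfo(...)),
//       /*preserve_inputs=*/true, /*preserve_intermediates=*/false,
//       /*tensor_alignment=*/64);
//   planner->PlanAllocations();                       // build the plan
//   planner->ExecuteAllocations(0, last_node_index);  // reserve arena memory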

ArenaPlanner::~ArenaPlanner() {}

int64_t ArenaPlanner::BasePointer(TfLiteAllocationType type) {
  if (type == kTfLiteArenaRwPersistent) {
    return persistent_arena_.BasePointer();
  }
  if (type == kTfLiteArenaRw) {
    return arena_.BasePointer();
  }
  return 0;
}

TfLiteStatus ArenaPlanner::ResetAllocations() {
  TF_LITE_ENSURE_STATUS(arena_.Clear());
  TF_LITE_ENSURE_STATUS(persistent_arena_.Clear());
  allocs_.clear();
  allocs_.resize(graph_info_->num_tensors());
  // Note that we only clear the alloc_queue_ when re-planning allocations, as
  // it should only change when the graph topology itself changes.
  return kTfLiteOk;
}

TfLiteStatus ArenaPlanner::PlanAllocations() {
  // Invalidate any existing data.
  TF_LITE_ENSURE_STATUS(ResetAllocations());
  // The alloc_queue_ is specific to the graph topology, and will be
  // completely reconstructed from graph data here.
  alloc_queue_.clear();

  // Keeps track of references to each tensor.
  std::vector<int> refcounts(graph_info_->num_tensors(), 0);
  // `allocated` and `deallocated` are technically lists of boolean values,
  // but `vector<int>` is used instead of `vector<bool>` to keep the compiled
  // binary size down.
  std::vector<int> allocated(graph_info_->num_tensors(), false);
  std::vector<int> deallocated(graph_info_->num_tensors(), false);

  auto allocate = [this, &allocated, &deallocated](int node,
                                                   int tensor) -> TfLiteStatus {
    if (allocated[tensor]) {
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, !deallocated[tensor]);
    alloc_queue_.push_back({node, tensor, AllocationInfo::ALLOC});
    allocated[tensor] = true;
    return kTfLiteOk;
  };

  auto deallocate = [this, &allocated, &deallocated](
                        int node, int tensor) -> TfLiteStatus {
    if (!allocated[tensor]) {
      // Do not enqueue a DEALLOC if the tensor was never allocated.
      // This can happen with constant tensors.
      return kTfLiteOk;
    }
    TF_LITE_ENSURE(context_, !deallocated[tensor]);
    alloc_queue_.push_back({node, tensor, AllocationInfo::DEALLOC});
    // Record the deallocation so the check above can catch a double-dealloc.
    deallocated[tensor] = true;
    return kTfLiteOk;
  };

  // There will be an entry in alloc_queue_ for the allocation of each tensor
  // and another for their deallocation.
  alloc_queue_.reserve(2 * graph_info_->num_tensors());

  // We must make sure the output tensors are never overwritten. We do that by
  // artificially adding one to their ref-counts so they are never selected
  // for deallocation.
  for (int tensor_index : graph_info_->outputs()) {
    refcounts[tensor_index]++;
  }

  // Variable tensors are also never overwritten and need to stay alive for
  // the entire execution.
  for (int tensor_index : graph_info_->variables()) {
    refcounts[tensor_index]++;
  }

  // Queue all graph inputs for allocation. If preserve_inputs_ is true, make
  // sure they are never overwritten.
  for (int tensor_index : graph_info_->inputs()) {
    if (tensor_index != kOptionalTensor) {
      if (preserve_inputs_) {
        refcounts[tensor_index]++;
      }
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    }
  }

  // Queue all graph variable tensors for allocation. Their reference counts
  // were already incremented above, so they will never be deallocated.
  for (int tensor_index : graph_info_->variables()) {
    if (tensor_index != kOptionalTensor) {
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    }
  }

  // Count references to node input tensors.
  for (int i = 0; i < graph_info_->num_nodes(); ++i) {
    const TfLiteNode& node = graph_info_->node(i);
    TfLiteIntArray* node_inputs = node.inputs;
    for (int j = 0; j < node_inputs->size; ++j) {
      int tensor_index = node_inputs->data[j];
      if (tensor_index != kOptionalTensor) {
        refcounts[tensor_index]++;
      }
    }
  }

  // Queue all graph inputs for allocation once more; this is a no-op for
  // tensors already queued above, since `allocate` skips them.
  for (int tensor_index : graph_info_->inputs()) {
    if (tensor_index != kOptionalTensor) {
      TF_LITE_ENSURE_STATUS(allocate(0, tensor_index));
    }
  }
  // Go through the graph in execution order.
  for (int i = 0; i < graph_info_->num_nodes(); ++i) {
    const TfLiteNode& node = graph_info_->node(i);

    // First queue output tensors for allocation.
    TfLiteIntArray* node_outputs = node.outputs;
    for (int j = 0; j < node_outputs->size; ++j) {
      int tensor_index = node_outputs->data[j];
      TF_LITE_ENSURE_STATUS(allocate(i, tensor_index));
    }

    // Then update the ref-counts of the node's inputs, and if necessary queue
    // them for deallocation.
    if (!preserve_intermediates_) {
      TfLiteIntArray* node_inputs = node.inputs;
      for (int j = 0; j < node_inputs->size; ++j) {
        int tensor_index = node_inputs->data[j];
        if (tensor_index != kOptionalTensor) {
          refcounts[tensor_index]--;
          if (refcounts[tensor_index] == 0) {
            TF_LITE_ENSURE_STATUS(deallocate(i, tensor_index));
          }
        }
      }
    }
  }

  // Note that graph outputs will never be scheduled for deallocation. We
  // could do that here for completeness, but it won't have any effect.
  return kTfLiteOk;
}
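
// Worked example (sketch, assuming preserve_inputs_ and
// preserve_intermediates_ are false): for the two-node chain
//
//   t0 -> node0 -> t1 -> node1 -> t2
//
// with graph input t0 and graph output t2, the logic above yields refcounts
// {t0: 1, t1: 1, t2: 1} and the queue:
//
//   ALLOC(node 0, t0)    // graph input
//   ALLOC(node 0, t1)    // node0 output
//   DEALLOC(node 0, t0)  // t0's ref-count hits zero after node0
//   ALLOC(node 1, t2)    // node1 output
//   DEALLOC(node 1, t1)  // t1's ref-count hits zero after node1
//
// t2 is never deallocated because its artificial output reference keeps its
// ref-count above zero.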

TfLiteStatus ArenaPlanner::ExecuteAllocations(int first_node, int last_node) {
  // Grow the size of `allocs_` if necessary. This allows allocating temporary
  // tensors in an op's `prepare` function.
  TF_LITE_ENSURE(context_, graph_info_->num_tensors() >= allocs_.size());
  allocs_.resize(graph_info_->num_tensors());

  TF_LITE_ENSURE_STATUS(CalculateAllocations(first_node, last_node));
  TF_LITE_ENSURE_STATUS(Commit());

  for (int i = 0; i < graph_info_->num_tensors(); ++i) {
    // TODO(ahentz): we could do this only for the tensors that were modified
    // in CalculateAllocations(), instead of redoing it for tensors that
    // already had proper pointers. However we must be very careful, because
    // SimpleMemoryArena::Commit() could move the base pointer.
    TF_LITE_ENSURE_STATUS(ResolveTensorAllocation(i));
  }

  return kTfLiteOk;
}
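
// Sketch of the assumed calling pattern (the interpreter drives this): when a
// graph contains dynamic tensors, it is prepared and allocated in chunks, e.g.
//
//   planner->ExecuteAllocations(0, k);      // nodes whose shapes are known
//   // ... run nodes 0..k, which resolves the dynamic shapes ...
//   planner->ExecuteAllocations(k + 1, n);  // continue with the next chunk
//
// This is why CalculateAllocations() below skips queue entries outside the
// [first_node, last_node] window.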

TfLiteStatus ArenaPlanner::Commit() {
  TF_LITE_ENSURE_STATUS(arena_.Commit(context_));
  TF_LITE_ENSURE_STATUS(persistent_arena_.Commit(context_));
  return kTfLiteOk;
}

TfLiteStatus ArenaPlanner::CalculateAllocations(int first_node, int last_node) {
  int active_node = first_node;
  // When dynamic tensors are present this method is called multiple times.
  // The items in the alloc_queue_ referring to nodes before first_node were
  // processed previously and should be skipped. Entries after last_node are
  // not yet ready to be handled.
  for (const auto& alloc_info : alloc_queue_) {
    if (alloc_info.node < first_node) continue;
    if (alloc_info.node > last_node) break;
    if (alloc_info.node == active_node) {
      // This is the first allocation/deallocation for a given node. It is
      // time to deallocate the previous temporaries and allocate new ones.
      if (active_node != first_node) {
        TF_LITE_ENSURE_STATUS(
            CalculateDeallocationOfInternalTensors(active_node - 1));
      }
      TF_LITE_ENSURE_STATUS(CalculateAllocationOfInternalTensors(active_node));
      ++active_node;
    }
    // Handle the current item.
    if (alloc_info.type == AllocationInfo::ALLOC) {
      TF_LITE_ENSURE_STATUS(CalculateTensorAllocation(alloc_info.tensor));
    } else {
      TF_LITE_ENSURE_STATUS(CalculateTensorDeallocation(alloc_info.tensor));
    }
  }

  // Don't forget to deallocate the temporaries of the last node.
  TF_LITE_ENSURE_STATUS(
      CalculateDeallocationOfInternalTensors(active_node - 1));

  return kTfLiteOk;
}
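
// Timeline sketch for the loop above: on the first queue entry belonging to
// node i, temporaries are rotated before the entry itself is handled:
//
//   CalculateDeallocationOfInternalTensors(i - 1);  // free node i-1's scratch
//   CalculateAllocationOfInternalTensors(i);        // grab node i's scratch
//   // ... then the ALLOC/DEALLOC entries recorded for node i ...
//
// so a temporary tensor only occupies the arena while its owning node runs.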

TfLiteStatus ArenaPlanner::ResolveTensorAllocation(int tensor_index) {
  TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
  if (tensor.allocation_type == kTfLiteArenaRw) {
    // Skip resolution if the size of the tensor is zero, leaving its data
    // pointer as nullptr.
    if (allocs_[tensor_index].size != 0) {
      TF_LITE_ENSURE_STATUS(arena_.ResolveAlloc(context_, allocs_[tensor_index],
                                                &tensor.data.raw));
    }
  }
  if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
    TF_LITE_ENSURE_STATUS(persistent_arena_.ResolveAlloc(
        context_, allocs_[tensor_index], &tensor.data.raw));
  }
  return kTfLiteOk;
}
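
// Conceptually (a sketch; SimpleMemoryArena owns the actual arithmetic), the
// resolution above boils down to:
//
//   tensor.data.raw = arena_base_pointer + allocs_[tensor_index].offset;
//
// Because Commit() may reallocate the underlying buffer and move the base
// pointer, ExecuteAllocations() re-resolves every tensor after each commit.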

TfLiteStatus ArenaPlanner::CalculateTensorAllocation(int tensor_index) {
  TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
  if (tensor.allocation_type == kTfLiteArenaRw) {
    TF_LITE_ENSURE_STATUS(arena_.Allocate(
        context_, tensor_alignment_, tensor.bytes, &allocs_[tensor_index]));
  }
  if (tensor.allocation_type == kTfLiteArenaRwPersistent) {
    TF_LITE_ENSURE_STATUS(persistent_arena_.Allocate(
        context_, tensor_alignment_, tensor.bytes, &allocs_[tensor_index]));
  }
  return kTfLiteOk;
}

TfLiteStatus ArenaPlanner::CalculateTensorDeallocation(int tensor_index) {
  TfLiteTensor& tensor = *graph_info_->tensor(tensor_index);
  if (tensor.allocation_type == kTfLiteArenaRw) {
    TF_LITE_ENSURE_STATUS(arena_.Deallocate(context_, allocs_[tensor_index]));
  }
  return kTfLiteOk;
}

TfLiteStatus ArenaPlanner::CalculateAllocationOfInternalTensors(
    int node_index) {
  if (node_index < graph_info_->num_nodes()) {
    const TfLiteNode& node = graph_info_->node(node_index);
    TfLiteIntArray* node_temporaries = node.temporaries;
    for (int i = 0; i < node_temporaries->size; ++i) {
      int tensor_index = node_temporaries->data[i];
      TF_LITE_ENSURE_STATUS(CalculateTensorAllocation(tensor_index));
    }
  }
  return kTfLiteOk;
}

TfLiteStatus ArenaPlanner::CalculateDeallocationOfInternalTensors(
    int node_index) {
  if (node_index < graph_info_->num_nodes()) {
    const TfLiteNode& node = graph_info_->node(node_index);
    TfLiteIntArray* node_temporaries = node.temporaries;
    for (int i = 0; i < node_temporaries->size; ++i) {
      int tensor_index = node_temporaries->data[i];
      TF_LITE_ENSURE_STATUS(CalculateTensorDeallocation(tensor_index));
    }
  }
  return kTfLiteOk;
}

}  // namespace tflite