• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 #ifndef TENSORFLOW_LITE_ARENA_PLANNER_H_
16 #define TENSORFLOW_LITE_ARENA_PLANNER_H_
17 
18 #include <memory>
19 #include <vector>
20 
21 #include "tensorflow/lite/c/c_api_internal.h"
22 #include "tensorflow/lite/graph_info.h"
23 #include "tensorflow/lite/memory_planner.h"
24 #include "tensorflow/lite/simple_memory_arena.h"
25 
26 namespace tflite {
27 
// Memory allocation tuning. Both values are in bytes: the arena base address
// and each tensor buffer inside it are aligned to these boundaries.
// Note: 'constexpr' on a variable already implies 'const', so the redundant
// qualifier has been dropped.
constexpr int kDefaultArenaAlignment = 64;
constexpr int kDefaultTensorAlignment = 64;

// Defined in arena_planner.cc; holds one allocate/deallocate instruction of
// the plan (see ArenaPlanner::alloc_queue_ below).
struct AllocationInfo;
33 
34 // A memory planner that makes all the allocations using arenas.
35 //
36 // Before a model is executed by the interpreter, this class determines when
37 // each tensor needs to be allocated and deallocated, and preallocates all the
38 // necessary memory (the PlanAllocations phase). It then assigns portions of
39 // this memory buffer to each tensor (the ExecuteAllocations phase). Tensors may
40 // share some of the buffer if a tensor B is to be allocated after another
41 // tensor A has been deallocated.
42 //
43 // If dynamic tensors are used the planning steps can be repeated during model
44 // execution. Since dynamic tensors don't have sizes until after the
45 // corresponding operation is executed, this class supports incremental
46 // planning.
class ArenaPlanner : public MemoryPlanner {
 public:
  // Ownership of 'context' is not taken and it must remain until the
  // ArenaPlanner is destroyed. If 'preserve_inputs' is true the inputs to the
  // graph will not share memory with any other tensor, effectively preserving
  // them until the end of inference. If 'preserve_intermediates' is true, no
  // tensor buffers overlap at all (see preserve_intermediates_ below).
  // 'tensor_alignment' is the byte alignment applied to each tensor buffer
  // inside the arena.
  ArenaPlanner(TfLiteContext* context, std::unique_ptr<GraphInfo> graph_info,
               bool preserve_inputs, bool preserve_intermediates,
               int tensor_alignment = kDefaultTensorAlignment);
  ~ArenaPlanner() override;
  // Non-copyable: the planner owns arena state that must not be duplicated.
  ArenaPlanner(const ArenaPlanner&) = delete;
  ArenaPlanner& operator=(const ArenaPlanner&) = delete;

  // MemoryPlanner interface. Per the class comment above: PlanAllocations
  // determines when each tensor needs to be allocated/deallocated, and
  // ExecuteAllocations assigns portions of the arena to tensors affected by
  // nodes in [first_node, last_node]. ResetAllocations discards previous
  // allocation state (exact semantics defined by MemoryPlanner — see base).
  TfLiteStatus ResetAllocations() override;
  TfLiteStatus PlanAllocations() override;
  TfLiteStatus ExecuteAllocations(int first_node, int last_node) override;

  // Returns the base arena location for a given allocation type.
  int64_t BasePointer(TfLiteAllocationType type);

 private:
  // Make sure all the arenas have reserved enough memory to store all their
  // tensors.
  TfLiteStatus Commit();

  // Traverse the allocation queue and reserve space in the appropriate arena
  // for all tensors affected by ops in the interval [first_node, last_node].
  TfLiteStatus CalculateAllocations(int first_node, int last_node);

  // Assign absolute memory location to a tensor, based on its relative
  // position inside the corresponding arena buffer.
  TfLiteStatus ResolveTensorAllocation(int tensor_index);

  // Register an allocation for the given tensor.
  TfLiteStatus CalculateTensorAllocation(int tensor_index);

  // Register a deallocation for the given tensor.
  TfLiteStatus CalculateTensorDeallocation(int tensor_index);

  // Register an allocation for all internal (temporary) tensors of
  // 'node_index'.
  TfLiteStatus CalculateAllocationOfInternalTensors(int node_index);

  // Register a deallocation for all internal (temporary) tensors of
  // 'node_index'.
  TfLiteStatus CalculateDeallocationOfInternalTensors(int node_index);

  // Not owned; must outlive this planner (see constructor comment).
  TfLiteContext* context_;
  std::unique_ptr<GraphInfo> graph_info_;

  // Stores allocation data for all tensors.
  std::vector<ArenaAlloc> allocs_;

  // A chronological list of instructions to allocate and deallocate tensors,
  // reflecting the way they are used in the graph.
  std::vector<AllocationInfo> alloc_queue_;

  // Raw memory buffer that is allocated for all temporary tensors and graph
  // outputs that are declared kTfLiteArenaRw.
  SimpleMemoryArena arena_;

  // Raw memory buffer that is allocated for persistent tensors that are
  // declared as kTfLiteArenaRwPersistent.
  SimpleMemoryArena persistent_arena_;

  // Ensure that the memory self-allocated for inputs is never reused by the
  // allocator. This allows, for example, multiple runs without getting
  // unpredictable results.
  bool preserve_inputs_;

  // If true, then no overlapping of memory areas is done, meaning intermediate
  // results can be queried after running (modulo running delegates).
  bool preserve_intermediates_;

  // Number of bytes that tensor buffers should be aligned to.
  int tensor_alignment_;
};
124 
125 }  // namespace tflite
126 
127 #endif  // TENSORFLOW_LITE_ARENA_PLANNER_H_
128