/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_H_

#include <stddef.h>

#include <vector>

#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"

namespace tflite {
namespace gpu {

using TaskId = size_t;
// Converts a given assignment of tensors to shared objects into an assignment
// of the same tensors to offsets within one contiguous memory block.
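//
// Illustrative example (values and layout assumed for exposition, not part of
// the API contract): if three tensors are assigned to objects {0, 1, 0} with
// object sizes {100, 200}, laying the objects out back-to-back in object-id
// order yields tensor offsets {0, 100, 0} and a total block size of 300.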
OffsetsAssignment ObjectsToOffsets(
    const ObjectsAssignment<size_t>& obj_assignment);

enum class MemoryStrategy {
  // The naive strategy allocates a separate object for each tensor. Useful
  // for debugging, because every intermediate output stays observable.
  NAIVE,

  // The equality strategy reuses the same piece of memory for several tensors
  // that have the same size but non-intersecting usage intervals.
  EQUALITY,

  // Greedy-in-order iterates through all tensors in order of their
  // first_task and reuses the memory of tensors that will not be used
  // anymore for the new ones.
  GREEDY_IN_ORDER,

  // Greedy-by-breadth iterates through all tasks in non-increasing order of
  // their breadth and calculates allocations for the tensors used in these
  // tasks. The breadth of a task is the sum of the sizes of all tensors in
  // its TaskProfile.
  GREEDY_BY_BREADTH,

  // Greedy-by-size iterates through all tensors in non-increasing order of
  // their size and reuses the memory of tensors that will not be used
  // anymore for the new ones.
  GREEDY_BY_SIZE,

  // Runs several fast greedy algorithms and chooses the one that yields the
  // best memory allocation for the given usage records.
  GREEDY_BEST,

  // Mincostflow builds an auxiliary flow graph and solves the minimum-cost
  // flow problem on it. In the end, edges with zero residual capacity
  // determine the assignment of shared objects to tensors.
  MINCOSTFLOW,
};

// Chooses the greedy algorithm with the lowest memory consumption for the
// given usage records and returns the corresponding shared-object assignment.
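//
// Usage sketch (illustrative; assumes the TensorUsageRecord and
// ObjectsAssignment layouts from memory_management/types.h):
//   std::vector<TensorUsageRecord<size_t>> records = {
//       {/*tensor_size=*/32, /*first_task=*/0, /*last_task=*/1},
//       {/*tensor_size=*/32, /*first_task=*/2, /*last_task=*/3}};
//   ObjectsAssignment<size_t> assignment;
//   if (BestGreedy(records, &assignment).ok()) {
//     // assignment.object_ids[i] is the shared object id for tensor i;
//     // assignment.object_sizes holds the size of each shared object.
//   }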
absl::Status BestGreedy(
    const std::vector<TensorUsageRecord<size_t>>& usage_records,
    ObjectsAssignment<size_t>* assignment);

// Calculates the assignment of shared objects to the given tensors, including
// the objects' sizes. Below are specializations for different tensor size
// types that support additional memory strategies.
// If reallocation_graph is provided, the assignment of shared objects supports
// parallel execution order of operations, but memory consumption may be larger
// in that case. Currently only the GREEDY_IN_ORDER strategy can use the
// reallocation_graph.
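//
// Usage sketch (illustrative values; assumes the TensorUsageRecord layout
// from memory_management/types.h):
//   std::vector<TensorUsageRecord<size_t>> records = {
//       {/*tensor_size=*/16, /*first_task=*/0, /*last_task=*/1},
//       {/*tensor_size=*/16, /*first_task=*/1, /*last_task=*/2},
//       {/*tensor_size=*/16, /*first_task=*/2, /*last_task=*/3}};
//   ObjectsAssignment<size_t> assignment;
//   absl::Status status =
//       AssignObjectsToTensors(records, MemoryStrategy::EQUALITY, &assignment);
//   // Tensors 0 and 2 have equal sizes and non-intersecting usage intervals
//   // ([0, 1] and [2, 3]), so EQUALITY can map them to the same object.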
template <typename TensorSizeT>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<TensorSizeT>* assignment,
    const UsageGraph* reallocation_graph = nullptr) {
  switch (strategy) {
    case MemoryStrategy::NAIVE:
      return NaiveAssignment(usage_records, assignment);
    case MemoryStrategy::EQUALITY:
      return EqualityAssignment(usage_records, assignment);
    default:
      return absl::InternalError(
          "MemoryStrategy is not supported with current tensor size type.");
  }
}

template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<size_t>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment,
    const UsageGraph* reallocation_graph);

template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<BHWC>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment,
    const UsageGraph* reallocation_graph);

template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<uint2>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment,
    const UsageGraph* reallocation_graph);

template <>
absl::Status AssignObjectsToTensors(
    const std::vector<TensorUsageRecord<uint3>>& usage_records,
    MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment,
    const UsageGraph* reallocation_graph);

// Calculates the assignment of tensors to offsets, assuming all these tensors
// are going to be allocated in one contiguous memory block.
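//
// Usage sketch (illustrative; assumes the OffsetsAssignment layout from
// memory_management/types.h):
//   std::vector<TensorUsageRecord<size_t>> records = ...;
//   OffsetsAssignment assignment;
//   absl::Status status = AssignOffsetsToTensors(
//       records, MemoryStrategy::GREEDY_BY_SIZE, &assignment);
//   // On success, assignment.offsets[i] is the offset of tensor i inside the
//   // shared block, and assignment.total_size is the size of that block.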
absl::Status AssignOffsetsToTensors(
    const std::vector<TensorUsageRecord<size_t>>& usage_records,
    const MemoryStrategy& strategy, OffsetsAssignment* assignment,
    const UsageGraph* reallocation_graph = nullptr);

}  // namespace gpu
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_H_