• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_TYPES_H_
17 #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_TYPES_H_
18 
19 #include <stddef.h>
20 
21 #include <cstddef>
22 #include <vector>
23 
24 namespace tflite {
25 namespace gpu {
26 
using TaskId = size_t;
using UsageGraph = std::vector<std::vector<size_t>>;

// Record, containing tensor size/shape and IDs of the first and the last task,
// that use this tensor as input or output. For example: tensor #3 with size
// tensor_size=65536 is first introduced in program #2 (first_task=2) and used
// for the last time in program #7 (last_task=7).
template <typename TensorSizeT>
struct TensorUsageRecord {
  TensorSizeT tensor_size;
  TaskId first_task;
  TaskId last_task;

  // Constructs a record for a tensor of the given size that is live from task
  // `first` through task `last`, inclusive.
  TensorUsageRecord(TensorSizeT size, TaskId first, TaskId last)
      : tensor_size(size), first_task(first), last_task(last) {}

  // Default order of tensor usage records is increasing order of first_task.
  bool operator<(const TensorUsageRecord<TensorSizeT>& other) const {
    return first_task < other.first_task;
  }
};
48 
// Describes how tensors are mapped onto shared objects: which object each
// tensor uses, and how large every shared object is.
template <typename TensorSizeT>
struct ObjectsAssignment {
  // object_ids[i] is the ID of the shared object that tensor i will use.
  std::vector<size_t> object_ids;
  // object_sizes[i] is the size of the shared object with ID equal to i.
  std::vector<TensorSizeT> object_sizes;
};
57 
// Information about assignment of tensors to offsets for the case, when all of
// them are going to be allocated in one continuous memory block.
struct OffsetsAssignment {
  // offsets[i] is the offset of tensor i within the shared memory block.
  std::vector<size_t> offsets;
  // Total size of the single memory block that fits all tensors.
  // Zero-initialized so a default-constructed assignment holds a well-defined
  // value instead of an indeterminate one; the struct remains an aggregate in
  // C++14 and later, so existing brace-initialization keeps working.
  size_t total_size = 0;
};
64 
// Takes the graph of tensor dependencies as input and returns the
// reallocation graph as output.
//
// The tensor dependencies graph is a directed graph with an edge x->y if and
// only if tensor x is used for calculating tensor y. This graph can be
// generated with the following pseudocode:
//   for op in operations do
//     for input_tensor in op.input_tensors do
//       for output_tensor in op.output_tensors do
//         if both input_tensor and output_tensor are intermediate tensors then
//           deps_graph[input_tensor].push_back(output_tensor)
//
// The reallocation graph is an undirected graph that has an edge x<->y if and
// only if tensors x and y can share memory in ANY order of parallel execution
// of operations.
UsageGraph ReallocationGraph(const UsageGraph& deps_graph);
78 
79 }  // namespace gpu
80 }  // namespace tflite
81 
82 #endif  // TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_TYPES_H_
83