/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_EQUALITY_ASSIGNMENT_H_
#define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_EQUALITY_ASSIGNMENT_H_

#include <stddef.h>

#include <cstddef>
#include <queue>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"

namespace tflite {
namespace gpu {

// Fast version of Equality Assignment for hashable tensor size types. Each
// tensor is assigned to a shared object of exactly equal size; free objects
// are pooled by size in a hash map, so the assignment runs in roughly
// O(n log n) time for n usage records.
template <typename TensorSizeT>
absl::Status EqualityAssignmentWithHash(
    const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
    ObjectsAssignment<TensorSizeT>* assignment) {
  size_t num_records = usage_records.size();
  assignment->object_sizes.clear();
  assignment->object_ids.assign(num_records, kNotAssigned);

  // Pool is a map with tensor size as the key and a vector of ids of free
  // shared objects of that size as the value.
  absl::flat_hash_map<TensorSizeT, std::vector<size_t>> pool;
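  // Queue of objects currently in use. The ordering of QueueRecord (defined
  // in internal.h) keeps the record with the smallest last_task on top, so
  // the loop below can release every object whose last user has already
  // executed before assigning the next tensor.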
  std::priority_queue<QueueRecord> objects_in_use;
  for (size_t i = 0; i < num_records; ++i) {
    // Pop from the queue and add to the pool all objects that are no longer
    // in use at the time of execution of the first_task of the i-th
    // intermediate tensor.
    while (!objects_in_use.empty() &&
           objects_in_use.top().last_task < usage_records[i].first_task) {
      auto object_id = objects_in_use.top().object_id;
      pool[assignment->object_sizes[object_id]].push_back(object_id);
      objects_in_use.pop();
    }

    const TensorSizeT tensor_size = usage_records[i].tensor_size;
    auto pool_it = pool.find(tensor_size);
    if (pool_it == pool.end() || pool_it->second.empty()) {
      // No free shared object with size equal to tensor_size. Create a new
      // one, assign the i-th tensor to it and add it to the queue of objects
      // in use.
      assignment->object_ids[i] = assignment->object_sizes.size();
      assignment->object_sizes.push_back(tensor_size);
      objects_in_use.push(
          {usage_records[i].last_task, assignment->object_ids[i]});
    } else {
      // The free shared object with id pool_it->second.back() has size equal
      // to tensor_size. Reuse this object: erase it from the pool and add it
      // to the queue of objects in use.
      assignment->object_ids[i] = pool_it->second.back();
      pool_it->second.pop_back();
      objects_in_use.push(
          {usage_records[i].last_task, assignment->object_ids[i]});
    }
  }
  return absl::OkStatus();
}
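//
// Illustrative usage sketch (not part of this header's API). The field names
// below follow the accesses made above; the exact brace-initialization shape
// of TensorUsageRecord is an assumption for illustration only:
//
//   std::vector<TensorUsageRecord<size_t>> records = {
//       {/*tensor_size=*/1024, /*first_task=*/0, /*last_task=*/2},
//       {/*tensor_size=*/512, /*first_task=*/1, /*last_task=*/3},
//       {/*tensor_size=*/1024, /*first_task=*/3, /*last_task=*/5},
//   };
//   ObjectsAssignment<size_t> assignment;
//   absl::Status status = EqualityAssignmentWithHash(records, &assignment);
//   // On success, assignment.object_ids[i] is the shared object assigned to
//   // the i-th record and assignment.object_sizes lists each object's size;
//   // here records 0 and 2 can share one object of size 1024.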

// Slower version of Equality Assignment for tensor size types that are not
// hashable. It keeps a plain vector of shared objects and scans it linearly
// for every tensor, so the assignment takes O(n * m) time for n usage records
// and m shared objects.
template <typename TensorSizeT>
absl::Status EqualityAssignment(
    const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records,
    ObjectsAssignment<TensorSizeT>* assignment) {
  size_t num_records = usage_records.size();
  assignment->object_sizes.clear();
  assignment->object_ids.assign(num_records, kNotAssigned);

  // For each shared object, the index of the task after whose execution the
  // object can be deallocated.
  std::vector<size_t> dealloc_task;
  for (size_t i = 0; i < num_records; ++i) {
    const TensorSizeT tensor_size = usage_records[i].tensor_size;
    size_t best_obj = kNotAssigned;
    for (size_t obj = 0; obj < assignment->object_sizes.size(); ++obj) {
      // Find a shared object that has the same size as the current tensor and
      // was deallocated before the execution of this tensor's first_task.
      if (dealloc_task[obj] < usage_records[i].first_task &&
          assignment->object_sizes[obj] == tensor_size) {
        best_obj = obj;
        break;
      }
    }
    if (best_obj == kNotAssigned) {
      // No free shared object with size equal to tensor_size. Create a new
      // one, assign the i-th tensor to it and save its last task as the
      // deallocation task.
      assignment->object_ids[i] = assignment->object_sizes.size();
      assignment->object_sizes.push_back(tensor_size);
      dealloc_task.push_back(usage_records[i].last_task);
    } else {
      // The shared object with id best_obj has size equal to tensor_size.
      // Reuse this object and update its deallocation task.
      assignment->object_ids[i] = best_obj;
      dealloc_task[best_obj] = usage_records[i].last_task;
    }
  }
  return absl::OkStatus();
}

}  // namespace gpu
}  // namespace tflite

#endif  // TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_EQUALITY_ASSIGNMENT_H_