/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pb_cache.h"
#include "util/u_memory.h"
#include "util/u_time.h"

/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct pb_buffer *buf = entry->buffer;

   assert(!pipe_is_referenced(&buf->reference));

   /* Unlink the entry and update the cache accounting only if the entry
    * is actually on a cache list.
    */
   if (entry->head.next) {
      LIST_DEL(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= buf->size;
   }
   mgr->destroy_buffer(buf);
}

/**
 * Free as many cached buffers from the list head as possible.
 */
static void
release_expired_buffers_locked(struct list_head *cache)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *entry;
   int64_t now;

   now = os_time_get();

   curr = cache->next;
   next = curr->next;
   while (curr != cache) {
      entry = LIST_ENTRY(struct pb_cache_entry, curr, head);

      /* Entries are kept in insertion order, so the first unexpired entry
       * implies all following entries are unexpired too.
       */
      if (!os_time_timeout(entry->start, entry->end, now))
         break;

      destroy_buffer_locked(entry);

      curr = next;
      next = curr->next;
   }
}

/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   struct pb_buffer *buf = entry->buffer;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&buf->reference));

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      release_expired_buffers_locked(&mgr->buckets[i]);

   /* Destroy the buffer right away if adding it would exceed the maximum
    * cache size.
    */
   if (mgr->cache_size + buf->size > mgr->max_cache_size) {
      mgr->destroy_buffer(buf);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += buf->size;
   pipe_mutex_unlock(mgr->mutex);
}
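
/*
 * A minimal sketch of the typical release path in a driver, assuming a
 * hypothetical driver_buffer type that embeds a pb_cache_entry. When the
 * last reference to a buffer goes away, the buffer is handed back to the
 * cache instead of being destroyed immediately:
 *
 *    static void driver_buffer_destroy(struct pb_buffer *buf)
 *    {
 *       struct driver_buffer *dbuf = (struct driver_buffer *)buf;
 *
 *       // Called once the refcount has dropped to zero: recycle the
 *       // buffer through the cache rather than freeing it for good.
 *       pb_cache_add_buffer(&dbuf->cache_entry);
 *    }
 */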

/**
 * \return 1 if compatible and can be reclaimed
 *         0 if incompatible
 *         -1 if compatible and can't be reclaimed
 */
static int
pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
                          pb_size size, unsigned alignment, unsigned usage)
{
   struct pb_cache *mgr = entry->mgr;
   struct pb_buffer *buf = entry->buffer;

   if (!pb_check_usage(usage, buf->usage))
      return 0;

   /* Be lenient with size: accept any buffer at least as large as requested,
    * but no more than size_factor times larger.
    */
   if (buf->size < size ||
       buf->size > (unsigned) (mgr->size_factor * size))
      return 0;

   if (usage & mgr->bypass_usage)
      return 0;

   if (!pb_check_alignment(alignment, buf->alignment))
      return 0;

   return mgr->can_reclaim(buf) ? 1 : -1;
}

/**
 * Find a compatible buffer in the cache, return it, and remove it
 * from the cache.
 */
struct pb_buffer *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                        unsigned alignment, unsigned usage,
                        unsigned bucket_index)
{
   struct pb_cache_entry *entry;
   struct pb_cache_entry *cur_entry;
   struct list_head *cur, *next;
   int64_t now;
   int ret = 0;
   struct list_head *cache = &mgr->buckets[bucket_index];

   pipe_mutex_lock(mgr->mutex);

   entry = NULL;
   cur = cache->next;
   next = cur->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (cur != cache) {
      cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);

      if (!entry && (ret = pb_cache_is_buffer_compat(cur_entry, size,
                                                     alignment, usage)) > 0)
         entry = cur_entry;
      else if (os_time_timeout(cur_entry->start, cur_entry->end, now))
         destroy_buffer_locked(cur_entry);
      else
         /* This buffer (and all that follow it) is still hot in the cache */
         break;

      /* the buffer is busy (and probably all remaining ones too) */
      if (ret == -1)
         break;

      cur = next;
      next = cur->next;
   }

   /* keep searching in the hot buffers */
   if (!entry && ret != -1) {
      while (cur != cache) {
         cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
         ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);

         if (ret > 0) {
            entry = cur_entry;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         cur = next;
         next = cur->next;
      }
   }

   /* found a compatible buffer, remove it from the cache and return it */
   if (entry) {
      struct pb_buffer *buf = entry->buffer;

      mgr->cache_size -= buf->size;
      LIST_DEL(&entry->head);
      --mgr->num_buffers;
      pipe_mutex_unlock(mgr->mutex);
      /* Set the reference count of the reclaimed buffer back to 1 */
      pipe_reference_init(&buf->reference, 1);
      return buf;
   }

   pipe_mutex_unlock(mgr->mutex);
   return NULL;
}
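
/*
 * A minimal sketch of how a driver would consume the cache on allocation,
 * assuming a hypothetical driver_buffer type and driver_buffer_create_new()
 * helper. Try to reclaim a cached buffer first and fall back to a fresh
 * allocation:
 *
 *    struct pb_buffer *buf =
 *       pb_cache_reclaim_buffer(&screen->cache, size, alignment, usage,
 *                               bucket_index);
 *    if (!buf) {
 *       struct driver_buffer *dbuf = driver_buffer_create_new(screen, size);
 *
 *       // Attach the new buffer to a cache bucket so it can be recycled
 *       // later via pb_cache_add_buffer().
 *       pb_cache_init_entry(&screen->cache, &dbuf->cache_entry, &dbuf->base,
 *                           bucket_index);
 *       buf = &dbuf->base;
 *    }
 */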

/**
 * Empty the cache. Useful when there is not enough memory.
 */
void
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *buf;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
      struct list_head *cache = &mgr->buckets[i];

      curr = cache->next;
      next = curr->next;
      while (curr != cache) {
         buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
         destroy_buffer_locked(buf);
         curr = next;
         next = curr->next;
      }
   }
   pipe_mutex_unlock(mgr->mutex);
}

/**
 * Initialize a cache entry so that the buffer can later be cached in the
 * bucket given by \p bucket_index.
 */
void
pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                    struct pb_buffer *buf, unsigned bucket_index)
{
   memset(entry, 0, sizeof(*entry));
   entry->buffer = buf;
   entry->mgr = mgr;
   entry->bucket_index = bucket_index;
}

/**
 * Initialize a caching buffer manager.
 *
 * @param mgr     The cache buffer manager
 * @param usecs   Unused buffers may be released from the cache after this
 *                many microseconds
 * @param size_factor  Declare cached buffers that are at most size_factor
 *                times bigger than the requested size as cache hits.
 * @param bypass_usage Bitmask. If (requested usage & bypass_usage) != 0,
 *                the request bypasses the cache and no cached buffer is
 *                reclaimed for it.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 *                hold.
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              void (*destroy_buffer)(struct pb_buffer *buf),
              bool (*can_reclaim)(struct pb_buffer *buf))
{
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      LIST_INITHEAD(&mgr->buckets[i]);

   pipe_mutex_init(mgr->mutex);
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->usecs = usecs;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
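
/*
 * A minimal usage sketch with illustrative parameter values and hypothetical
 * radeon_* callbacks: cached buffers expire after one second, buffers up to
 * twice the requested size count as hits, and the cache is capped at 256 MB.
 *
 *    pb_cache_init(&screen->cache, 1000000, 2.0f, 0,
 *                  256 * 1024 * 1024,
 *                  radeon_bo_destroy_for_good, radeon_bo_can_reclaim);
 */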

/**
 * Deinitialize the manager completely.
 */
void
pb_cache_deinit(struct pb_cache *mgr)
{
   pb_cache_release_all_buffers(mgr);
   pipe_mutex_destroy(mgr->mutex);
}