1 /*
2 * Copyright © 2019 Raspberry Pi Ltd
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "v3dv_private.h"
25
26 #include <errno.h>
27 #include <sys/mman.h>
28
29 #include "drm-uapi/v3d_drm.h"
30 #include "util/u_memory.h"
31
/* Default max size of the bo cache, in MB.
 *
 * This value comes from testing different Vulkan applications. Greater values
 * didn't get any further performance benefit. This looks somewhat small, but
 * from testing those applications, the main consumers of the bo cache are
 * the BOs used for the CLs, which are usually small.
 */
39 #define DEFAULT_MAX_BO_CACHE_SIZE 64
40
/* We decided against using a V3D_DEBUG flag for this, as it would mean
 * adding a run-time check to most of the calls.
 */
44 static const bool dump_stats = false;
45
/* Dumps the device-wide BO accounting and the current BO cache state to
 * stderr. Only used when the build-time dump_stats flag is enabled.
 */
static void
bo_dump_stats(struct v3dv_device *device)
{
   struct v3dv_bo_cache *cache = &device->bo_cache;

   fprintf(stderr, " BOs allocated: %d\n", device->bo_count);
   fprintf(stderr, " BOs size: %dkb\n", device->bo_size / 1024);
   fprintf(stderr, " BOs cached: %d\n", cache->cache_count);
   fprintf(stderr, " BOs cached size: %dkb\n", cache->cache_size / 1024);

   if (!list_is_empty(&cache->time_list)) {
      /* time_list is appended to on free (see v3dv_bo_free), so the head is
       * the oldest cached BO and the tail the most recently cached one.
       */
      struct v3dv_bo *first = list_first_entry(&cache->time_list,
                                               struct v3dv_bo,
                                               time_list);
      struct v3dv_bo *last = list_last_entry(&cache->time_list,
                                             struct v3dv_bo,
                                             time_list);

      fprintf(stderr, " oldest cache time: %ld\n",
              (long)first->free_time);
      fprintf(stderr, " newest cache time: %ld\n",
              (long)last->free_time);

      struct timespec time;
      clock_gettime(CLOCK_MONOTONIC, &time);
      fprintf(stderr, " now: %lld\n",
              (long long)time.tv_sec);
   }

   if (cache->size_list_size) {
      /* Count how many per-size buckets are currently empty. */
      uint32_t empty_size_list = 0;
      for (uint32_t i = 0; i < cache->size_list_size; i++) {
         if (list_is_empty(&cache->size_list[i]))
            empty_size_list++;
      }
      fprintf(stderr, " Empty size_list lists: %d\n", empty_size_list);
   }
}
84
85 static void
bo_remove_from_cache(struct v3dv_bo_cache * cache,struct v3dv_bo * bo)86 bo_remove_from_cache(struct v3dv_bo_cache *cache, struct v3dv_bo *bo)
87 {
88 list_del(&bo->time_list);
89 list_del(&bo->size_list);
90
91 cache->cache_count--;
92 cache->cache_size -= bo->size;
93 }
94
95 static struct v3dv_bo *
bo_from_cache(struct v3dv_device * device,uint32_t size,const char * name)96 bo_from_cache(struct v3dv_device *device, uint32_t size, const char *name)
97 {
98 struct v3dv_bo_cache *cache = &device->bo_cache;
99 uint32_t page_index = size / 4096 - 1;
100
101 if (cache->size_list_size <= page_index)
102 return NULL;
103
104 struct v3dv_bo *bo = NULL;
105
106 mtx_lock(&cache->lock);
107 if (!list_is_empty(&cache->size_list[page_index])) {
108 bo = list_first_entry(&cache->size_list[page_index],
109 struct v3dv_bo, size_list);
110
111 /* Check that the BO has gone idle. If not, then we want to
112 * allocate something new instead, since we assume that the
113 * user will proceed to CPU map it and fill it with stuff.
114 */
115 if (!v3dv_bo_wait(device, bo, 0)) {
116 mtx_unlock(&cache->lock);
117 return NULL;
118 }
119
120 bo_remove_from_cache(cache, bo);
121 bo->name = name;
122 p_atomic_set(&bo->refcnt, 1);
123 }
124 mtx_unlock(&cache->lock);
125 return bo;
126 }
127
/* Releases a BO to the kernel via DRM_IOCTL_GEM_CLOSE and updates the
 * device accounting. The last reference must already be gone (refcnt == 0)
 * and the BO must not be CPU-mapped.
 *
 * Returns true on success (or when bo is NULL).
 */
static bool
bo_free(struct v3dv_device *device,
        struct v3dv_bo *bo)
{
   if (!bo)
      return true;

   assert(p_atomic_read(&bo->refcnt) == 0);
   assert(bo->map == NULL);

   /* Imported BOs are not counted in the device allocation stats on
    * creation, so don't discount them here either.
    */
   if (!bo->is_import) {
      device->bo_count--;
      device->bo_size -= bo->size;

      if (dump_stats) {
         fprintf(stderr, "Freed %s%s%dkb:\n",
                 bo->name ? bo->name : "",
                 bo->name ? " " : "",
                 bo->size / 1024);
         bo_dump_stats(device);
      }
   }

   /* Keep a copy of the handle: the struct is wiped before the ioctl. */
   uint32_t handle = bo->handle;
   /* Our BO structs are stored in a sparse array in the physical device,
    * so we don't want to free the BO pointer, instead we want to reset it
    * to 0, to signal that array entry as being free.
    *
    * We must do the reset before we actually free the BO in the kernel, since
    * otherwise there is a chance the application creates another BO in a
    * different thread and gets the same array entry, causing a race.
    */
   memset(bo, 0, sizeof(*bo));

   struct drm_gem_close c;
   memset(&c, 0, sizeof(c));
   c.handle = handle;
   int ret = v3dv_ioctl(device->pdevice->render_fd, DRM_IOCTL_GEM_CLOSE, &c);
   if (ret != 0)
      fprintf(stderr, "close object %d: %s\n", handle, strerror(errno));

   return ret == 0;
}
171
172 static void
bo_cache_free_all(struct v3dv_device * device,bool with_lock)173 bo_cache_free_all(struct v3dv_device *device,
174 bool with_lock)
175 {
176 struct v3dv_bo_cache *cache = &device->bo_cache;
177
178 if (with_lock)
179 mtx_lock(&cache->lock);
180 list_for_each_entry_safe(struct v3dv_bo, bo, &cache->time_list,
181 time_list) {
182 bo_remove_from_cache(cache, bo);
183 bo_free(device, bo);
184 }
185 if (with_lock)
186 mtx_unlock(&cache->lock);
187
188 }
189
190 void
v3dv_bo_init(struct v3dv_bo * bo,uint32_t handle,uint32_t size,uint32_t offset,const char * name,bool private)191 v3dv_bo_init(struct v3dv_bo *bo,
192 uint32_t handle,
193 uint32_t size,
194 uint32_t offset,
195 const char *name,
196 bool private)
197 {
198 p_atomic_set(&bo->refcnt, 1);
199 bo->handle = handle;
200 bo->handle_bit = 1ull << (handle % 64);
201 bo->size = size;
202 bo->offset = offset;
203 bo->map = NULL;
204 bo->map_size = 0;
205 bo->name = name;
206 bo->private = private;
207 bo->dumb_handle = -1;
208 bo->is_import = false;
209 list_inithead(&bo->list_link);
210 }
211
/* Initializes a BO struct for a buffer imported from elsewhere.
 *
 * Same as v3dv_bo_init() (with the fixed name "import"), but tags the BO
 * as an import so bo_free() skips the device allocation accounting for it.
 */
void
v3dv_bo_init_import(struct v3dv_bo *bo,
                    uint32_t handle,
                    uint32_t size,
                    uint32_t offset,
                    bool private)
{
   v3dv_bo_init(bo, handle, size, offset, "import", private);
   bo->is_import = true;
}
222
223 struct v3dv_bo *
v3dv_bo_alloc(struct v3dv_device * device,uint32_t size,const char * name,bool private)224 v3dv_bo_alloc(struct v3dv_device *device,
225 uint32_t size,
226 const char *name,
227 bool private)
228 {
229 struct v3dv_bo *bo;
230
231 const uint32_t page_align = 4096; /* Always allocate full pages */
232 size = align(size, page_align);
233
234 if (private) {
235 bo = bo_from_cache(device, size, name);
236 if (bo) {
237 if (dump_stats) {
238 fprintf(stderr, "Allocated %s %dkb from cache:\n",
239 name, size / 1024);
240 bo_dump_stats(device);
241 }
242 return bo;
243 }
244 }
245
246 retry:
247 ;
248
249 bool cleared_and_retried = false;
250 struct drm_v3d_create_bo create = {
251 .size = size
252 };
253
254 int ret = v3dv_ioctl(device->pdevice->render_fd,
255 DRM_IOCTL_V3D_CREATE_BO, &create);
256 if (ret != 0) {
257 if (!list_is_empty(&device->bo_cache.time_list) &&
258 !cleared_and_retried) {
259 cleared_and_retried = true;
260 bo_cache_free_all(device, true);
261 goto retry;
262 }
263
264 fprintf(stderr, "Failed to allocate device memory for BO\n");
265 return NULL;
266 }
267
268 assert(create.offset % page_align == 0);
269 assert((create.offset & 0xffffffff) == create.offset);
270
271 bo = v3dv_device_lookup_bo(device->pdevice, create.handle);
272 assert(bo && bo->handle == 0);
273
274 v3dv_bo_init(bo, create.handle, size, create.offset, name, private);
275
276 device->bo_count++;
277 device->bo_size += bo->size;
278 if (dump_stats) {
279 fprintf(stderr, "Allocated %s %dkb:\n", name, size / 1024);
280 bo_dump_stats(device);
281 }
282
283 return bo;
284 }
285
286 bool
v3dv_bo_map_unsynchronized(struct v3dv_device * device,struct v3dv_bo * bo,uint32_t size)287 v3dv_bo_map_unsynchronized(struct v3dv_device *device,
288 struct v3dv_bo *bo,
289 uint32_t size)
290 {
291 assert(bo != NULL && size <= bo->size);
292
293 if (bo->map)
294 return bo->map;
295
296 struct drm_v3d_mmap_bo map;
297 memset(&map, 0, sizeof(map));
298 map.handle = bo->handle;
299 int ret = v3dv_ioctl(device->pdevice->render_fd,
300 DRM_IOCTL_V3D_MMAP_BO, &map);
301 if (ret != 0) {
302 fprintf(stderr, "map ioctl failure\n");
303 return false;
304 }
305
306 bo->map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
307 device->pdevice->render_fd, map.offset);
308 if (bo->map == MAP_FAILED) {
309 fprintf(stderr, "mmap of bo %d (offset 0x%016llx, size %d) failed\n",
310 bo->handle, (long long)map.offset, (uint32_t)bo->size);
311 return false;
312 }
313 VG(VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, false));
314
315 bo->map_size = size;
316
317 return true;
318 }
319
320 bool
v3dv_bo_wait(struct v3dv_device * device,struct v3dv_bo * bo,uint64_t timeout_ns)321 v3dv_bo_wait(struct v3dv_device *device,
322 struct v3dv_bo *bo,
323 uint64_t timeout_ns)
324 {
325 struct drm_v3d_wait_bo wait = {
326 .handle = bo->handle,
327 .timeout_ns = timeout_ns,
328 };
329 return v3dv_ioctl(device->pdevice->render_fd,
330 DRM_IOCTL_V3D_WAIT_BO, &wait) == 0;
331 }
332
333 bool
v3dv_bo_map(struct v3dv_device * device,struct v3dv_bo * bo,uint32_t size)334 v3dv_bo_map(struct v3dv_device *device, struct v3dv_bo *bo, uint32_t size)
335 {
336 assert(bo && size <= bo->size);
337
338 bool ok = v3dv_bo_map_unsynchronized(device, bo, size);
339 if (!ok)
340 return false;
341
342 ok = v3dv_bo_wait(device, bo, OS_TIMEOUT_INFINITE);
343 if (!ok) {
344 fprintf(stderr, "memory wait for map failed\n");
345 return false;
346 }
347
348 return true;
349 }
350
351 void
v3dv_bo_unmap(struct v3dv_device * device,struct v3dv_bo * bo)352 v3dv_bo_unmap(struct v3dv_device *device, struct v3dv_bo *bo)
353 {
354 assert(bo && bo->map && bo->map_size > 0);
355
356 munmap(bo->map, bo->map_size);
357 VG(VALGRIND_FREELIKE_BLOCK(bo->map, 0));
358 bo->map = NULL;
359 bo->map_size = 0;
360 }
361
362 static bool
reallocate_size_list(struct v3dv_bo_cache * cache,struct v3dv_device * device,uint32_t size)363 reallocate_size_list(struct v3dv_bo_cache *cache,
364 struct v3dv_device *device,
365 uint32_t size)
366 {
367 struct list_head *new_list =
368 vk_alloc(&device->vk.alloc, sizeof(struct list_head) * size, 8,
369 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
370
371 if (!new_list) {
372 fprintf(stderr, "Failed to allocate host memory for cache bo list\n");
373 return false;
374 }
375 struct list_head *old_list = cache->size_list;
376
377 /* Move old list contents over (since the array has moved, and
378 * therefore the pointers to the list heads have to change).
379 */
380 for (int i = 0; i < cache->size_list_size; i++) {
381 struct list_head *old_head = &cache->size_list[i];
382 if (list_is_empty(old_head)) {
383 list_inithead(&new_list[i]);
384 } else {
385 new_list[i].next = old_head->next;
386 new_list[i].prev = old_head->prev;
387 new_list[i].next->prev = &new_list[i];
388 new_list[i].prev->next = &new_list[i];
389 }
390 }
391 for (int i = cache->size_list_size; i < size; i++)
392 list_inithead(&new_list[i]);
393
394 cache->size_list = new_list;
395 cache->size_list_size = size;
396 vk_free(&device->vk.alloc, old_list);
397
398 return true;
399 }
400
401 void
v3dv_bo_cache_init(struct v3dv_device * device)402 v3dv_bo_cache_init(struct v3dv_device *device)
403 {
404 device->bo_size = 0;
405 device->bo_count = 0;
406 list_inithead(&device->bo_cache.time_list);
407 /* FIXME: perhaps set a initial size for the size-list, to avoid run-time
408 * reallocations
409 */
410 device->bo_cache.size_list_size = 0;
411
412 const char *max_cache_size_str = getenv("V3DV_MAX_BO_CACHE_SIZE");
413 if (max_cache_size_str == NULL)
414 device->bo_cache.max_cache_size = DEFAULT_MAX_BO_CACHE_SIZE;
415 else
416 device->bo_cache.max_cache_size = atoll(max_cache_size_str);
417
418 if (dump_stats) {
419 fprintf(stderr, "MAX BO CACHE SIZE: %iMB\n", device->bo_cache.max_cache_size);
420 }
421
422 mtx_lock(&device->bo_cache.lock);
423 device->bo_cache.max_cache_size *= 1024 * 1024;
424 device->bo_cache.cache_count = 0;
425 device->bo_cache.cache_size = 0;
426 mtx_unlock(&device->bo_cache.lock);
427 }
428
/* Tears down the BO cache at device destruction: frees every cached BO and
 * then the per-size bucket array itself.
 */
void
v3dv_bo_cache_destroy(struct v3dv_device *device)
{
   bo_cache_free_all(device, true);
   vk_free(&device->vk.alloc, device->bo_cache.size_list);

   if (dump_stats) {
      fprintf(stderr, "BO stats after screen destroy:\n");
      bo_dump_stats(device);
   }
}
440
441
/* Evicts cached BOs that have been sitting in the cache for too long.
 *
 * time_list is ordered oldest-first, so iteration can stop at the first
 * entry that is still fresh. Caller must hold the cache lock.
 */
static void
free_stale_bos(struct v3dv_device *device,
               time_t time)
{
   struct v3dv_bo_cache *cache = &device->bo_cache;
   bool freed_any = false;

   list_for_each_entry_safe(struct v3dv_bo, bo, &cache->time_list,
                            time_list) {
      /* If it has been cached for more than 2 seconds, free it. */
      if (time - bo->free_time > 2) {
         if (dump_stats && !freed_any) {
            fprintf(stderr, "Freeing stale BOs:\n");
            bo_dump_stats(device);
            freed_any = true;
         }

         bo_remove_from_cache(cache, bo);
         bo_free(device, bo);
      } else {
         /* Everything after this entry is newer; stop scanning. */
         break;
      }
   }

   if (dump_stats && freed_any) {
      fprintf(stderr, "Freed stale BOs:\n");
      bo_dump_stats(device);
   }
}
471
/* Drops a reference to a BO. When the last reference is released the BO is
 * either returned to the BO cache (private BOs fitting the cache budget) or
 * released to the kernel immediately.
 *
 * Returns false only if releasing the BO to the kernel failed.
 */
bool
v3dv_bo_free(struct v3dv_device *device,
             struct v3dv_bo *bo)
{
   if (!bo)
      return true;

   /* Only the thread that drops the last reference continues past here. */
   if (!p_atomic_dec_zero(&bo->refcnt))
      return true;

   if (bo->map)
      v3dv_bo_unmap(device, bo);

   struct timespec time;
   struct v3dv_bo_cache *cache = &device->bo_cache;
   /* Cache buckets are indexed by page count - 1 (sizes are page-aligned). */
   uint32_t page_index = bo->size / 4096 - 1;

   /* If the BO doesn't fit the remaining cache budget, try to make room by
    * evicting stale cached BOs first.
    *
    * NOTE(review): cache->cache_size is read here (and below) without the
    * cache lock; only the eviction itself runs under it — confirm this
    * race is acceptable.
    */
   if (bo->private &&
       bo->size > cache->max_cache_size - cache->cache_size) {
      clock_gettime(CLOCK_MONOTONIC, &time);
      mtx_lock(&cache->lock);
      free_stale_bos(device, time.tv_sec);
      mtx_unlock(&cache->lock);
   }

   /* Non-private BOs are never cached; neither are BOs that still don't
    * fit after the eviction above.
    */
   if (!bo->private ||
       bo->size > cache->max_cache_size - cache->cache_size) {
      return bo_free(device, bo);
   }

   clock_gettime(CLOCK_MONOTONIC, &time);
   mtx_lock(&cache->lock);

   /* Grow the per-size bucket array on demand for this BO's size. */
   if (cache->size_list_size <= page_index) {
      if (!reallocate_size_list(cache, device, page_index + 1)) {
         bool outcome = bo_free(device, bo);
         /* If the reallocation failed, it usually means that we are out of
          * memory, so we also free all the bo cache. We need to call it to
          * not use the cache lock, as we are already under it.
          */
         bo_cache_free_all(device, false);
         mtx_unlock(&cache->lock);
         return outcome;
      }
   }

   /* Insert into the cache: tail of the size bucket and tail of the
    * time-ordered list (so time_list stays oldest-first).
    */
   bo->free_time = time.tv_sec;
   list_addtail(&bo->size_list, &cache->size_list[page_index]);
   list_addtail(&bo->time_list, &cache->time_list);

   cache->cache_count++;
   cache->cache_size += bo->size;

   if (dump_stats) {
      fprintf(stderr, "Freed %s %dkb to cache:\n",
              bo->name, bo->size / 1024);
      bo_dump_stats(device);
   }
   bo->name = NULL;

   free_stale_bos(device, time.tv_sec);

   mtx_unlock(&cache->lock);

   return true;
}
538