/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/dma-resv.h>

#include "ttm_module.h"

#include <linux/android_kabi.h>
ANDROID_KABI_DECLONLY(dma_buf);
ANDROID_KABI_DECLONLY(dma_buf_attachment);
ANDROID_KABI_DECLONLY(sg_table);
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	struct drm_printer p = drm_dbg_printer(NULL, DRM_UT_CORE, TTM_PFX);
	struct ttm_resource_manager *man;
	int i, mem_type;

	for (i = 0; i < placement->num_placement; i++) {
		mem_type = placement->placement[i].mem_type;
		drm_printf(&p, "  placement[%d]=0x%08X (%d)\n",
			   i, placement->placement[i].flags, mem_type);
		man = ttm_manager_type(bo->bdev, mem_type);
		ttm_resource_manager_debug(man, &p);
	}
}

/**
 * ttm_bo_move_to_lru_tail
 *
 * @bo: The buffer object.
 *
 * Move this BO to the tail of all lru lists used to lookup and reserve an
 * object. This function must be called with struct ttm_global::lru_lock
 * held, and is used to make a BO less likely to be considered for eviction.
 */
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->resource)
		ttm_resource_move_to_lru_tail(bo->resource);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
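
/*
 * Example (illustrative sketch, not part of this file; my_bo_touch() is a
 * hypothetical driver helper): bump a BO on the LRU from a submission path
 * so it is less likely to be picked for eviction. The reservation lock
 * satisfies the dma_resv_assert_held() above, and the LRU lock protects
 * the list manipulation itself:
 *
 *	static void my_bo_touch(struct ttm_buffer_object *bo)
 *	{
 *		dma_resv_lock(bo->base.resv, NULL);
 *		spin_lock(&bo->bdev->lru_lock);
 *		ttm_bo_move_to_lru_tail(bo);
 *		spin_unlock(&bo->bdev->lru_lock);
 *		dma_resv_unlock(bo->base.resv);
 *	}
 */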

/**
 * ttm_bo_set_bulk_move - update the BO's bulk move object
 *
 * @bo: The buffer object.
 * @bulk: bulk move structure
 *
 * Update the BO's bulk move object, making sure that resources are added and
 * removed as well. A bulk move allows moving many resources on the LRU at
 * once, resulting in much less overhead of maintaining the LRU.
 * The only requirement is that the resources stay together on the LRU and are
 * never separated. This is enforced by setting the bulk_move structure on a BO.
 * ttm_lru_bulk_move_tail() should be used to move all resources to the tail of
 * their LRU list.
 */
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk)
{
	dma_resv_assert_held(bo->base.resv);

	if (bo->bulk_move == bulk)
		return;

	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	bo->bulk_move = bulk;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_set_bulk_move);
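
/*
 * Example (hedged sketch of typical usage; "my_vm" with an embedded
 * struct ttm_lru_bulk_move is hypothetical driver state): group all
 * per-VM BOs into one bulk move so the whole set can be bumped on the
 * LRU at once instead of per BO:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_set_bulk_move(bo, &my_vm->lru_bulk_move);
 *	dma_resv_unlock(bo->base.resv);
 *
 * Later, with the LRU lock held, the whole group moves together:
 *
 *	spin_lock(&bdev->lru_lock);
 *	ttm_lru_bulk_move_tail(&my_vm->lru_bulk_move);
 *	spin_unlock(&bdev->lru_lock);
 */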

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_resource *mem, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_place *hop)
{
	struct ttm_device *bdev = bo->bdev;
	bool old_use_tt, new_use_tt;
	int ret;

	old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
	new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;

	ttm_bo_unmap_virtual(bo);

	/*
	 * Create and bind a ttm if required.
	 */

	if (new_use_tt) {
		/* Zero init the new TTM structure if the old location should
		 * have used one as well.
		 */
		ret = ttm_tt_create(bo, old_use_tt);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
			if (ret)
				goto out_err;
		}
	}

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (ret)
		goto out_err;

	ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
	if (ret) {
		if (ret == -EMULTIHOP)
			return ret;
		goto out_err;
	}

	ctx->bytes_moved += bo->base.size;
	return 0;

out_err:
	if (!old_use_tt)
		ttm_bo_tt_destroy(bo);

	return ret;
}

/*
 * Must be called with bo::reserved held.
 * Releases GPU memory type usage on destruction, and is the place to put
 * in driver specific hooks to release driver private resources.
 * The caller remains responsible for releasing the bo::reserved lock
 * afterwards.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->funcs->delete_mem_notify)
		bo->bdev->funcs->delete_mem_notify(bo);

	ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
}

static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
{
	int r;

	if (bo->base.resv == &bo->base._resv)
		return 0;

	BUG_ON(!dma_resv_trylock(&bo->base._resv));

	r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
	dma_resv_unlock(&bo->base._resv);
	if (r)
		return r;

	if (bo->type != ttm_bo_type_sg) {
		/* This works because the BO is about to be destroyed and nobody
		 * references it anymore. The only tricky case is the trylock on
		 * the resv object while holding the lru_lock.
		 */
		spin_lock(&bo->bdev->lru_lock);
		bo->base.resv = &bo->base._resv;
		spin_unlock(&bo->bdev->lru_lock);
	}

	return r;
}

static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
	struct dma_resv *resv = &bo->base._resv;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!fence->ops->signaled)
			dma_fence_enable_sw_signaling(fence);
	}
	dma_resv_iter_end(&cursor);
}

/*
 * Block for the dma_resv object to become idle, lock the buffer and clean up
 * the resource and tt object.
 */
static void ttm_bo_delayed_delete(struct work_struct *work)
{
	struct ttm_buffer_object *bo;

	bo = container_of(work, typeof(*bo), delayed_delete);

	dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);
	dma_resv_lock(bo->base.resv, NULL);
	ttm_bo_cleanup_memtype_use(bo);
	dma_resv_unlock(bo->base.resv);
	ttm_bo_put(bo);
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_device *bdev = bo->bdev;
	int ret;

	WARN_ON_ONCE(bo->pin_count);
	WARN_ON_ONCE(bo->bulk_move);

	if (!bo->deleted) {
		ret = ttm_bo_individualize_resv(bo);
		if (ret) {
			/* Last resort: if we fail to allocate memory for the
			 * fences, block for the BO to become idle.
			 */
			dma_resv_wait_timeout(bo->base.resv,
					      DMA_RESV_USAGE_BOOKKEEP, false,
					      30 * HZ);
		}

		if (bo->bdev->funcs->release_notify)
			bo->bdev->funcs->release_notify(bo);

		drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
		ttm_mem_io_free(bdev, bo->resource);

		if (!dma_resv_test_signaled(bo->base.resv,
					    DMA_RESV_USAGE_BOOKKEEP) ||
		    (want_init_on_free() && (bo->ttm != NULL)) ||
		    bo->type == ttm_bo_type_sg ||
		    !dma_resv_trylock(bo->base.resv)) {
			/* The BO is not idle, resurrect it for delayed destroy */
			ttm_bo_flush_all_fences(bo);
			bo->deleted = true;

			spin_lock(&bo->bdev->lru_lock);

			/*
			 * Make pinned bos immediately available to
			 * shrinkers, now that they are queued for
			 * destruction.
			 *
			 * FIXME: QXL is triggering this. Can be removed when the
			 * driver is fixed.
			 */
			if (bo->pin_count) {
				bo->pin_count = 0;
				ttm_resource_move_to_lru_tail(bo->resource);
			}

			kref_init(&bo->kref);
			spin_unlock(&bo->bdev->lru_lock);

			INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);

			/* Schedule the worker on the closest NUMA node. This
			 * improves performance since system memory might be
			 * cleared on free and that is best done on a CPU core
			 * close to it.
			 */
			queue_work_node(bdev->pool.nid, bdev->wq, &bo->delayed_delete);
			return;
		}

		ttm_bo_cleanup_memtype_use(bo);
		dma_resv_unlock(bo->base.resv);
	}

	atomic_dec(&ttm_glob.bo_count);
	bo->destroy(bo);
}

/**
 * ttm_bo_put
 *
 * @bo: The buffer object.
 *
 * Unreference a buffer object.
 */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);

static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
				     struct ttm_operation_ctx *ctx,
				     struct ttm_place *hop)
{
	struct ttm_placement hop_placement;
	struct ttm_resource *hop_mem;
	int ret;

	hop_placement.num_placement = 1;
	hop_placement.placement = hop;

	/* find space in the bounce domain */
	ret = ttm_bo_mem_space(bo, &hop_placement, &hop_mem, ctx);
	if (ret)
		return ret;
	/* move to the bounce domain */
	ret = ttm_bo_handle_move_mem(bo, hop_mem, false, ctx, NULL);
	if (ret) {
		ttm_resource_free(bo, &hop_mem);
		return ret;
	}
	return 0;
}

static int ttm_bo_evict(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource *evict_mem;
	struct ttm_placement placement;
	struct ttm_place hop;
	int ret = 0;

	memset(&hop, 0, sizeof(hop));

	dma_resv_assert_held(bo->base.resv);

	placement.num_placement = 0;
	bdev->funcs->evict_flags(bo, &placement);

	if (!placement.num_placement) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			return ret;

		/*
		 * Since we've already synced, this frees backing store
		 * immediately.
		 */
		return ttm_bo_pipeline_gutting(bo);
	}

	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	do {
		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret != -EMULTIHOP)
			break;

		ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
	} while (!ret);

	if (ret) {
		ttm_resource_free(bo, &evict_mem);
		if (ret != -ERESTARTSYS && ret != -EINTR)
			pr_err("Buffer eviction failed\n");
	}
out:
	return ret;
}

/**
 * ttm_bo_eviction_valuable
 *
 * @bo: The buffer object to evict
 * @place: the placement we need to make room for
 *
 * Check if it is valuable to evict the BO to make room for the given placement.
 */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place)
{
	struct ttm_resource *res = bo->resource;
	struct ttm_device *bdev = bo->bdev;

	dma_resv_assert_held(bo->base.resv);
	if (bo->resource->mem_type == TTM_PL_SYSTEM)
		return true;

	/* Don't evict this BO if it's outside of the
	 * requested placement range
	 */
	return ttm_resource_intersects(bdev, res, place, bo->base.size);
}
EXPORT_SYMBOL(ttm_bo_eviction_valuable);
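
/*
 * Example (illustrative sketch): drivers may wrap this default in their
 * own eviction_valuable hook to veto eviction of BOs they know are in
 * flight ("my_bo_is_in_flight()" is a hypothetical driver predicate):
 *
 *	static bool my_eviction_valuable(struct ttm_buffer_object *bo,
 *					 const struct ttm_place *place)
 *	{
 *		if (my_bo_is_in_flight(bo))
 *			return false;
 *
 *		return ttm_bo_eviction_valuable(bo, place);
 *	}
 */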

/**
 * ttm_bo_evict_first() - Evict the first bo on the manager's LRU list.
 * @bdev: The ttm device.
 * @man: The manager whose bo to evict.
 * @ctx: The TTM operation ctx governing the eviction.
 *
 * Return: 0 if successful or the resource disappeared. Negative error code on error.
 */
int ttm_bo_evict_first(struct ttm_device *bdev, struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx)
{
	struct ttm_resource_cursor cursor;
	struct ttm_buffer_object *bo;
	struct ttm_resource *res;
	unsigned int mem_type;
	int ret = 0;

	spin_lock(&bdev->lru_lock);
	res = ttm_resource_manager_first(man, &cursor);
	ttm_resource_cursor_fini(&cursor);
	if (!res) {
		ret = -ENOENT;
		goto out_no_ref;
	}
	bo = res->bo;
	if (!ttm_bo_get_unless_zero(bo))
		goto out_no_ref;
	mem_type = res->mem_type;
	spin_unlock(&bdev->lru_lock);
	ret = ttm_bo_reserve(bo, ctx->interruptible, ctx->no_wait_gpu, NULL);
	if (ret)
		goto out_no_lock;
	if (!bo->resource || bo->resource->mem_type != mem_type)
		goto out_bo_moved;

	if (bo->deleted) {
		ret = ttm_bo_wait_ctx(bo, ctx);
		if (!ret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		ret = ttm_bo_evict(bo, ctx);
	}
out_bo_moved:
	dma_resv_unlock(bo->base.resv);
out_no_lock:
	ttm_bo_put(bo);
	return ret;

out_no_ref:
	spin_unlock(&bdev->lru_lock);
	return ret;
}

/**
 * struct ttm_bo_evict_walk - Parameters for the evict walk.
 */
struct ttm_bo_evict_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @place: The place passed to the resource allocation. */
	const struct ttm_place *place;
	/** @evictor: The buffer object we're trying to make room for. */
	struct ttm_buffer_object *evictor;
	/** @res: The allocated resource if any. */
	struct ttm_resource **res;
	/** @evicted: Number of successful evictions. */
	unsigned long evicted;
};

static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_bo_evict_walk *evict_walk =
		container_of(walk, typeof(*evict_walk), walk);
	s64 lret;

	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, evict_walk->place))
		return 0;

	if (bo->deleted) {
		lret = ttm_bo_wait_ctx(bo, walk->ctx);
		if (!lret)
			ttm_bo_cleanup_memtype_use(bo);
	} else {
		lret = ttm_bo_evict(bo, walk->ctx);
	}

	if (lret)
		goto out;

	evict_walk->evicted++;
	if (evict_walk->res)
		lret = ttm_resource_alloc(evict_walk->evictor, evict_walk->place,
					  evict_walk->res);
	if (lret == 0)
		return 1;
out:
	/* Errors that should terminate the walk. */
	if (lret == -ENOSPC)
		return -EBUSY;

	return lret;
}

static const struct ttm_lru_walk_ops ttm_evict_walk_ops = {
	.process_bo = ttm_bo_evict_cb,
};

static int ttm_bo_evict_alloc(struct ttm_device *bdev,
			      struct ttm_resource_manager *man,
			      const struct ttm_place *place,
			      struct ttm_buffer_object *evictor,
			      struct ttm_operation_ctx *ctx,
			      struct ww_acquire_ctx *ticket,
			      struct ttm_resource **res)
{
	struct ttm_bo_evict_walk evict_walk = {
		.walk = {
			.ops = &ttm_evict_walk_ops,
			.ctx = ctx,
			.ticket = ticket,
		},
		.place = place,
		.evictor = evictor,
		.res = res,
	};
	s64 lret;

	evict_walk.walk.trylock_only = true;
	lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	if (lret || !ticket)
		goto out;

	/* If ticket-locking, repeat while making progress. */
	evict_walk.walk.trylock_only = false;
	do {
		/* The walk may clear the evict_walk.walk.ticket field */
		evict_walk.walk.ticket = ticket;
		evict_walk.evicted = 0;
		lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1);
	} while (!lret && evict_walk.evicted);
out:
	if (lret < 0)
		return lret;
	if (lret == 0)
		return -EBUSY;
	return 0;
}

/**
 * ttm_bo_pin - Pin the buffer object.
 * @bo: The buffer object to pin
 *
 * Make sure the buffer is not evicted any more during memory pressure.
 * @bo must be unpinned again by calling ttm_bo_unpin().
 */
void ttm_bo_pin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	++bo->pin_count;
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_pin);

/**
 * ttm_bo_unpin - Unpin the buffer object.
 * @bo: The buffer object to unpin
 *
 * Allows the buffer object to be evicted again during memory pressure.
 */
void ttm_bo_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	WARN_ON_ONCE(!kref_read(&bo->kref));
	if (WARN_ON_ONCE(!bo->pin_count))
		return;

	spin_lock(&bo->bdev->lru_lock);
	--bo->pin_count;
	if (bo->resource)
		ttm_resource_add_bulk_move(bo->resource, bo);
	spin_unlock(&bo->bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unpin);
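
/*
 * Example (sketch): pinning a BO around a window where its placement must
 * not change, e.g. while it is mapped for scanout or DMA. Both calls
 * require the reservation lock:
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_pin(bo);
 *	dma_resv_unlock(bo->base.resv);
 *
 *	... the BO cannot be evicted or swapped out here ...
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ttm_bo_unpin(bo);
 *	dma_resv_unlock(bo->base.resv);
 */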

/*
 * Add the last move fence to the BO as kernel dependency and reserve a new
 * fence slot.
 */
static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
				 struct ttm_resource_manager *man,
				 bool no_wait_gpu)
{
	struct dma_fence *fence;
	int ret;

	spin_lock(&man->move_lock);
	fence = dma_fence_get(man->move);
	spin_unlock(&man->move_lock);

	if (!fence)
		return 0;

	if (no_wait_gpu) {
		ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
		dma_fence_put(fence);
		return ret;
	}

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	dma_fence_put(fence);
	return ret;
}

/**
 * ttm_bo_alloc_resource - Allocate backing store for a BO
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @ctx: if and how to sleep, lock buffers and alloc memory
 * @force_space: If we should evict buffers to force space
 * @res: The resulting struct ttm_resource.
 *
 * Allocates a resource for the buffer object pointed to by @bo, using the
 * placement flags in @placement, potentially evicting other buffer objects when
 * @force_space is true.
 * This function may sleep while waiting for resources to become available.
 * Returns:
 * -EBUSY: No space available (only if @ctx->no_wait_gpu is true).
 * -ENOSPC: Could not allocate space for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
static int ttm_bo_alloc_resource(struct ttm_buffer_object *bo,
				 struct ttm_placement *placement,
				 struct ttm_operation_ctx *ctx,
				 bool force_space,
				 struct ttm_resource **res)
{
	struct ttm_device *bdev = bo->bdev;
	struct ww_acquire_ctx *ticket;
	int i, ret;

	ticket = dma_resv_locking_ctx(bo->base.resv);
	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		return ret;

	for (i = 0; i < placement->num_placement; ++i) {
		const struct ttm_place *place = &placement->placement[i];
		struct ttm_resource_manager *man;
		bool may_evict;

		man = ttm_manager_type(bdev, place->mem_type);
		if (!man || !ttm_resource_manager_used(man))
			continue;

		if (place->flags & (force_space ? TTM_PL_FLAG_DESIRED :
				    TTM_PL_FLAG_FALLBACK))
			continue;

		may_evict = (force_space && place->mem_type != TTM_PL_SYSTEM);
		ret = ttm_resource_alloc(bo, place, res);
		if (ret) {
			if (ret != -ENOSPC)
				return ret;
			if (!may_evict)
				continue;

			ret = ttm_bo_evict_alloc(bdev, man, place, bo, ctx,
						 ticket, res);
			if (ret == -EBUSY)
				continue;
			if (ret)
				return ret;
		}

		ret = ttm_bo_add_move_fence(bo, man, ctx->no_wait_gpu);
		if (unlikely(ret)) {
			ttm_resource_free(bo, res);
			if (ret == -EBUSY)
				continue;

			return ret;
		}
		return 0;
	}

	return -ENOSPC;
}

/*
 * ttm_bo_mem_space - Wrapper around ttm_bo_alloc_resource
 *
 * @bo: Pointer to a struct ttm_buffer_object of which we want a resource for
 * @placement: Proposed new placement for the buffer object
 * @res: The resulting struct ttm_resource.
 * @ctx: if and how to sleep, lock buffers and alloc memory
 *
 * Tries idle allocation first and then forceful eviction of buffers. See
 * ttm_bo_alloc_resource for details.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **res,
		     struct ttm_operation_ctx *ctx)
{
	bool force_space = false;
	int ret;

	do {
		ret = ttm_bo_alloc_resource(bo, placement, ctx,
					    force_space, res);
		force_space = !force_space;
	} while (ret == -ENOSPC && force_space);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);

/**
 * ttm_bo_validate
 *
 * @bo: The buffer object.
 * @placement: Proposed placement for the buffer object.
 * @ctx: validation parameters.
 *
 * Changes placement and caching policy of the buffer object
 * according to the proposed placement.
 * Returns
 * -EINVAL on invalid proposed placement.
 * -ENOMEM on out-of-memory condition.
 * -EBUSY if @ctx->no_wait_gpu is true and the buffer is busy.
 * -ERESTARTSYS if interrupted by a signal.
 */
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx)
{
	struct ttm_resource *res;
	struct ttm_place hop;
	bool force_space;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	/*
	 * Remove the backing store if no placement is given.
	 */
	if (!placement->num_placement)
		return ttm_bo_pipeline_gutting(bo);

	force_space = false;
	do {
		/* Check whether we need to move buffer. */
		if (bo->resource &&
		    ttm_resource_compatible(bo->resource, placement,
					    force_space))
			return 0;

		/* Moving of pinned BOs is forbidden */
		if (bo->pin_count)
			return -EINVAL;

		/*
		 * Determine where to move the buffer.
		 *
		 * If the driver determines that the move is going to need
		 * an extra step, it returns -EMULTIHOP; the buffer is then
		 * moved to the temporary stop and the driver is called
		 * again to make the second hop.
		 */
		ret = ttm_bo_alloc_resource(bo, placement, ctx, force_space,
					    &res);
		force_space = !force_space;
		if (ret == -ENOSPC)
			continue;
		if (ret)
			return ret;

bounce:
		ret = ttm_bo_handle_move_mem(bo, res, false, ctx, &hop);
		if (ret == -EMULTIHOP) {
			ret = ttm_bo_bounce_temp_buffer(bo, ctx, &hop);
			/* try and move to final place now. */
			if (!ret)
				goto bounce;
		}
		if (ret) {
			ttm_resource_free(bo, &res);
			return ret;
		}

	} while (ret && force_space);

	/* For backward compatibility with userspace */
	if (ret == -ENOSPC)
		return -ENOMEM;

	/*
	 * We might need to add a TTM.
	 */
	if (!bo->resource || bo->resource->mem_type == TTM_PL_SYSTEM) {
		ret = ttm_tt_create(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
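
/*
 * Example (hedged sketch; the single TTM_PL_VRAM placement and the ctx
 * values are illustrative only): migrate a BO into VRAM, sleeping
 * interruptibly and evicting other BOs if needed:
 *
 *	struct ttm_place place = { .mem_type = TTM_PL_VRAM };
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &place,
 *	};
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_validate(bo, &placement, &ctx);
 *	dma_resv_unlock(bo->base.resv);
 */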

/**
 * ttm_bo_init_reserved
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @ctx: TTM operation context for memory allocation.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function, enables driver-specific objects
 * derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
 * and it is the caller's responsibility to call ttm_bo_unreserve.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	int ret;

	kref_init(&bo->kref);
	bo->bdev = bdev;
	bo->type = type;
	bo->page_alignment = alignment;
	bo->destroy = destroy;
	bo->pin_count = 0;
	bo->sg = sg;
	bo->bulk_move = NULL;
	if (resv)
		bo->base.resv = resv;
	else
		bo->base.resv = &bo->base._resv;
	atomic_inc(&ttm_glob.bo_count);

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) {
		ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
					 PFN_UP(bo->base.size));
		if (ret)
			goto err_put;
	}

	/* passed reservation objects should already be locked,
	 * since otherwise lockdep will be angered in radeon.
	 */
	if (!resv)
		WARN_ON(!dma_resv_trylock(bo->base.resv));
	else
		dma_resv_assert_held(resv);

	ret = ttm_bo_validate(bo, placement, ctx);
	if (unlikely(ret))
		goto err_unlock;

	return 0;

err_unlock:
	if (!resv)
		dma_resv_unlock(bo->base.resv);

err_put:
	ttm_bo_put(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_init_reserved);

/**
 * ttm_bo_init_validate
 *
 * @bdev: Pointer to a ttm_device struct.
 * @bo: Pointer to a ttm_buffer_object to be initialized.
 * @type: Requested type of buffer object.
 * @placement: Initial placement for buffer object.
 * @alignment: Data alignment in pages.
 * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
 * @sg: Scatter-gather table.
 * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
 * @destroy: Destroy function. Use NULL for kfree().
 *
 * This function initializes a pre-allocated struct ttm_buffer_object.
 * As this object may be part of a larger structure, this function,
 * together with the @destroy function,
 * enables driver-specific objects derived from a ttm_buffer_object.
 *
 * On successful return, the caller owns an object kref to @bo. The kref and
 * list_kref are usually set to 1, but note that in some situations, other
 * tasks may already be holding references to @bo as well.
 *
 * If a failure occurs, the function will call the @destroy function. Thus,
 * after a failure, dereferencing @bo is illegal and will likely cause memory
 * corruption.
 *
 * Returns
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid placement flags.
 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
 */
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy) (struct ttm_buffer_object *))
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	int ret;

	ret = ttm_bo_init_reserved(bdev, bo, type, placement, alignment, &ctx,
				   sg, resv, destroy);
	if (ret)
		return ret;

	if (!resv)
		ttm_bo_unreserve(bo);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_validate);
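
/*
 * Example (hedged sketch; struct my_bo embedding a ttm_buffer_object,
 * my_bo_destroy() and the surrounding variables are hypothetical driver
 * code): initialize and immediately validate a driver BO, letting TTM
 * allocate the reservation object. The embedded GEM base object must be
 * initialized first since its size is used for the vma offset:
 *
 *	struct my_bo *ubo = kzalloc(sizeof(*ubo), GFP_KERNEL);
 *	int ret;
 *
 *	if (!ubo)
 *		return -ENOMEM;
 *	drm_gem_private_object_init(ddev, &ubo->tbo.base, size);
 *	ret = ttm_bo_init_validate(bdev, &ubo->tbo, ttm_bo_type_device,
 *				   &placement, 0, true, NULL, NULL,
 *				   my_bo_destroy);
 *	if (ret)
 *		return ret;	(@destroy has already freed ubo here)
 */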

/*
 * buffer object vm functions.
 */

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_device *bdev = bo->bdev;

	drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
	ttm_mem_io_free(bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

/**
 * ttm_bo_wait_ctx - wait for buffer idle.
 *
 * @bo: The buffer object.
 * @ctx: defines how to wait
 *
 * Waits for the buffer to be idle. The timeout used depends on the context.
 * Returns -EBUSY if the wait timed out, -ERESTARTSYS if interrupted by a
 * signal or zero on success.
 */
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
	long ret;

	if (ctx->no_wait_gpu) {
		if (dma_resv_test_signaled(bo->base.resv,
					   DMA_RESV_USAGE_BOOKKEEP))
			return 0;
		else
			return -EBUSY;
	}

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    ctx->interruptible, 15 * HZ);
	if (unlikely(ret < 0))
		return ret;
	if (unlikely(ret == 0))
		return -EBUSY;
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait_ctx);
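
/*
 * Example (sketch): the two common waiting modes, selected via the ctx.
 * A non-blocking poll:
 *
 *	struct ttm_operation_ctx ctx = { .no_wait_gpu = true };
 *
 *	if (ttm_bo_wait_ctx(bo, &ctx) == -EBUSY)
 *		return -EBUSY;	(still busy, try again later)
 *
 * And an interruptible wait bounded by the 15 second timeout above:
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	int ret = ttm_bo_wait_ctx(bo, &ctx);
 */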

/**
 * struct ttm_bo_swapout_walk - Parameters for the swapout walk
 */
struct ttm_bo_swapout_walk {
	/** @walk: The walk base parameters. */
	struct ttm_lru_walk walk;
	/** @gfp_flags: The gfp flags to use for ttm_tt_swapout() */
	gfp_t gfp_flags;
};

static s64
ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo)
{
	struct ttm_place place = {.mem_type = bo->resource->mem_type};
	struct ttm_bo_swapout_walk *swapout_walk =
		container_of(walk, typeof(*swapout_walk), walk);
	struct ttm_operation_ctx *ctx = walk->ctx;
	s64 ret;

	/*
	 * While the bo may already reside in SYSTEM placement, set
	 * SYSTEM as new placement to cover also the move further below.
	 * The driver may use the fact that we're moving from SYSTEM
	 * as an indication that we're about to swap out.
	 */
	if (bo->pin_count || !bo->bdev->funcs->eviction_valuable(bo, &place)) {
		ret = -EBUSY;
		goto out;
	}

	if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
	    bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
	    bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) {
		ret = -EBUSY;
		goto out;
	}

	if (bo->deleted) {
		pgoff_t num_pages = bo->ttm->num_pages;

		ret = ttm_bo_wait_ctx(bo, ctx);
		if (ret)
			goto out;

		ttm_bo_cleanup_memtype_use(bo);
		ret = num_pages;
		goto out;
	}

	/*
	 * Move to system cached
	 */
	if (bo->resource->mem_type != TTM_PL_SYSTEM) {
		struct ttm_resource *evict_mem;
		struct ttm_place hop;

		memset(&hop, 0, sizeof(hop));
		place.mem_type = TTM_PL_SYSTEM;
		ret = ttm_resource_alloc(bo, &place, &evict_mem);
		if (ret)
			goto out;

		ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop);
		if (ret) {
			WARN(ret == -EMULTIHOP,
			     "Unexpected multihop in swapout - likely driver bug.\n");
			ttm_resource_free(bo, &evict_mem);
			goto out;
		}
	}

	/*
	 * Make sure BO is idle.
	 */
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out;

	ttm_bo_unmap_virtual(bo);
	if (bo->bdev->funcs->swap_notify)
		bo->bdev->funcs->swap_notify(bo);

	if (ttm_tt_is_populated(bo->ttm))
		ret = ttm_tt_swapout(bo->bdev, bo->ttm, swapout_walk->gfp_flags);

out:
	/* Consider -ENOMEM and -ENOSPC non-fatal. */
	if (ret == -ENOMEM || ret == -ENOSPC)
		ret = -EBUSY;

	return ret;
}

const struct ttm_lru_walk_ops ttm_swap_ops = {
	.process_bo = ttm_bo_swapout_cb,
};

/**
 * ttm_bo_swapout() - Swap out buffer objects on the LRU list to shmem.
 * @bdev: The ttm device.
 * @ctx: The ttm_operation_ctx governing the swapout operation.
 * @man: The resource manager whose resources / buffer objects are
 * going to be swapped out.
 * @gfp_flags: The gfp flags used for shmem page allocations.
 * @target: The desired number of bytes to swap out.
 *
 * Return: The number of bytes actually swapped out, or negative error code
 * on error.
 */
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target)
{
	struct ttm_bo_swapout_walk swapout_walk = {
		.walk = {
			.ops = &ttm_swap_ops,
			.ctx = ctx,
			.trylock_only = true,
		},
		.gfp_flags = gfp_flags,
	};

	return ttm_lru_walk_for_evict(&swapout_walk.walk, bdev, man, target);
}

void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
	if (bo->ttm == NULL)
		return;

	ttm_tt_unpopulate(bo->bdev, bo->ttm);
	ttm_tt_destroy(bo->bdev, bo->ttm);
	bo->ttm = NULL;
}