/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

#include <drm/drm_cache.h>

struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @clear: Whether to clear rather than copy.
 * @num_pages: Number of pages of the operation.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * This function is intended to be usable from an async context, running
 * under a dma-fence if desired.
 */
void ttm_move_memcpy(bool clear,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct iosys_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (clear) {
		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);
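
/*
 * Example: a driver that wants to scrub the I/O backing of a buffer
 * object could drive ttm_move_memcpy() in clear mode through a
 * linear-io iterator. This is only a sketch; with @clear == true the
 * source iterator's pages are never touched, so the destination
 * iterator may be passed for both arguments:
 *
 *	struct ttm_kmap_iter_linear_io io;
 *	struct ttm_kmap_iter *iter;
 *
 *	iter = ttm_kmap_iter_linear_io_init(&io, bo->bdev, bo->resource);
 *	if (!IS_ERR(iter)) {
 *		ttm_move_memcpy(true, PFN_UP(bo->resource->size), iter, iter);
 *		ttm_kmap_iter_linear_io_fini(&io, bo->bdev, bo->resource);
 *	}
 */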

/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: operation context
 * @dst_mem: struct ttm_resource indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful, free any old aperture space and
 * update (@bo)->resource. If unsuccessful, the old data remains
 * untouched, and it's up to the caller to free the memory space
 * indicated by @dst_mem.
 * Returns:
 * !0: Failure.
 */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man;
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	bool clear;
	int ret = 0;

	if (WARN_ON(!src_mem))
		return -EINVAL;

	src_man = ttm_manager_type(bdev, src_mem->mem_type);
	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
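
/*
 * Example: a driver without a copy engine can implement its
 * &ttm_device_funcs.move callback directly on top of this fallback.
 * A hypothetical sketch (my_driver_move() is not part of TTM):
 *
 *	static int my_driver_move(struct ttm_buffer_object *bo, bool evict,
 *				  struct ttm_operation_ctx *ctx,
 *				  struct ttm_resource *new_mem,
 *				  struct ttm_place *hop)
 *	{
 *		return ttm_bo_move_memcpy(bo, ctx, new_mem);
 *	}
 */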

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	dma_resv_fini(&fbo->base.base._resv);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
	if (ret) {
		dma_resv_unlock(&fbo->base.base._resv);
		kfree(fbo);
		return ret;
	}

	if (fbo->base.resource) {
		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
		bo->resource = NULL;
		ttm_bo_set_bulk_move(&fbo->base, NULL);
	} else {
		fbo->base.bulk_move = NULL;
	}

	ttm_bo_get(bo);
	fbo->bo = bo;

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

/**
 * ttm_io_prot
 *
 * @bo: ttm buffer object
 * @res: ttm resource object
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @res and, for
 * TT-backed resources, by @bo's ttm_tt.
 */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	if (man->use_tt) {
		caching = bo->ttm->caching;
		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
			tmp = pgprot_decrypted(tmp);
	} else {
		caching = res->bus.caching;
	}

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);
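
/*
 * Example: a CPU fault handler typically derives the final PTE
 * protection from the VMA's default protection. A sketch, assuming a
 * reserved bo with a valid resource:
 *
 *	pgprot_t prot = vma->vm_page_prot;
 *
 *	prot = ttm_io_prot(bo, bo->resource, prot);
 *	... insert PFNs into the VMA using prot ...
 */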

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource_manager *man =
		ttm_manager_type(bo->bdev, bo->resource->mem_type);
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached &&
	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object.
 * @start_page: The first page to map.
 * @num_pages: Number of pages to map.
 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
 * used to obtain a virtual address to the data.
 *
 * Returns:
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > PFN_UP(bo->resource->size))
		return -EINVAL;
	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
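
/*
 * Example: writing to the first page of a bo through a kmap. A sketch,
 * assuming the bo is reserved or pinned for the duration of the access;
 * @data and @len are hypothetical:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *
 *	if (!ttm_bo_kmap(bo, 0, 1, &map)) {
 *		virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		if (is_iomem)
 *			memcpy_toio((void __iomem *)virtual, data, len);
 *		else
 *			memcpy(virtual, data, len);
 *		ttm_bo_kunmap(&map);
 *	}
 */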

/**
 * ttm_bo_kunmap
 *
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_kmap.
 */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

/**
 * ttm_bo_vmap
 *
 * @bo: The buffer object.
 * @map: pointer to a struct iosys_map representing the map.
 *
 * Sets up a kernel virtual mapping, using ioremap or vmap to the
 * data in the buffer object. The parameter @map returns the virtual
 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
 *
 * Returns:
 * -ENOMEM: Out of memory.
 * -EINVAL: Invalid range.
 */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	dma_resv_assert_held(bo->base.resv);

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		iosys_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		iosys_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

/**
 * ttm_bo_vunmap
 *
 * @bo: The buffer object.
 * @map: Object describing the map to unmap.
 *
 * Unmaps a kernel map set up by ttm_bo_vmap().
 */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
	struct ttm_resource *mem = bo->resource;

	dma_resv_assert_held(bo->base.resv);

	if (iosys_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	iosys_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
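
/*
 * Example: uploading data through a whole-object mapping. A sketch;
 * @data and @len are hypothetical, and the reservation must be held
 * across the map/access/unmap sequence:
 *
 *	struct iosys_map map;
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	if (!ttm_bo_vmap(bo, &map)) {
 *		iosys_map_memcpy_to(&map, 0, data, len);
 *		ttm_bo_vunmap(bo, &map);
 *	}
 *	dma_resv_unlock(bo->base.resv);
 */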

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	long ret;

	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				    false, 15 * HZ);
	if (ret == 0)
		return -EBUSY;
	if (ret < 0)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_fence(&ghost_obj->base._resv, fence,
			   DMA_RESV_USAGE_KERNEL);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * BO doesn't have a TTM we need to bind/unbind. Just remember
	 * this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);
}

/**
 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @pipeline: evictions are to be pipelined.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
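
/*
 * Example: a driver move callback that schedules a blit on its copy
 * engine and hands the resulting fence to TTM. A sketch;
 * my_blit_submit() is a hypothetical driver function returning a
 * referenced &dma_fence:
 *
 *	static int my_driver_move(struct ttm_buffer_object *bo, bool evict,
 *				  struct ttm_operation_ctx *ctx,
 *				  struct ttm_resource *new_mem,
 *				  struct ttm_place *hop)
 *	{
 *		struct dma_fence *fence;
 *		int ret;
 *
 *		fence = my_blit_submit(bo, new_mem);
 *		if (IS_ERR(fence))
 *			return PTR_ERR(fence);
 *
 *		ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true,
 *						new_mem);
 *		dma_fence_put(fence);
 *		return ret;
 *	}
 */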

/**
 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_mem: struct ttm_resource indicating where to move.
 *
 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
 * by the caller to be idle. Typically used after memcpy buffer moves.
 */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret;

	ret = ttm_bo_wait_free_node(bo, man->use_tt);
	if (WARN_ON(ret))
		return;

	ttm_bo_assign_mem(bo, new_mem);
}
EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	struct ttm_buffer_object *ghost;
	struct ttm_tt *ttm;
	int ret;

	/* If already idle, no need for ghost object dance. */
	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				return ret;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		return ret;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret) {
		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
				      false, MAX_SCHEDULE_TIMEOUT);
	}

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);
	return ret;
}
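
/*
 * Example: a driver-side "purge" helper, as might back a DONTNEED-style
 * madvise ioctl, could discard a bo's backing store like this (sketch;
 * error handling elided):
 *
 *	dma_resv_lock(bo->base.resv, NULL);
 *	ret = ttm_bo_pipeline_gutting(bo);
 *	dma_resv_unlock(bo->base.resv);
 */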