/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/reservation.h>
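
/* Release the memory node (mm_node) backing the BO's current placement. */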
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
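
/*
 * Move a buffer by rebinding its TTM page array: wait for the GPU, unbind
 * the pages from the old placement (unless it was system memory), switch
 * the caching attributes, and bind the pages at the new placement.
 */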
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool interruptible, bool no_wait_gpu,
		    struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);

		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				pr_err("Failed to expire sync object before unbinding TTM\n");
			return ret;
		}

		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
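
/*
 * Serialize driver io_mem_reserve / io_mem_free calls. Memory types that set
 * io_reserve_fastpath skip the mutex entirely.
 */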
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}
EXPORT_SYMBOL(ttm_mem_io_lock);

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}
EXPORT_SYMBOL(ttm_mem_io_unlock);
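
/*
 * Reclaim an io space reservation by unmapping the least recently used BO on
 * the manager's io_reserve_lru list. Returns -EAGAIN if there is nothing to
 * evict.
 */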
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
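
/*
 * Reserve and release the io space (typically an aperture range) backing a
 * memory region. Reservations are refcounted; when the driver reports it is
 * out of io space, LRU reservations are evicted and the call is retried.
 */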
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
		       struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}
EXPORT_SYMBOL(ttm_mem_io_reserve);

void ttm_mem_io_free(struct ttm_bo_device *bdev,
		     struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}
EXPORT_SYMBOL(ttm_mem_io_free);
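
/*
 * Per-BO helpers that track whether the BO holds an io reservation for its
 * CPU (VM) mappings, and keep it on the manager's io_reserve LRU so the
 * reservation can be evicted under io space pressure.
 */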
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
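
/*
 * Map / unmap a whole memory region for CPU access during a memcpy move.
 * System memory regions are not iomem and yield a NULL virtual address;
 * iomem regions are ioremapped write-combined or uncached, matching the
 * requested placement caching flags.
 */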
static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			       void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
				void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}
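
/* Copy one page between two iomem mappings, 32 bits at a time. */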
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
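
/*
 * Copy a single page between an iomem mapping and a TTM page, mapping the
 * TTM page with protection bits that match the BO's caching attributes.
 * On x86 this uses kmap_atomic_prot(); elsewhere it falls back to vmap()
 * whenever a non-default protection is required.
 */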
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
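
/*
 * Fallback move that copies the buffer contents with the CPU, page by page.
 * Both the old and the new region are mapped (ioremapped if they are iomem),
 * and the copy runs backwards when the regions may overlap within the same
 * memory type.
 */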
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool interruptible, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	/*
	 * Single TTM move. NOP.
	 */
	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;

	/*
	 * Don't move nonexistent data. Clear destination instead.
	 */
	if (old_iomap == NULL &&
	    (ttm == NULL || (ttm->state == tt_unpopulated &&
			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
		goto out2;
	}

	/*
	 * TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	/*
	 * Copy forward by default; copy backwards (starting from the last
	 * page) when destination and source may overlap within the same
	 * memory type, so pages aren't overwritten before they are read.
	 */
	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);

	/*
	 * On error, keep the mm node!
	 */
	if (!ret)
		ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&bo->glob->bo_count);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	mutex_init(&fbo->wu_mutex);
	fbo->moving = NULL;
	drm_vma_node_reset(&fbo->vma_node);
	atomic_set(&fbo->cpu_writers, 0);

	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;
	fbo->resv = &fbo->ttm_resv;
	reservation_object_init(fbo->resv);
	ret = ww_mutex_trylock(&fbo->resv->lock);
	WARN_ON(!ret);

	*new_obj = fbo;
	return 0;
}
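
/*
 * Derive the page protection for mapping a BO from its placement caching
 * flags: cached placements keep the kernel default, while write-combined and
 * uncached placements adjust the protection where the architecture supports
 * it.
 */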
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
	/* Cached mappings need no adjustment */
	if (caching_flags & TTM_PL_FLAG_CACHED)
		return tmp;

#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
    defined(__powerpc__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
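
/*
 * Helpers for ttm_bo_kmap(): map an iomem BO with ioremap(), or map a
 * system-memory BO either with kmap() (single cached page) or vmap().
 */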
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
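
/*
 * Set up a CPU mapping of part of a buffer object. The mapping is described
 * by the ttm_bo_kmap_obj cookie, which ttm_bo_kunmap() later uses to pick
 * the matching teardown (iounmap, vunmap or kunmap).
 */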
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
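
/*
 * Finish an accelerated (GPU-copied) move: attach the move fence to the BO's
 * reservation object and either wait for it (eviction) or hand the old
 * placement to a "ghost" buffer object that is released once the fence
 * signals.
 */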
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;

	reservation_object_add_excl_fence(bo->resv, fence);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
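
/*
 * Like ttm_bo_move_accel_cleanup(), but avoids stalling on eviction: when
 * the source memory type is fixed (has no TTM to unbind), the fence is
 * remembered as the source manager's move fence so the freed space is only
 * reused once the copy has completed.
 */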
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
			 struct dma_fence *fence, bool evict,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg *old_mem = &bo->mem;

	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];

	int ret;

	reservation_object_add_excl_fence(bo->resv, fence);

	if (!evict) {
		struct ttm_buffer_object *ghost_obj;

		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		reservation_object_add_excl_fence(ghost_obj->resv, fence);

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);

	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {

		/**
		 * BO doesn't have a TTM we need to bind/unbind. Just remember
		 * this eviction and free up the allocation
		 */

		spin_lock(&from->move_lock);
		if (!from->move || dma_fence_is_later(fence, from->move)) {
			dma_fence_put(from->move);
			from->move = dma_fence_get(fence);
		}
		spin_unlock(&from->move_lock);

		ttm_bo_free_old_node(bo);

		dma_fence_put(bo->moving);
		bo->moving = dma_fence_get(fence);

	} else {
		/**
		 * Last resort, wait for the move to be completed.
		 *
		 * Should never happen in practice.
		 */

		ret = ttm_bo_wait(bo, false, false);
		if (ret)
			return ret;

		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_pipeline_move);