1 /*
2 * Copyright 2007 Dave Airlied
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24 /*
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */
29
30 #include <linux/dma-mapping.h>
31 #include <linux/swiotlb.h>
32
33 #include "nouveau_drv.h"
34 #include "nouveau_dma.h"
35 #include "nouveau_fence.h"
36
37 #include "nouveau_bo.h"
38 #include "nouveau_ttm.h"
39 #include "nouveau_gem.h"
40
41 /*
42 * NV10-NV40 tiling helpers
43 */
44
45 static void
46 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
47 u32 addr, u32 size, u32 pitch, u32 flags)
48 {
49 struct nouveau_drm *drm = nouveau_drm(dev);
50 int i = reg - drm->tile.reg;
51 struct nvkm_device *device = nvxx_device(&drm->device);
52 struct nvkm_fb *fb = device->fb;
53 struct nvkm_fb_tile *tile = &fb->tile.region[i];
54
55 nouveau_fence_unref(&reg->fence);
56
57 if (tile->pitch)
58 nvkm_fb_tile_fini(fb, i, tile);
59
60 if (pitch)
61 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
62
63 nvkm_fb_tile_prog(fb, i, tile);
64 }
65
66 static struct nouveau_drm_tile *
67 nv10_bo_get_tile_region(struct drm_device *dev, int i)
68 {
69 struct nouveau_drm *drm = nouveau_drm(dev);
70 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
71
72 spin_lock(&drm->tile.lock);
73
74 if (!tile->used &&
75 (!tile->fence || nouveau_fence_done(tile->fence)))
76 tile->used = true;
77 else
78 tile = NULL;
79
80 spin_unlock(&drm->tile.lock);
81 return tile;
82 }
83
84 static void
85 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
86 struct fence *fence)
87 {
88 struct nouveau_drm *drm = nouveau_drm(dev);
89
90 if (tile) {
91 spin_lock(&drm->tile.lock);
92 tile->fence = (struct nouveau_fence *)fence_get(fence);
93 tile->used = false;
94 spin_unlock(&drm->tile.lock);
95 }
96 }
97
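/*
 * nv10_bo_set_tiling() scans every hardware tile region, releasing idle
 * entries it walks over, claims the first free slot, programs it with the
 * requested addr/size/pitch/flags via nv10_bo_update_tile_region(), and
 * returns it; NULL means all regions are still in use.
 */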
98 static struct nouveau_drm_tile *
99 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
100 u32 size, u32 pitch, u32 flags)
101 {
102 struct nouveau_drm *drm = nouveau_drm(dev);
103 struct nvkm_fb *fb = nvxx_fb(&drm->device);
104 struct nouveau_drm_tile *tile, *found = NULL;
105 int i;
106
107 for (i = 0; i < fb->tile.regions; i++) {
108 tile = nv10_bo_get_tile_region(dev, i);
109
110 if (pitch && !found) {
111 found = tile;
112 continue;
113
114 } else if (tile && fb->tile.region[i].pitch) {
115 /* Kill an unused tile region. */
116 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
117 }
118
119 nv10_bo_put_tile_region(dev, tile, NULL);
120 }
121
122 if (found)
123 nv10_bo_update_tile_region(dev, found, addr, size,
124 pitch, flags);
125 return found;
126 }
127
128 static void
129 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
130 {
131 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
132 struct drm_device *dev = drm->dev;
133 struct nouveau_bo *nvbo = nouveau_bo(bo);
134
135 if (unlikely(nvbo->gem.filp))
136 DRM_ERROR("bo %p still attached to GEM object\n", bo);
137 WARN_ON(nvbo->pin_refcnt > 0);
138 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
139 kfree(nvbo);
140 }
141
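/*
 * nouveau_bo_fixup_align() grows the requested size and alignment to meet
 * the tiling constraints of pre-Tesla chips (16KiB-64KiB alignment
 * depending on chipset, when a tile mode is set) or, on Tesla and later,
 * to the BO's page size; the result is always rounded up to a full CPU
 * page.
 */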
142 static void
143 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
144 int *align, int *size)
145 {
146 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
147 struct nvif_device *device = &drm->device;
148
149 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
150 if (nvbo->tile_mode) {
151 if (device->info.chipset >= 0x40) {
152 *align = 65536;
153 *size = roundup(*size, 64 * nvbo->tile_mode);
154
155 } else if (device->info.chipset >= 0x30) {
156 *align = 32768;
157 *size = roundup(*size, 64 * nvbo->tile_mode);
158
159 } else if (device->info.chipset >= 0x20) {
160 *align = 16384;
161 *size = roundup(*size, 64 * nvbo->tile_mode);
162
163 } else if (device->info.chipset >= 0x10) {
164 *align = 16384;
165 *size = roundup(*size, 32 * nvbo->tile_mode);
166 }
167 }
168 } else {
169 *size = roundup(*size, (1 << nvbo->page_shift));
170 *align = max((1 << nvbo->page_shift), *align);
171 }
172
173 *size = roundup(*size, PAGE_SIZE);
174 }
175
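/*
 * nouveau_bo_new() allocates a nouveau_bo, sizes/aligns it via
 * nouveau_bo_fixup_align() and registers it with TTM.  If ttm_bo_init()
 * fails it calls nouveau_bo_del_ttm() itself, so the caller must not free
 * the object again.  Minimal call sketch (error handling elided, the VRAM
 * placement flag is only an example):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
 *			     NULL, NULL, &nvbo);
 */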
176 int
177 nouveau_bo_new(struct drm_device *dev, int size, int align,
178 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
179 struct sg_table *sg, struct reservation_object *robj,
180 struct nouveau_bo **pnvbo)
181 {
182 struct nouveau_drm *drm = nouveau_drm(dev);
183 struct nouveau_bo *nvbo;
184 size_t acc_size;
185 int ret;
186 int type = ttm_bo_type_device;
187 int lpg_shift = 12;
188 int max_size;
189
190 if (drm->client.vm)
191 lpg_shift = drm->client.vm->mmu->lpg_shift;
192 max_size = INT_MAX & ~((1 << lpg_shift) - 1);
193
194 if (size <= 0 || size > max_size) {
195 NV_WARN(drm, "skipped size %x\n", (u32)size);
196 return -EINVAL;
197 }
198
199 if (sg)
200 type = ttm_bo_type_sg;
201
202 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
203 if (!nvbo)
204 return -ENOMEM;
205 INIT_LIST_HEAD(&nvbo->head);
206 INIT_LIST_HEAD(&nvbo->entry);
207 INIT_LIST_HEAD(&nvbo->vma_list);
208 nvbo->tile_mode = tile_mode;
209 nvbo->tile_flags = tile_flags;
210 nvbo->bo.bdev = &drm->ttm.bdev;
211
212 if (!nvxx_device(&drm->device)->func->cpu_coherent)
213 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
214
215 nvbo->page_shift = 12;
216 if (drm->client.vm) {
217 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
218 nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
219 }
220
221 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
222 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
223 nouveau_bo_placement_set(nvbo, flags, 0);
224
225 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
226 sizeof(struct nouveau_bo));
227
228 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
229 type, &nvbo->placement,
230 align >> PAGE_SHIFT, false, NULL, acc_size, sg,
231 robj, nouveau_bo_del_ttm);
232 if (ret) {
233 /* ttm will call nouveau_bo_del_ttm if it fails.. */
234 return ret;
235 }
236
237 *pnvbo = nvbo;
238 return 0;
239 }
240
241 static void
242 set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
243 {
244 *n = 0;
245
246 if (type & TTM_PL_FLAG_VRAM)
247 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
248 if (type & TTM_PL_FLAG_TT)
249 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
250 if (type & TTM_PL_FLAG_SYSTEM)
251 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
252 }
253
254 static void
255 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
256 {
257 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
258 u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
259 unsigned i, fpfn, lpfn;
260
261 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
262 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
263 nvbo->bo.mem.num_pages < vram_pages / 4) {
264 /*
265 * Make sure that the color and depth buffers are handled
266 * by independent memory controller units. Up to a 9x
267 * speed up when alpha-blending and depth-test are enabled
268 * at the same time.
269 */
270 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
271 fpfn = vram_pages / 2;
272 lpfn = ~0;
273 } else {
274 fpfn = 0;
275 lpfn = vram_pages / 2;
276 }
277 for (i = 0; i < nvbo->placement.num_placement; ++i) {
278 nvbo->placements[i].fpfn = fpfn;
279 nvbo->placements[i].lpfn = lpfn;
280 }
281 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
282 nvbo->busy_placements[i].fpfn = fpfn;
283 nvbo->busy_placements[i].lpfn = lpfn;
284 }
285 }
286 }
287
288 void
289 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
290 {
291 struct ttm_placement *pl = &nvbo->placement;
292 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
293 TTM_PL_MASK_CACHING) |
294 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
295
296 pl->placement = nvbo->placements;
297 set_placement_list(nvbo->placements, &pl->num_placement,
298 type, flags);
299
300 pl->busy_placement = nvbo->busy_placements;
301 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
302 type | busy, flags);
303
304 set_placement_range(nvbo, type);
305 }
306
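/*
 * Pinning is reference counted: nouveau_bo_pin() validates the buffer into
 * the requested memory type and raises pin_refcnt, nouveau_bo_unpin()
 * lowers it and only re-validates (making the BO evictable again) once the
 * count hits zero.  Typical pairing, as a sketch:
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
 *	...
 *	nouveau_bo_unpin(nvbo);
 */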
307 int
308 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
309 {
310 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
311 struct ttm_buffer_object *bo = &nvbo->bo;
312 bool force = false, evict = false;
313 int ret;
314
315 ret = ttm_bo_reserve(bo, false, false, NULL);
316 if (ret)
317 return ret;
318
319 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
320 memtype == TTM_PL_FLAG_VRAM && contig) {
321 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
322 if (bo->mem.mem_type == TTM_PL_VRAM) {
323 struct nvkm_mem *mem = bo->mem.mm_node;
324 if (!list_is_singular(&mem->regions))
325 evict = true;
326 }
327 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
328 force = true;
329 }
330 }
331
332 if (nvbo->pin_refcnt) {
333 if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
334 NV_ERROR(drm, "bo %p pinned elsewhere: "
335 "0x%08x vs 0x%08x\n", bo,
336 1 << bo->mem.mem_type, memtype);
337 ret = -EBUSY;
338 }
339 nvbo->pin_refcnt++;
340 goto out;
341 }
342
343 if (evict) {
344 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
345 ret = nouveau_bo_validate(nvbo, false, false);
346 if (ret)
347 goto out;
348 }
349
350 nvbo->pin_refcnt++;
351 nouveau_bo_placement_set(nvbo, memtype, 0);
352
353 /* drop pin_refcnt temporarily, so we don't trip the assertion
354 * in nouveau_bo_move() that makes sure we're not trying to
355 * move a pinned buffer
356 */
357 nvbo->pin_refcnt--;
358 ret = nouveau_bo_validate(nvbo, false, false);
359 if (ret)
360 goto out;
361 nvbo->pin_refcnt++;
362
363 switch (bo->mem.mem_type) {
364 case TTM_PL_VRAM:
365 drm->gem.vram_available -= bo->mem.size;
366 break;
367 case TTM_PL_TT:
368 drm->gem.gart_available -= bo->mem.size;
369 break;
370 default:
371 break;
372 }
373
374 out:
375 if (force && ret)
376 nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
377 ttm_bo_unreserve(bo);
378 return ret;
379 }
380
381 int
382 nouveau_bo_unpin(struct nouveau_bo *nvbo)
383 {
384 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
385 struct ttm_buffer_object *bo = &nvbo->bo;
386 int ret, ref;
387
388 ret = ttm_bo_reserve(bo, false, false, NULL);
389 if (ret)
390 return ret;
391
392 ref = --nvbo->pin_refcnt;
393 WARN_ON_ONCE(ref < 0);
394 if (ref)
395 goto out;
396
397 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
398
399 ret = nouveau_bo_validate(nvbo, false, false);
400 if (ret == 0) {
401 switch (bo->mem.mem_type) {
402 case TTM_PL_VRAM:
403 drm->gem.vram_available += bo->mem.size;
404 break;
405 case TTM_PL_TT:
406 drm->gem.gart_available += bo->mem.size;
407 break;
408 default:
409 break;
410 }
411 }
412
413 out:
414 ttm_bo_unreserve(bo);
415 return ret;
416 }
417
418 int
419 nouveau_bo_map(struct nouveau_bo *nvbo)
420 {
421 int ret;
422
423 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
424 if (ret)
425 return ret;
426
427 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
428
429 ttm_bo_unreserve(&nvbo->bo);
430 return ret;
431 }
432
433 void
434 nouveau_bo_unmap(struct nouveau_bo *nvbo)
435 {
436 if (!nvbo)
437 return;
438
439 ttm_bo_kunmap(&nvbo->kmap);
440 }
441
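/*
 * The two sync helpers below walk every backing page and call
 * dma_sync_single_for_device()/_for_cpu(), keeping CPU and GPU views of a
 * buffer consistent on non-coherent setups; both return early when there
 * is no DMA-capable TTM or the BO is marked force_coherent.
 * nouveau_bo_validate() invokes the for_device variant after a successful
 * validation.
 */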
442 void
443 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
444 {
445 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
446 struct nvkm_device *device = nvxx_device(&drm->device);
447 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
448 int i;
449
450 if (!ttm_dma)
451 return;
452
453 /* Don't waste time looping if the object is coherent */
454 if (nvbo->force_coherent)
455 return;
456
457 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
458 dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
459 PAGE_SIZE, DMA_TO_DEVICE);
460 }
461
462 void
463 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
464 {
465 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
466 struct nvkm_device *device = nvxx_device(&drm->device);
467 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
468 int i;
469
470 if (!ttm_dma)
471 return;
472
473 /* Don't waste time looping if the object is coherent */
474 if (nvbo->force_coherent)
475 return;
476
477 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
478 dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
479 PAGE_SIZE, DMA_FROM_DEVICE);
480 }
481
482 int
483 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
484 bool no_wait_gpu)
485 {
486 int ret;
487
488 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
489 interruptible, no_wait_gpu);
490 if (ret)
491 return ret;
492
493 nouveau_bo_sync_for_device(nvbo);
494
495 return 0;
496 }
497
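/*
 * The accessors below operate on the kernel mapping set up by
 * nouveau_bo_map() (nvbo->kmap) and transparently use io accessors when
 * the mapping is iomem.  Minimal usage sketch:
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */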
498 void
499 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
500 {
501 bool is_iomem;
502 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
503
504 mem += index;
505
506 if (is_iomem)
507 iowrite16_native(val, (void __force __iomem *)mem);
508 else
509 *mem = val;
510 }
511
512 u32
513 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
514 {
515 bool is_iomem;
516 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
517
518 mem += index;
519
520 if (is_iomem)
521 return ioread32_native((void __force __iomem *)mem);
522 else
523 return *mem;
524 }
525
526 void
527 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
528 {
529 bool is_iomem;
530 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
531
532 mem += index;
533
534 if (is_iomem)
535 iowrite32_native(val, (void __force __iomem *)mem);
536 else
537 *mem = val;
538 }
539
540 static struct ttm_tt *
541 nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
542 uint32_t page_flags, struct page *dummy_read)
543 {
544 #if IS_ENABLED(CONFIG_AGP)
545 struct nouveau_drm *drm = nouveau_bdev(bdev);
546
547 if (drm->agp.bridge) {
548 return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
549 page_flags, dummy_read);
550 }
551 #endif
552
553 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
554 }
555
556 static int
557 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
558 {
559 /* We'll do this from user space. */
560 return 0;
561 }
562
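/*
 * init_mem_type picks the memory-type manager per domain: the nouveau
 * VRAM/GART managers on Tesla and newer, the NV04 GART manager or the
 * generic TTM range manager otherwise, and restricts caching modes to what
 * the backing can actually be mapped as (e.g. uncached only when the BAR
 * cannot be write-combined).
 */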
563 static int
564 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
565 struct ttm_mem_type_manager *man)
566 {
567 struct nouveau_drm *drm = nouveau_bdev(bdev);
568
569 switch (type) {
570 case TTM_PL_SYSTEM:
571 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
572 man->available_caching = TTM_PL_MASK_CACHING;
573 man->default_caching = TTM_PL_FLAG_CACHED;
574 break;
575 case TTM_PL_VRAM:
576 man->flags = TTM_MEMTYPE_FLAG_FIXED |
577 TTM_MEMTYPE_FLAG_MAPPABLE;
578 man->available_caching = TTM_PL_FLAG_UNCACHED |
579 TTM_PL_FLAG_WC;
580 man->default_caching = TTM_PL_FLAG_WC;
581
582 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
583 /* Some BARs do not support being ioremapped WC */
584 if (nvxx_bar(&drm->device)->iomap_uncached) {
585 man->available_caching = TTM_PL_FLAG_UNCACHED;
586 man->default_caching = TTM_PL_FLAG_UNCACHED;
587 }
588
589 man->func = &nouveau_vram_manager;
590 man->io_reserve_fastpath = false;
591 man->use_io_reserve_lru = true;
592 } else {
593 man->func = &ttm_bo_manager_func;
594 }
595 break;
596 case TTM_PL_TT:
597 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
598 man->func = &nouveau_gart_manager;
599 else
600 if (!drm->agp.bridge)
601 man->func = &nv04_gart_manager;
602 else
603 man->func = &ttm_bo_manager_func;
604
605 if (drm->agp.bridge) {
606 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
607 man->available_caching = TTM_PL_FLAG_UNCACHED |
608 TTM_PL_FLAG_WC;
609 man->default_caching = TTM_PL_FLAG_WC;
610 } else {
611 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
612 TTM_MEMTYPE_FLAG_CMA;
613 man->available_caching = TTM_PL_MASK_CACHING;
614 man->default_caching = TTM_PL_FLAG_CACHED;
615 }
616
617 break;
618 default:
619 return -EINVAL;
620 }
621 return 0;
622 }
623
624 static void
625 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
626 {
627 struct nouveau_bo *nvbo = nouveau_bo(bo);
628
629 switch (bo->mem.mem_type) {
630 case TTM_PL_VRAM:
631 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
632 TTM_PL_FLAG_SYSTEM);
633 break;
634 default:
635 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
636 break;
637 }
638
639 *pl = nvbo->placement;
640 }
641
642
643 static int
644 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
645 {
646 int ret = RING_SPACE(chan, 2);
647 if (ret == 0) {
648 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
649 OUT_RING (chan, handle & 0x0000ffff);
650 FIRE_RING (chan);
651 }
652 return ret;
653 }
654
655 static int
656 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
657 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
658 {
659 struct nvkm_mem *node = old_mem->mm_node;
660 int ret = RING_SPACE(chan, 10);
661 if (ret == 0) {
662 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
663 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
664 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
665 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
666 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
667 OUT_RING (chan, PAGE_SIZE);
668 OUT_RING (chan, PAGE_SIZE);
669 OUT_RING (chan, PAGE_SIZE);
670 OUT_RING (chan, new_mem->num_pages);
671 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
672 }
673 return ret;
674 }
675
676 static int
677 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
678 {
679 int ret = RING_SPACE(chan, 2);
680 if (ret == 0) {
681 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
682 OUT_RING (chan, handle);
683 }
684 return ret;
685 }
686
687 static int
688 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
689 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
690 {
691 struct nvkm_mem *node = old_mem->mm_node;
692 u64 src_offset = node->vma[0].offset;
693 u64 dst_offset = node->vma[1].offset;
694 u32 page_count = new_mem->num_pages;
695 int ret;
696
697 page_count = new_mem->num_pages;
698 while (page_count) {
699 int line_count = (page_count > 8191) ? 8191 : page_count;
700
701 ret = RING_SPACE(chan, 11);
702 if (ret)
703 return ret;
704
705 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
706 OUT_RING (chan, upper_32_bits(src_offset));
707 OUT_RING (chan, lower_32_bits(src_offset));
708 OUT_RING (chan, upper_32_bits(dst_offset));
709 OUT_RING (chan, lower_32_bits(dst_offset));
710 OUT_RING (chan, PAGE_SIZE);
711 OUT_RING (chan, PAGE_SIZE);
712 OUT_RING (chan, PAGE_SIZE);
713 OUT_RING (chan, line_count);
714 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
715 OUT_RING (chan, 0x00000110);
716
717 page_count -= line_count;
718 src_offset += (PAGE_SIZE * line_count);
719 dst_offset += (PAGE_SIZE * line_count);
720 }
721
722 return 0;
723 }
724
725 static int
726 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
727 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
728 {
729 struct nvkm_mem *node = old_mem->mm_node;
730 u64 src_offset = node->vma[0].offset;
731 u64 dst_offset = node->vma[1].offset;
732 u32 page_count = new_mem->num_pages;
733 int ret;
734
735 page_count = new_mem->num_pages;
736 while (page_count) {
737 int line_count = (page_count > 2047) ? 2047 : page_count;
738
739 ret = RING_SPACE(chan, 12);
740 if (ret)
741 return ret;
742
743 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
744 OUT_RING (chan, upper_32_bits(dst_offset));
745 OUT_RING (chan, lower_32_bits(dst_offset));
746 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
747 OUT_RING (chan, upper_32_bits(src_offset));
748 OUT_RING (chan, lower_32_bits(src_offset));
749 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
750 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
751 OUT_RING (chan, PAGE_SIZE); /* line_length */
752 OUT_RING (chan, line_count);
753 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
754 OUT_RING (chan, 0x00100110);
755
756 page_count -= line_count;
757 src_offset += (PAGE_SIZE * line_count);
758 dst_offset += (PAGE_SIZE * line_count);
759 }
760
761 return 0;
762 }
763
764 static int
765 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
766 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
767 {
768 struct nvkm_mem *node = old_mem->mm_node;
769 u64 src_offset = node->vma[0].offset;
770 u64 dst_offset = node->vma[1].offset;
771 u32 page_count = new_mem->num_pages;
772 int ret;
773
774 page_count = new_mem->num_pages;
775 while (page_count) {
776 int line_count = (page_count > 8191) ? 8191 : page_count;
777
778 ret = RING_SPACE(chan, 11);
779 if (ret)
780 return ret;
781
782 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
783 OUT_RING (chan, upper_32_bits(src_offset));
784 OUT_RING (chan, lower_32_bits(src_offset));
785 OUT_RING (chan, upper_32_bits(dst_offset));
786 OUT_RING (chan, lower_32_bits(dst_offset));
787 OUT_RING (chan, PAGE_SIZE);
788 OUT_RING (chan, PAGE_SIZE);
789 OUT_RING (chan, PAGE_SIZE);
790 OUT_RING (chan, line_count);
791 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
792 OUT_RING (chan, 0x00000110);
793
794 page_count -= line_count;
795 src_offset += (PAGE_SIZE * line_count);
796 dst_offset += (PAGE_SIZE * line_count);
797 }
798
799 return 0;
800 }
801
802 static int
803 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
804 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
805 {
806 struct nvkm_mem *node = old_mem->mm_node;
807 int ret = RING_SPACE(chan, 7);
808 if (ret == 0) {
809 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
810 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
811 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
812 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
813 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
814 OUT_RING (chan, 0x00000000 /* COPY */);
815 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
816 }
817 return ret;
818 }
819
820 static int
821 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
822 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
823 {
824 struct nvkm_mem *node = old_mem->mm_node;
825 int ret = RING_SPACE(chan, 7);
826 if (ret == 0) {
827 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
828 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
829 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
830 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
831 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
832 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
833 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
834 }
835 return ret;
836 }
837
838 static int
839 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
840 {
841 int ret = RING_SPACE(chan, 6);
842 if (ret == 0) {
843 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
844 OUT_RING (chan, handle);
845 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
846 OUT_RING (chan, chan->drm->ntfy.handle);
847 OUT_RING (chan, chan->vram.handle);
848 OUT_RING (chan, chan->vram.handle);
849 }
850
851 return ret;
852 }
853
854 static int
855 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
856 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
857 {
858 struct nvkm_mem *node = old_mem->mm_node;
859 u64 length = (new_mem->num_pages << PAGE_SHIFT);
860 u64 src_offset = node->vma[0].offset;
861 u64 dst_offset = node->vma[1].offset;
862 int src_tiled = !!node->memtype;
863 int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
864 int ret;
865
866 while (length) {
867 u32 amount, stride, height;
868
869 ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
870 if (ret)
871 return ret;
872
873 amount = min(length, (u64)(4 * 1024 * 1024));
874 stride = 16 * 4;
875 height = amount / stride;
876
877 if (src_tiled) {
878 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
879 OUT_RING (chan, 0);
880 OUT_RING (chan, 0);
881 OUT_RING (chan, stride);
882 OUT_RING (chan, height);
883 OUT_RING (chan, 1);
884 OUT_RING (chan, 0);
885 OUT_RING (chan, 0);
886 } else {
887 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
888 OUT_RING (chan, 1);
889 }
890 if (dst_tiled) {
891 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
892 OUT_RING (chan, 0);
893 OUT_RING (chan, 0);
894 OUT_RING (chan, stride);
895 OUT_RING (chan, height);
896 OUT_RING (chan, 1);
897 OUT_RING (chan, 0);
898 OUT_RING (chan, 0);
899 } else {
900 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
901 OUT_RING (chan, 1);
902 }
903
904 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
905 OUT_RING (chan, upper_32_bits(src_offset));
906 OUT_RING (chan, upper_32_bits(dst_offset));
907 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
908 OUT_RING (chan, lower_32_bits(src_offset));
909 OUT_RING (chan, lower_32_bits(dst_offset));
910 OUT_RING (chan, stride);
911 OUT_RING (chan, stride);
912 OUT_RING (chan, stride);
913 OUT_RING (chan, height);
914 OUT_RING (chan, 0x00000101);
915 OUT_RING (chan, 0x00000000);
916 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
917 OUT_RING (chan, 0);
918
919 length -= amount;
920 src_offset += amount;
921 dst_offset += amount;
922 }
923
924 return 0;
925 }
926
927 static int
928 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
929 {
930 int ret = RING_SPACE(chan, 4);
931 if (ret == 0) {
932 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
933 OUT_RING (chan, handle);
934 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
935 OUT_RING (chan, chan->drm->ntfy.handle);
936 }
937
938 return ret;
939 }
940
941 static inline uint32_t
942 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
943 struct nouveau_channel *chan, struct ttm_mem_reg *mem)
944 {
945 if (mem->mem_type == TTM_PL_TT)
946 return NvDmaTT;
947 return chan->vram.handle;
948 }
949
950 static int
951 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
952 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
953 {
954 u32 src_offset = old_mem->start << PAGE_SHIFT;
955 u32 dst_offset = new_mem->start << PAGE_SHIFT;
956 u32 page_count = new_mem->num_pages;
957 int ret;
958
959 ret = RING_SPACE(chan, 3);
960 if (ret)
961 return ret;
962
963 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
964 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
965 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
966
967 page_count = new_mem->num_pages;
968 while (page_count) {
969 int line_count = (page_count > 2047) ? 2047 : page_count;
970
971 ret = RING_SPACE(chan, 11);
972 if (ret)
973 return ret;
974
975 BEGIN_NV04(chan, NvSubCopy,
976 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
977 OUT_RING (chan, src_offset);
978 OUT_RING (chan, dst_offset);
979 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
980 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
981 OUT_RING (chan, PAGE_SIZE); /* line_length */
982 OUT_RING (chan, line_count);
983 OUT_RING (chan, 0x00000101);
984 OUT_RING (chan, 0x00000000);
985 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
986 OUT_RING (chan, 0);
987
988 page_count -= line_count;
989 src_offset += (PAGE_SIZE * line_count);
990 dst_offset += (PAGE_SIZE * line_count);
991 }
992
993 return 0;
994 }
995
996 static int
997 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
998 struct ttm_mem_reg *mem)
999 {
1000 struct nvkm_mem *old_node = bo->mem.mm_node;
1001 struct nvkm_mem *new_node = mem->mm_node;
1002 u64 size = (u64)mem->num_pages << PAGE_SHIFT;
1003 int ret;
1004
1005 ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
1006 NV_MEM_ACCESS_RW, &old_node->vma[0]);
1007 if (ret)
1008 return ret;
1009
1010 ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
1011 NV_MEM_ACCESS_RW, &old_node->vma[1]);
1012 if (ret) {
1013 nvkm_vm_put(&old_node->vma[0]);
1014 return ret;
1015 }
1016
1017 nvkm_vm_map(&old_node->vma[0], old_node);
1018 nvkm_vm_map(&old_node->vma[1], new_node);
1019 return 0;
1020 }
1021
1022 static int
1023 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1024 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1025 {
1026 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1027 struct nouveau_channel *chan = drm->ttm.chan;
1028 struct nouveau_cli *cli = (void *)chan->user.client;
1029 struct nouveau_fence *fence;
1030 int ret;
1031
1032 /* create temporary vmas for the transfer and attach them to the
1033 * old nvkm_mem node, these will get cleaned up after ttm has
1034 * destroyed the ttm_mem_reg
1035 */
1036 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1037 ret = nouveau_bo_move_prep(drm, bo, new_mem);
1038 if (ret)
1039 return ret;
1040 }
1041
1042 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1043 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1044 if (ret == 0) {
1045 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
1046 if (ret == 0) {
1047 ret = nouveau_fence_new(chan, false, &fence);
1048 if (ret == 0) {
1049 ret = ttm_bo_move_accel_cleanup(bo,
1050 &fence->base,
1051 evict,
1052 new_mem);
1053 nouveau_fence_unref(&fence);
1054 }
1055 }
1056 }
1057 mutex_unlock(&cli->mutex);
1058 return ret;
1059 }
1060
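/*
 * nouveau_bo_move_init() walks _methods[] from the newest copy-engine
 * class downwards, instantiates the first object the channel accepts and
 * records the matching copy routine and channel in drm->ttm.move and
 * drm->ttm.chan.  When nothing matches, the table is exhausted and buffer
 * moves fall back to the CPU memcpy path in nouveau_bo_move().
 */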
1061 void
1062 nouveau_bo_move_init(struct nouveau_drm *drm)
1063 {
1064 static const struct {
1065 const char *name;
1066 int engine;
1067 s32 oclass;
1068 int (*exec)(struct nouveau_channel *,
1069 struct ttm_buffer_object *,
1070 struct ttm_mem_reg *, struct ttm_mem_reg *);
1071 int (*init)(struct nouveau_channel *, u32 handle);
1072 } _methods[] = {
1073 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1074 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1075 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1076 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1077 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1078 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1079 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1080 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1081 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1082 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1083 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1084 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1085 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1086 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1087 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1088 {},
1089 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1090 }, *mthd = _methods;
1091 const char *name = "CPU";
1092 int ret;
1093
1094 do {
1095 struct nouveau_channel *chan;
1096
1097 if (mthd->engine)
1098 chan = drm->cechan;
1099 else
1100 chan = drm->channel;
1101 if (chan == NULL)
1102 continue;
1103
1104 ret = nvif_object_init(&chan->user,
1105 mthd->oclass | (mthd->engine << 16),
1106 mthd->oclass, NULL, 0,
1107 &drm->ttm.copy);
1108 if (ret == 0) {
1109 ret = mthd->init(chan, drm->ttm.copy.handle);
1110 if (ret) {
1111 nvif_object_fini(&drm->ttm.copy);
1112 continue;
1113 }
1114
1115 drm->ttm.move = mthd->exec;
1116 drm->ttm.chan = chan;
1117 name = mthd->name;
1118 break;
1119 }
1120 } while ((++mthd)->exec);
1121
1122 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1123 }
1124
1125 static int
1126 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1127 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1128 {
1129 struct ttm_place placement_memtype = {
1130 .fpfn = 0,
1131 .lpfn = 0,
1132 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1133 };
1134 struct ttm_placement placement;
1135 struct ttm_mem_reg tmp_mem;
1136 int ret;
1137
1138 placement.num_placement = placement.num_busy_placement = 1;
1139 placement.placement = placement.busy_placement = &placement_memtype;
1140
1141 tmp_mem = *new_mem;
1142 tmp_mem.mm_node = NULL;
1143 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1144 if (ret)
1145 return ret;
1146
1147 ret = ttm_tt_bind(bo->ttm, &tmp_mem);
1148 if (ret)
1149 goto out;
1150
1151 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1152 if (ret)
1153 goto out;
1154
1155 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
1156 out:
1157 ttm_bo_mem_put(bo, &tmp_mem);
1158 return ret;
1159 }
1160
1161 static int
1162 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1163 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1164 {
1165 struct ttm_place placement_memtype = {
1166 .fpfn = 0,
1167 .lpfn = 0,
1168 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1169 };
1170 struct ttm_placement placement;
1171 struct ttm_mem_reg tmp_mem;
1172 int ret;
1173
1174 placement.num_placement = placement.num_busy_placement = 1;
1175 placement.placement = placement.busy_placement = &placement_memtype;
1176
1177 tmp_mem = *new_mem;
1178 tmp_mem.mm_node = NULL;
1179 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1180 if (ret)
1181 return ret;
1182
1183 ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
1184 if (ret)
1185 goto out;
1186
1187 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1188 if (ret)
1189 goto out;
1190
1191 out:
1192 ttm_bo_mem_put(bo, &tmp_mem);
1193 return ret;
1194 }
1195
1196 static void
1197 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1198 {
1199 struct nouveau_bo *nvbo = nouveau_bo(bo);
1200 struct nvkm_vma *vma;
1201
1202 /* ttm can now (stupidly) pass the driver bos it didn't create... */
1203 if (bo->destroy != nouveau_bo_del_ttm)
1204 return;
1205
1206 list_for_each_entry(vma, &nvbo->vma_list, head) {
1207 if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
1208 (new_mem->mem_type == TTM_PL_VRAM ||
1209 nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
1210 nvkm_vm_map(vma, new_mem->mm_node);
1211 } else {
1212 WARN_ON(ttm_bo_wait(bo, false, false));
1213 nvkm_vm_unmap(vma);
1214 }
1215 }
1216 }
1217
1218 static int
1219 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1220 struct nouveau_drm_tile **new_tile)
1221 {
1222 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1223 struct drm_device *dev = drm->dev;
1224 struct nouveau_bo *nvbo = nouveau_bo(bo);
1225 u64 offset = new_mem->start << PAGE_SHIFT;
1226
1227 *new_tile = NULL;
1228 if (new_mem->mem_type != TTM_PL_VRAM)
1229 return 0;
1230
1231 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1232 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1233 nvbo->tile_mode,
1234 nvbo->tile_flags);
1235 }
1236
1237 return 0;
1238 }
1239
1240 static void
1241 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1242 struct nouveau_drm_tile *new_tile,
1243 struct nouveau_drm_tile **old_tile)
1244 {
1245 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1246 struct drm_device *dev = drm->dev;
1247 struct fence *fence = reservation_object_get_excl(bo->resv);
1248
1249 nv10_bo_put_tile_region(dev, *old_tile, fence);
1250 *old_tile = new_tile;
1251 }
1252
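/*
 * nouveau_bo_move() picks the cheapest strategy available: a "fake" copy
 * when the BO has no backing pages yet, the hardware copy engine selected
 * by nouveau_bo_move_init() (taking a detour through GART for
 * VRAM<->SYSTEM transfers via the flipd/flips helpers above), and finally
 * a plain ttm_bo_move_memcpy() fallback.  Pre-Tesla tile regions are
 * rebound around the move by nouveau_bo_vm_bind()/nouveau_bo_vm_cleanup().
 */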
1253 static int
1254 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1255 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1256 {
1257 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1258 struct nouveau_bo *nvbo = nouveau_bo(bo);
1259 struct ttm_mem_reg *old_mem = &bo->mem;
1260 struct nouveau_drm_tile *new_tile = NULL;
1261 int ret = 0;
1262
1263 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1264 if (ret)
1265 return ret;
1266
1267 if (nvbo->pin_refcnt)
1268 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1269
1270 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1271 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1272 if (ret)
1273 return ret;
1274 }
1275
1276 /* Fake bo copy. */
1277 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1278 BUG_ON(bo->mem.mm_node != NULL);
1279 bo->mem = *new_mem;
1280 new_mem->mm_node = NULL;
1281 goto out;
1282 }
1283
1284 /* Hardware assisted copy. */
1285 if (drm->ttm.move) {
1286 if (new_mem->mem_type == TTM_PL_SYSTEM)
1287 ret = nouveau_bo_move_flipd(bo, evict, intr,
1288 no_wait_gpu, new_mem);
1289 else if (old_mem->mem_type == TTM_PL_SYSTEM)
1290 ret = nouveau_bo_move_flips(bo, evict, intr,
1291 no_wait_gpu, new_mem);
1292 else
1293 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1294 no_wait_gpu, new_mem);
1295 if (!ret)
1296 goto out;
1297 }
1298
1299 /* Fallback to software copy. */
1300 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1301 if (ret == 0)
1302 ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
1303
1304 out:
1305 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1306 if (ret)
1307 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1308 else
1309 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1310 }
1311
1312 return ret;
1313 }
1314
1315 static int
1316 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1317 {
1318 struct nouveau_bo *nvbo = nouveau_bo(bo);
1319
1320 return drm_vma_node_verify_access(&nvbo->gem.vma_node,
1321 filp->private_data);
1322 }
1323
1324 static int
1325 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1326 {
1327 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1328 struct nouveau_drm *drm = nouveau_bdev(bdev);
1329 struct nvkm_device *device = nvxx_device(&drm->device);
1330 struct nvkm_mem *node = mem->mm_node;
1331 int ret;
1332
1333 mem->bus.addr = NULL;
1334 mem->bus.offset = 0;
1335 mem->bus.size = mem->num_pages << PAGE_SHIFT;
1336 mem->bus.base = 0;
1337 mem->bus.is_iomem = false;
1338 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1339 return -EINVAL;
1340 switch (mem->mem_type) {
1341 case TTM_PL_SYSTEM:
1342 /* System memory */
1343 return 0;
1344 case TTM_PL_TT:
1345 #if IS_ENABLED(CONFIG_AGP)
1346 if (drm->agp.bridge) {
1347 mem->bus.offset = mem->start << PAGE_SHIFT;
1348 mem->bus.base = drm->agp.base;
1349 mem->bus.is_iomem = !drm->agp.cma;
1350 }
1351 #endif
1352 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
1353 /* untiled */
1354 break;
1355 /* fallthrough, tiled memory */
1356 case TTM_PL_VRAM:
1357 mem->bus.offset = mem->start << PAGE_SHIFT;
1358 mem->bus.base = device->func->resource_addr(device, 1);
1359 mem->bus.is_iomem = true;
1360 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1361 struct nvkm_bar *bar = nvxx_bar(&drm->device);
1362 int page_shift = 12;
1363 if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
1364 page_shift = node->page_shift;
1365
1366 ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
1367 &node->bar_vma);
1368 if (ret)
1369 return ret;
1370
1371 nvkm_vm_map(&node->bar_vma, node);
1372 mem->bus.offset = node->bar_vma.offset;
1373 }
1374 break;
1375 default:
1376 return -EINVAL;
1377 }
1378 return 0;
1379 }
1380
1381 static void
1382 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1383 {
1384 struct nvkm_mem *node = mem->mm_node;
1385
1386 if (!node->bar_vma.node)
1387 return;
1388
1389 nvkm_vm_unmap(&node->bar_vma);
1390 nvkm_vm_put(&node->bar_vma);
1391 }
1392
1393 static int
1394 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1395 {
1396 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1397 struct nouveau_bo *nvbo = nouveau_bo(bo);
1398 struct nvkm_device *device = nvxx_device(&drm->device);
1399 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1400 int i, ret;
1401
1402 /* as long as the bo isn't in vram, and isn't tiled, we've got
1403 * nothing to do here.
1404 */
1405 if (bo->mem.mem_type != TTM_PL_VRAM) {
1406 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1407 !nouveau_bo_tile_layout(nvbo))
1408 return 0;
1409
1410 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1411 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1412
1413 ret = nouveau_bo_validate(nvbo, false, false);
1414 if (ret)
1415 return ret;
1416 }
1417 return 0;
1418 }
1419
1420 /* make sure bo is in mappable vram */
1421 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1422 bo->mem.start + bo->mem.num_pages < mappable)
1423 return 0;
1424
1425 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1426 nvbo->placements[i].fpfn = 0;
1427 nvbo->placements[i].lpfn = mappable;
1428 }
1429
1430 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1431 nvbo->busy_placements[i].fpfn = 0;
1432 nvbo->busy_placements[i].lpfn = mappable;
1433 }
1434
1435 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1436 return nouveau_bo_validate(nvbo, false, false);
1437 }
1438
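/*
 * TT backing pages are populated along one of several paths: imported (SG)
 * buffers simply mirror their sg table, otherwise pages come from the AGP
 * helper, the swiotlb-backed DMA pool, or the plain TTM pool followed by a
 * manual dma_map_page() per page; a mapping failure rolls back every page
 * mapped so far, and unpopulate reverses whichever path was used.
 */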
1439 static int
1440 nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1441 {
1442 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1443 struct nouveau_drm *drm;
1444 struct nvkm_device *device;
1445 struct drm_device *dev;
1446 struct device *pdev;
1447 unsigned i;
1448 int r;
1449 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1450
1451 if (ttm->state != tt_unpopulated)
1452 return 0;
1453
1454 if (slave && ttm->sg) {
1455 /* make userspace faulting work */
1456 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1457 ttm_dma->dma_address, ttm->num_pages);
1458 ttm->state = tt_unbound;
1459 return 0;
1460 }
1461
1462 drm = nouveau_bdev(ttm->bdev);
1463 device = nvxx_device(&drm->device);
1464 dev = drm->dev;
1465 pdev = device->dev;
1466
1467 #if IS_ENABLED(CONFIG_AGP)
1468 if (drm->agp.bridge) {
1469 return ttm_agp_tt_populate(ttm);
1470 }
1471 #endif
1472
1473 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1474 if (swiotlb_nr_tbl()) {
1475 return ttm_dma_populate((void *)ttm, dev->dev);
1476 }
1477 #endif
1478
1479 r = ttm_pool_populate(ttm);
1480 if (r) {
1481 return r;
1482 }
1483
1484 for (i = 0; i < ttm->num_pages; i++) {
1485 dma_addr_t addr;
1486
1487 addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
1488 DMA_BIDIRECTIONAL);
1489
1490 if (dma_mapping_error(pdev, addr)) {
1491 while (i--) {
1492 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1493 PAGE_SIZE, DMA_BIDIRECTIONAL);
1494 ttm_dma->dma_address[i] = 0;
1495 }
1496 ttm_pool_unpopulate(ttm);
1497 return -EFAULT;
1498 }
1499
1500 ttm_dma->dma_address[i] = addr;
1501 }
1502 return 0;
1503 }
1504
1505 static void
1506 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1507 {
1508 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1509 struct nouveau_drm *drm;
1510 struct nvkm_device *device;
1511 struct drm_device *dev;
1512 struct device *pdev;
1513 unsigned i;
1514 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1515
1516 if (slave)
1517 return;
1518
1519 drm = nouveau_bdev(ttm->bdev);
1520 device = nvxx_device(&drm->device);
1521 dev = drm->dev;
1522 pdev = device->dev;
1523
1524 #if IS_ENABLED(CONFIG_AGP)
1525 if (drm->agp.bridge) {
1526 ttm_agp_tt_unpopulate(ttm);
1527 return;
1528 }
1529 #endif
1530
1531 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1532 if (swiotlb_nr_tbl()) {
1533 ttm_dma_unpopulate((void *)ttm, dev->dev);
1534 return;
1535 }
1536 #endif
1537
1538 for (i = 0; i < ttm->num_pages; i++) {
1539 if (ttm_dma->dma_address[i]) {
1540 dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
1541 DMA_BIDIRECTIONAL);
1542 }
1543 }
1544
1545 ttm_pool_unpopulate(ttm);
1546 }
1547
1548 void
1549 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1550 {
1551 struct reservation_object *resv = nvbo->bo.resv;
1552
1553 if (exclusive)
1554 reservation_object_add_excl_fence(resv, &fence->base);
1555 else if (fence)
1556 reservation_object_add_shared_fence(resv, &fence->base);
1557 }
1558
1559 struct ttm_bo_driver nouveau_bo_driver = {
1560 .ttm_tt_create = &nouveau_ttm_tt_create,
1561 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1562 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1563 .invalidate_caches = nouveau_bo_invalidate_caches,
1564 .init_mem_type = nouveau_bo_init_mem_type,
1565 .evict_flags = nouveau_bo_evict_flags,
1566 .move_notify = nouveau_bo_move_ntfy,
1567 .move = nouveau_bo_move,
1568 .verify_access = nouveau_bo_verify_access,
1569 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1570 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1571 .io_mem_free = &nouveau_ttm_io_mem_free,
1572 .lru_tail = &ttm_bo_default_lru_tail,
1573 .swap_lru_tail = &ttm_bo_default_swap_lru_tail,
1574 };
1575
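/*
 * Per-VM mappings of a BO are tracked on nvbo->vma_list.  A caller
 * typically looks up an existing mapping first and only adds one on a
 * miss; a rough sketch (error handling elided, the kzalloc'd nvkm_vma is
 * the caller's responsibility):
 *
 *	vma = nouveau_bo_vma_find(nvbo, vm);
 *	if (!vma) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		ret = nouveau_bo_vma_add(nvbo, vm, vma);
 *	}
 */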
1576 struct nvkm_vma *
1577 nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
1578 {
1579 struct nvkm_vma *vma;
1580 list_for_each_entry(vma, &nvbo->vma_list, head) {
1581 if (vma->vm == vm)
1582 return vma;
1583 }
1584
1585 return NULL;
1586 }
1587
1588 int
1589 nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
1590 struct nvkm_vma *vma)
1591 {
1592 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1593 int ret;
1594
1595 ret = nvkm_vm_get(vm, size, nvbo->page_shift,
1596 NV_MEM_ACCESS_RW, vma);
1597 if (ret)
1598 return ret;
1599
1600 if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
1601 (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
1602 nvbo->page_shift != vma->vm->mmu->lpg_shift))
1603 nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
1604
1605 list_add_tail(&vma->head, &nvbo->vma_list);
1606 vma->refcount = 1;
1607 return 0;
1608 }
1609
1610 void
1611 nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
1612 {
1613 if (vma->node) {
1614 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
1615 nvkm_vm_unmap(vma);
1616 nvkm_vm_put(vma);
1617 list_del(&vma->head);
1618 }
1619 }
1620