1 /*
2  * Copyright 2007 Dave Airlied
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  */
24 /*
25  * Authors: Dave Airlied <airlied@linux.ie>
26  *	    Ben Skeggs   <darktama@iinet.net.au>
27  *	    Jeremy Kolb  <jkolb@brandeis.edu>
28  */
29 
30 #include <linux/dma-mapping.h>
31 #include <linux/swiotlb.h>
32 
33 #include "nouveau_drv.h"
34 #include "nouveau_dma.h"
35 #include "nouveau_fence.h"
36 
37 #include "nouveau_bo.h"
38 #include "nouveau_ttm.h"
39 #include "nouveau_gem.h"
40 #include "nouveau_mem.h"
41 #include "nouveau_vmm.h"
42 
43 #include <nvif/class.h>
44 #include <nvif/if500b.h>
45 #include <nvif/if900b.h>
46 
47 /*
48  * NV10-NV40 tiling helpers
49  */
50 
51 static void
52 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
53 			   u32 addr, u32 size, u32 pitch, u32 flags)
54 {
55 	struct nouveau_drm *drm = nouveau_drm(dev);
56 	int i = reg - drm->tile.reg;
57 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
58 	struct nvkm_fb_tile *tile = &fb->tile.region[i];
59 
60 	nouveau_fence_unref(&reg->fence);
61 
62 	if (tile->pitch)
63 		nvkm_fb_tile_fini(fb, i, tile);
64 
65 	if (pitch)
66 		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
67 
68 	nvkm_fb_tile_prog(fb, i, tile);
69 }
70 
71 static struct nouveau_drm_tile *
72 nv10_bo_get_tile_region(struct drm_device *dev, int i)
73 {
74 	struct nouveau_drm *drm = nouveau_drm(dev);
75 	struct nouveau_drm_tile *tile = &drm->tile.reg[i];
76 
77 	spin_lock(&drm->tile.lock);
78 
79 	if (!tile->used &&
80 	    (!tile->fence || nouveau_fence_done(tile->fence)))
81 		tile->used = true;
82 	else
83 		tile = NULL;
84 
85 	spin_unlock(&drm->tile.lock);
86 	return tile;
87 }
88 
89 static void
90 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
91 			struct dma_fence *fence)
92 {
93 	struct nouveau_drm *drm = nouveau_drm(dev);
94 
95 	if (tile) {
96 		spin_lock(&drm->tile.lock);
97 		tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
98 		tile->used = false;
99 		spin_unlock(&drm->tile.lock);
100 	}
101 }
102 
103 static struct nouveau_drm_tile *
104 nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
105 		   u32 size, u32 pitch, u32 zeta)
106 {
107 	struct nouveau_drm *drm = nouveau_drm(dev);
108 	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
109 	struct nouveau_drm_tile *tile, *found = NULL;
110 	int i;
111 
112 	for (i = 0; i < fb->tile.regions; i++) {
113 		tile = nv10_bo_get_tile_region(dev, i);
114 
115 		if (pitch && !found) {
116 			found = tile;
117 			continue;
118 
119 		} else if (tile && fb->tile.region[i].pitch) {
120 			/* Kill an unused tile region. */
121 			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
122 		}
123 
124 		nv10_bo_put_tile_region(dev, tile, NULL);
125 	}
126 
127 	if (found)
128 		nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
129 	return found;
130 }
131 
132 static void
133 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
134 {
135 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
136 	struct drm_device *dev = drm->dev;
137 	struct nouveau_bo *nvbo = nouveau_bo(bo);
138 
139 	WARN_ON(nvbo->pin_refcnt > 0);
140 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
141 
142 	/*
143 	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
144 	 * initialized, so don't attempt to release it.
145 	 */
146 	if (bo->base.dev)
147 		drm_gem_object_release(&bo->base);
148 
149 	kfree(nvbo);
150 }
151 
152 static inline u64
153 roundup_64(u64 x, u32 y)
154 {
155 	x += y - 1;
156 	do_div(x, y);
157 	return x * y;
158 }
159 
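/*
 * Worked example (a sketch, not in the original file): roundup_64(5000, 4096)
 * adds 4095, divides by 4096 via do_div() -- which handles 64-bit dividends
 * on 32-bit kernels where plain u64 division is unavailable -- and returns
 * 2 * 4096 = 8192, i.e. x rounded up to the next multiple of y.
 */
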
160 static void
161 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
162 		       int *align, u64 *size)
163 {
164 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
165 	struct nvif_device *device = &drm->client.device;
166 
167 	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
168 		if (nvbo->mode) {
169 			if (device->info.chipset >= 0x40) {
170 				*align = 65536;
171 				*size = roundup_64(*size, 64 * nvbo->mode);
172 
173 			} else if (device->info.chipset >= 0x30) {
174 				*align = 32768;
175 				*size = roundup_64(*size, 64 * nvbo->mode);
176 
177 			} else if (device->info.chipset >= 0x20) {
178 				*align = 16384;
179 				*size = roundup_64(*size, 64 * nvbo->mode);
180 
181 			} else if (device->info.chipset >= 0x10) {
182 				*align = 16384;
183 				*size = roundup_64(*size, 32 * nvbo->mode);
184 			}
185 		}
186 	} else {
187 		*size = roundup_64(*size, (1 << nvbo->page));
188 		*align = max((1 <<  nvbo->page), *align);
189 	}
190 
191 	*size = roundup_64(*size, PAGE_SIZE);
192 }
193 
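/*
 * Rough sketch of the effect (assuming an NV40-generation chip, chipset
 * >= 0x40, and a tiled buffer with nvbo->mode == 4): *align becomes 65536
 * and *size is rounded up to a multiple of 64 * 4 = 256 bytes, then to a
 * whole page by the final roundup_64(*size, PAGE_SIZE).
 */
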
194 struct nouveau_bo *
195 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
196 		 u32 tile_mode, u32 tile_flags)
197 {
198 	struct nouveau_drm *drm = cli->drm;
199 	struct nouveau_bo *nvbo;
200 	struct nvif_mmu *mmu = &cli->mmu;
201 	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
202 	int i, pi = -1;
203 
204 	if (!*size) {
205 		NV_WARN(drm, "skipped size %016llx\n", *size);
206 		return ERR_PTR(-EINVAL);
207 	}
208 
209 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
210 	if (!nvbo)
211 		return ERR_PTR(-ENOMEM);
212 	INIT_LIST_HEAD(&nvbo->head);
213 	INIT_LIST_HEAD(&nvbo->entry);
214 	INIT_LIST_HEAD(&nvbo->vma_list);
215 	nvbo->bo.bdev = &drm->ttm.bdev;
216 
217 	/* This is confusing, and doesn't actually mean we want an uncached
218 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
219 	 * into in nouveau_gem_new().
220 	 */
221 	if (flags & TTM_PL_FLAG_UNCACHED) {
222 		/* Determine if we can get a cache-coherent map, forcing
223 		 * uncached mapping if we can't.
224 		 */
225 		if (!nouveau_drm_use_coherent_gpu_mapping(drm))
226 			nvbo->force_coherent = true;
227 	}
228 
229 	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
230 		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
231 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
232 			kfree(nvbo);
233 			return ERR_PTR(-EINVAL);
234 		}
235 
236 		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
237 	} else
238 	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
239 		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
240 		nvbo->comp = (tile_flags & 0x00030000) >> 16;
241 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
242 			kfree(nvbo);
243 			return ERR_PTR(-EINVAL);
244 		}
245 	} else {
246 		nvbo->zeta = (tile_flags & 0x00000007);
247 	}
248 	nvbo->mode = tile_mode;
249 	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
250 
251 	/* Determine the desirable target GPU page size for the buffer. */
252 	for (i = 0; i < vmm->page_nr; i++) {
253 		/* Because we cannot currently allow VMM maps to fail
254 		 * during buffer migration, we need to determine page
255 		 * size for the buffer up-front, and pre-allocate its
256 		 * page tables.
257 		 *
258 		 * Skip page sizes that can't support needed domains.
259 		 */
260 		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
261 		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
262 			continue;
263 		if ((flags & TTM_PL_FLAG_TT) &&
264 		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
265 			continue;
266 
267 		/* Select this page size if it's the first that supports
268 		 * the potential memory domains, or when it's compatible
269 		 * with the requested compression settings.
270 		 */
271 		if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
272 			pi = i;
273 
274 		/* Stop once the buffer is larger than the current page size. */
275 		if (*size >= 1ULL << vmm->page[i].shift)
276 			break;
277 	}
278 
279 	if (WARN_ON(pi < 0))
280 		return ERR_PTR(-EINVAL);
281 
282 	/* Disable compression if suitable settings couldn't be found. */
283 	if (nvbo->comp && !vmm->page[pi].comp) {
284 		if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
285 			nvbo->kind = mmu->kind[nvbo->kind];
286 		nvbo->comp = 0;
287 	}
288 	nvbo->page = vmm->page[pi].shift;
289 
290 	nouveau_bo_fixup_align(nvbo, flags, align, size);
291 
292 	return nvbo;
293 }
294 
295 int
296 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
297 		struct sg_table *sg, struct dma_resv *robj)
298 {
299 	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
300 	size_t acc_size;
301 	int ret;
302 
303 	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
304 
305 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
306 	nouveau_bo_placement_set(nvbo, flags, 0);
307 
308 	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
309 			  &nvbo->placement, align >> PAGE_SHIFT, false,
310 			  acc_size, sg, robj, nouveau_bo_del_ttm);
311 	if (ret) {
312 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
313 		return ret;
314 	}
315 
316 	return 0;
317 }
318 
319 int
320 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
321 	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
322 	       struct sg_table *sg, struct dma_resv *robj,
323 	       struct nouveau_bo **pnvbo)
324 {
325 	struct nouveau_bo *nvbo;
326 	int ret;
327 
328 	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
329 				tile_flags);
330 	if (IS_ERR(nvbo))
331 		return PTR_ERR(nvbo);
332 
333 	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
334 	if (ret)
335 		return ret;
336 
337 	*pnvbo = nvbo;
338 	return 0;
339 }
340 
341 static void
342 set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
343 {
344 	*n = 0;
345 
346 	if (type & TTM_PL_FLAG_VRAM)
347 		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
348 	if (type & TTM_PL_FLAG_TT)
349 		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
350 	if (type & TTM_PL_FLAG_SYSTEM)
351 		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
352 }
353 
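/*
 * Example: passing type = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT fills two
 * ttm_place entries, tried in that order (VRAM first, then GART), each
 * OR'd with the caching/no-evict flags supplied by the caller.
 */
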
354 static void
355 set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
356 {
357 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
358 	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
359 	unsigned i, fpfn, lpfn;
360 
361 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
362 	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
363 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
364 		/*
365 		 * Make sure that the color and depth buffers are handled
366 		 * by independent memory controller units. Up to a 9x
367 		 * speed up when alpha-blending and depth-test are enabled
368 		 * at the same time.
369 		 */
370 		if (nvbo->zeta) {
371 			fpfn = vram_pages / 2;
372 			lpfn = ~0;
373 		} else {
374 			fpfn = 0;
375 			lpfn = vram_pages / 2;
376 		}
377 		for (i = 0; i < nvbo->placement.num_placement; ++i) {
378 			nvbo->placements[i].fpfn = fpfn;
379 			nvbo->placements[i].lpfn = lpfn;
380 		}
381 		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
382 			nvbo->busy_placements[i].fpfn = fpfn;
383 			nvbo->busy_placements[i].lpfn = lpfn;
384 		}
385 	}
386 }
387 
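/*
 * Illustration (assumed figures): on a Celsius (NV10-family) board with
 * 64 MiB of VRAM, a small tiled depth (zeta) buffer is constrained to the
 * upper 32 MiB (fpfn = vram_pages / 2) and a colour buffer to the lower
 * 32 MiB, so the two are served by independent memory controller units.
 */
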
388 void
389 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
390 {
391 	struct ttm_placement *pl = &nvbo->placement;
392 	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
393 						 TTM_PL_MASK_CACHING) |
394 			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
395 
396 	pl->placement = nvbo->placements;
397 	set_placement_list(nvbo->placements, &pl->num_placement,
398 			   type, flags);
399 
400 	pl->busy_placement = nvbo->busy_placements;
401 	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
402 			   type | busy, flags);
403 
404 	set_placement_range(nvbo, type);
405 }
406 
407 int
408 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
409 {
410 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
411 	struct ttm_buffer_object *bo = &nvbo->bo;
412 	bool force = false, evict = false;
413 	int ret;
414 
415 	ret = ttm_bo_reserve(bo, false, false, NULL);
416 	if (ret)
417 		return ret;
418 
419 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
420 	    memtype == TTM_PL_FLAG_VRAM && contig) {
421 		if (!nvbo->contig) {
422 			nvbo->contig = true;
423 			force = true;
424 			evict = true;
425 		}
426 	}
427 
428 	if (nvbo->pin_refcnt) {
429 		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
430 			NV_ERROR(drm, "bo %p pinned elsewhere: "
431 				      "0x%08x vs 0x%08x\n", bo,
432 				 1 << bo->mem.mem_type, memtype);
433 			ret = -EBUSY;
434 		}
435 		nvbo->pin_refcnt++;
436 		goto out;
437 	}
438 
439 	if (evict) {
440 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
441 		ret = nouveau_bo_validate(nvbo, false, false);
442 		if (ret)
443 			goto out;
444 	}
445 
446 	nvbo->pin_refcnt++;
447 	nouveau_bo_placement_set(nvbo, memtype, 0);
448 
449 	/* drop pin_refcnt temporarily, so we don't trip the assertion
450 	 * in nouveau_bo_move() that makes sure we're not trying to
451 	 * move a pinned buffer
452 	 */
453 	nvbo->pin_refcnt--;
454 	ret = nouveau_bo_validate(nvbo, false, false);
455 	if (ret)
456 		goto out;
457 	nvbo->pin_refcnt++;
458 
459 	switch (bo->mem.mem_type) {
460 	case TTM_PL_VRAM:
461 		drm->gem.vram_available -= bo->mem.size;
462 		break;
463 	case TTM_PL_TT:
464 		drm->gem.gart_available -= bo->mem.size;
465 		break;
466 	default:
467 		break;
468 	}
469 
470 out:
471 	if (force && ret)
472 		nvbo->contig = false;
473 	ttm_bo_unreserve(bo);
474 	return ret;
475 }
476 
477 int
478 nouveau_bo_unpin(struct nouveau_bo *nvbo)
479 {
480 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
481 	struct ttm_buffer_object *bo = &nvbo->bo;
482 	int ret, ref;
483 
484 	ret = ttm_bo_reserve(bo, false, false, NULL);
485 	if (ret)
486 		return ret;
487 
488 	ref = --nvbo->pin_refcnt;
489 	WARN_ON_ONCE(ref < 0);
490 	if (ref)
491 		goto out;
492 
493 	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
494 
495 	ret = nouveau_bo_validate(nvbo, false, false);
496 	if (ret == 0) {
497 		switch (bo->mem.mem_type) {
498 		case TTM_PL_VRAM:
499 			drm->gem.vram_available += bo->mem.size;
500 			break;
501 		case TTM_PL_TT:
502 			drm->gem.gart_available += bo->mem.size;
503 			break;
504 		default:
505 			break;
506 		}
507 	}
508 
509 out:
510 	ttm_bo_unreserve(bo);
511 	return ret;
512 }
513 
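/*
 * Accounting note for the two functions above: a successful pin into VRAM
 * (or GART) subtracts the object's size from drm->gem.vram_available (or
 * gart_available), and the matching unpin adds it back once the last pin
 * reference is dropped and the buffer has been re-validated.
 */
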
514 int
515 nouveau_bo_map(struct nouveau_bo *nvbo)
516 {
517 	int ret;
518 
519 	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
520 	if (ret)
521 		return ret;
522 
523 	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
524 
525 	ttm_bo_unreserve(&nvbo->bo);
526 	return ret;
527 }
528 
529 void
530 nouveau_bo_unmap(struct nouveau_bo *nvbo)
531 {
532 	if (!nvbo)
533 		return;
534 
535 	ttm_bo_kunmap(&nvbo->kmap);
536 }
537 
538 void
539 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
540 {
541 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
542 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
543 	int i;
544 
545 	if (!ttm_dma)
546 		return;
547 
548 	/* Don't waste time looping if the object is coherent */
549 	if (nvbo->force_coherent)
550 		return;
551 
552 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
553 		dma_sync_single_for_device(drm->dev->dev,
554 					   ttm_dma->dma_address[i],
555 					   PAGE_SIZE, DMA_TO_DEVICE);
556 }
557 
558 void
559 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
560 {
561 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
562 	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
563 	int i;
564 
565 	if (!ttm_dma)
566 		return;
567 
568 	/* Don't waste time looping if the object is coherent */
569 	if (nvbo->force_coherent)
570 		return;
571 
572 	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
573 		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
574 					PAGE_SIZE, DMA_FROM_DEVICE);
575 }
576 
577 int
578 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
579 		    bool no_wait_gpu)
580 {
581 	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
582 	int ret;
583 
584 	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
585 	if (ret)
586 		return ret;
587 
588 	nouveau_bo_sync_for_device(nvbo);
589 
590 	return 0;
591 }
592 
593 void
594 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
595 {
596 	bool is_iomem;
597 	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
598 
599 	mem += index;
600 
601 	if (is_iomem)
602 		iowrite16_native(val, (void __force __iomem *)mem);
603 	else
604 		*mem = val;
605 }
606 
607 u32
608 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
609 {
610 	bool is_iomem;
611 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
612 
613 	mem += index;
614 
615 	if (is_iomem)
616 		return ioread32_native((void __force __iomem *)mem);
617 	else
618 		return *mem;
619 }
620 
621 void
622 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
623 {
624 	bool is_iomem;
625 	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
626 
627 	mem += index;
628 
629 	if (is_iomem)
630 		iowrite32_native(val, (void __force __iomem *)mem);
631 	else
632 		*mem = val;
633 }
634 
635 static struct ttm_tt *
636 nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
637 {
638 #if IS_ENABLED(CONFIG_AGP)
639 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
640 
641 	if (drm->agp.bridge) {
642 		return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
643 	}
644 #endif
645 
646 	return nouveau_sgdma_create_ttm(bo, page_flags);
647 }
648 
649 static int
650 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
651 {
652 	/* We'll do this from user space. */
653 	return 0;
654 }
655 
656 static int
657 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
658 			 struct ttm_mem_type_manager *man)
659 {
660 	struct nouveau_drm *drm = nouveau_bdev(bdev);
661 	struct nvif_mmu *mmu = &drm->client.mmu;
662 
663 	switch (type) {
664 	case TTM_PL_SYSTEM:
665 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
666 		man->available_caching = TTM_PL_MASK_CACHING;
667 		man->default_caching = TTM_PL_FLAG_CACHED;
668 		break;
669 	case TTM_PL_VRAM:
670 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
671 			     TTM_MEMTYPE_FLAG_MAPPABLE;
672 		man->available_caching = TTM_PL_FLAG_UNCACHED |
673 					 TTM_PL_FLAG_WC;
674 		man->default_caching = TTM_PL_FLAG_WC;
675 
676 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
677 			/* Some BARs do not support being ioremapped WC */
678 			const u8 type = mmu->type[drm->ttm.type_vram].type;
679 			if (type & NVIF_MEM_UNCACHED) {
680 				man->available_caching = TTM_PL_FLAG_UNCACHED;
681 				man->default_caching = TTM_PL_FLAG_UNCACHED;
682 			}
683 
684 			man->func = &nouveau_vram_manager;
685 			man->io_reserve_fastpath = false;
686 			man->use_io_reserve_lru = true;
687 		} else {
688 			man->func = &ttm_bo_manager_func;
689 		}
690 		break;
691 	case TTM_PL_TT:
692 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
693 			man->func = &nouveau_gart_manager;
694 		else
695 		if (!drm->agp.bridge)
696 			man->func = &nv04_gart_manager;
697 		else
698 			man->func = &ttm_bo_manager_func;
699 
700 		if (drm->agp.bridge) {
701 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
702 			man->available_caching = TTM_PL_FLAG_UNCACHED |
703 				TTM_PL_FLAG_WC;
704 			man->default_caching = TTM_PL_FLAG_WC;
705 		} else {
706 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
707 				     TTM_MEMTYPE_FLAG_CMA;
708 			man->available_caching = TTM_PL_MASK_CACHING;
709 			man->default_caching = TTM_PL_FLAG_CACHED;
710 		}
711 
712 		break;
713 	default:
714 		return -EINVAL;
715 	}
716 	return 0;
717 }
718 
719 static void
720 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
721 {
722 	struct nouveau_bo *nvbo = nouveau_bo(bo);
723 
724 	switch (bo->mem.mem_type) {
725 	case TTM_PL_VRAM:
726 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
727 					 TTM_PL_FLAG_SYSTEM);
728 		break;
729 	default:
730 		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
731 		break;
732 	}
733 
734 	*pl = nvbo->placement;
735 }
736 
737 
738 static int
739 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
740 {
741 	int ret = RING_SPACE(chan, 2);
742 	if (ret == 0) {
743 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
744 		OUT_RING  (chan, handle & 0x0000ffff);
745 		FIRE_RING (chan);
746 	}
747 	return ret;
748 }
749 
750 static int
751 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
752 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
753 {
754 	struct nouveau_mem *mem = nouveau_mem(old_reg);
755 	int ret = RING_SPACE(chan, 10);
756 	if (ret == 0) {
757 		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
758 		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
759 		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
760 		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
761 		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
762 		OUT_RING  (chan, PAGE_SIZE);
763 		OUT_RING  (chan, PAGE_SIZE);
764 		OUT_RING  (chan, PAGE_SIZE);
765 		OUT_RING  (chan, new_reg->num_pages);
766 		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
767 	}
768 	return ret;
769 }
770 
771 static int
772 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
773 {
774 	int ret = RING_SPACE(chan, 2);
775 	if (ret == 0) {
776 		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
777 		OUT_RING  (chan, handle);
778 	}
779 	return ret;
780 }
781 
782 static int
783 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
784 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
785 {
786 	struct nouveau_mem *mem = nouveau_mem(old_reg);
787 	u64 src_offset = mem->vma[0].addr;
788 	u64 dst_offset = mem->vma[1].addr;
789 	u32 page_count = new_reg->num_pages;
790 	int ret;
791 
792 	page_count = new_reg->num_pages;
793 	while (page_count) {
794 		int line_count = (page_count > 8191) ? 8191 : page_count;
795 
796 		ret = RING_SPACE(chan, 11);
797 		if (ret)
798 			return ret;
799 
800 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
801 		OUT_RING  (chan, upper_32_bits(src_offset));
802 		OUT_RING  (chan, lower_32_bits(src_offset));
803 		OUT_RING  (chan, upper_32_bits(dst_offset));
804 		OUT_RING  (chan, lower_32_bits(dst_offset));
805 		OUT_RING  (chan, PAGE_SIZE);
806 		OUT_RING  (chan, PAGE_SIZE);
807 		OUT_RING  (chan, PAGE_SIZE);
808 		OUT_RING  (chan, line_count);
809 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
810 		OUT_RING  (chan, 0x00000110);
811 
812 		page_count -= line_count;
813 		src_offset += (PAGE_SIZE * line_count);
814 		dst_offset += (PAGE_SIZE * line_count);
815 	}
816 
817 	return 0;
818 }
819 
820 static int
821 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
822 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
823 {
824 	struct nouveau_mem *mem = nouveau_mem(old_reg);
825 	u64 src_offset = mem->vma[0].addr;
826 	u64 dst_offset = mem->vma[1].addr;
827 	u32 page_count = new_reg->num_pages;
828 	int ret;
829 
830 	page_count = new_reg->num_pages;
831 	while (page_count) {
832 		int line_count = (page_count > 2047) ? 2047 : page_count;
833 
834 		ret = RING_SPACE(chan, 12);
835 		if (ret)
836 			return ret;
837 
838 		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
839 		OUT_RING  (chan, upper_32_bits(dst_offset));
840 		OUT_RING  (chan, lower_32_bits(dst_offset));
841 		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
842 		OUT_RING  (chan, upper_32_bits(src_offset));
843 		OUT_RING  (chan, lower_32_bits(src_offset));
844 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
845 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
846 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
847 		OUT_RING  (chan, line_count);
848 		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
849 		OUT_RING  (chan, 0x00100110);
850 
851 		page_count -= line_count;
852 		src_offset += (PAGE_SIZE * line_count);
853 		dst_offset += (PAGE_SIZE * line_count);
854 	}
855 
856 	return 0;
857 }
858 
859 static int
860 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
861 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
862 {
863 	struct nouveau_mem *mem = nouveau_mem(old_reg);
864 	u64 src_offset = mem->vma[0].addr;
865 	u64 dst_offset = mem->vma[1].addr;
866 	u32 page_count = new_reg->num_pages;
867 	int ret;
868 
869 	page_count = new_reg->num_pages;
870 	while (page_count) {
871 		int line_count = (page_count > 8191) ? 8191 : page_count;
872 
873 		ret = RING_SPACE(chan, 11);
874 		if (ret)
875 			return ret;
876 
877 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
878 		OUT_RING  (chan, upper_32_bits(src_offset));
879 		OUT_RING  (chan, lower_32_bits(src_offset));
880 		OUT_RING  (chan, upper_32_bits(dst_offset));
881 		OUT_RING  (chan, lower_32_bits(dst_offset));
882 		OUT_RING  (chan, PAGE_SIZE);
883 		OUT_RING  (chan, PAGE_SIZE);
884 		OUT_RING  (chan, PAGE_SIZE);
885 		OUT_RING  (chan, line_count);
886 		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
887 		OUT_RING  (chan, 0x00000110);
888 
889 		page_count -= line_count;
890 		src_offset += (PAGE_SIZE * line_count);
891 		dst_offset += (PAGE_SIZE * line_count);
892 	}
893 
894 	return 0;
895 }
896 
897 static int
898 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
899 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
900 {
901 	struct nouveau_mem *mem = nouveau_mem(old_reg);
902 	int ret = RING_SPACE(chan, 7);
903 	if (ret == 0) {
904 		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
905 		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
906 		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
907 		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
908 		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
909 		OUT_RING  (chan, 0x00000000 /* COPY */);
910 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
911 	}
912 	return ret;
913 }
914 
915 static int
916 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
917 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
918 {
919 	struct nouveau_mem *mem = nouveau_mem(old_reg);
920 	int ret = RING_SPACE(chan, 7);
921 	if (ret == 0) {
922 		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
923 		OUT_RING  (chan, new_reg->num_pages << PAGE_SHIFT);
924 		OUT_RING  (chan, upper_32_bits(mem->vma[0].addr));
925 		OUT_RING  (chan, lower_32_bits(mem->vma[0].addr));
926 		OUT_RING  (chan, upper_32_bits(mem->vma[1].addr));
927 		OUT_RING  (chan, lower_32_bits(mem->vma[1].addr));
928 		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
929 	}
930 	return ret;
931 }
932 
933 static int
934 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
935 {
936 	int ret = RING_SPACE(chan, 6);
937 	if (ret == 0) {
938 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
939 		OUT_RING  (chan, handle);
940 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
941 		OUT_RING  (chan, chan->drm->ntfy.handle);
942 		OUT_RING  (chan, chan->vram.handle);
943 		OUT_RING  (chan, chan->vram.handle);
944 	}
945 
946 	return ret;
947 }
948 
949 static int
950 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
951 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
952 {
953 	struct nouveau_mem *mem = nouveau_mem(old_reg);
954 	u64 length = (new_reg->num_pages << PAGE_SHIFT);
955 	u64 src_offset = mem->vma[0].addr;
956 	u64 dst_offset = mem->vma[1].addr;
957 	int src_tiled = !!mem->kind;
958 	int dst_tiled = !!nouveau_mem(new_reg)->kind;
959 	int ret;
960 
961 	while (length) {
962 		u32 amount, stride, height;
963 
964 		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
965 		if (ret)
966 			return ret;
967 
968 		amount  = min(length, (u64)(4 * 1024 * 1024));
969 		stride  = 16 * 4;
970 		height  = amount / stride;
971 
972 		if (src_tiled) {
973 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
974 			OUT_RING  (chan, 0);
975 			OUT_RING  (chan, 0);
976 			OUT_RING  (chan, stride);
977 			OUT_RING  (chan, height);
978 			OUT_RING  (chan, 1);
979 			OUT_RING  (chan, 0);
980 			OUT_RING  (chan, 0);
981 		} else {
982 			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
983 			OUT_RING  (chan, 1);
984 		}
985 		if (dst_tiled) {
986 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
987 			OUT_RING  (chan, 0);
988 			OUT_RING  (chan, 0);
989 			OUT_RING  (chan, stride);
990 			OUT_RING  (chan, height);
991 			OUT_RING  (chan, 1);
992 			OUT_RING  (chan, 0);
993 			OUT_RING  (chan, 0);
994 		} else {
995 			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
996 			OUT_RING  (chan, 1);
997 		}
998 
999 		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
1000 		OUT_RING  (chan, upper_32_bits(src_offset));
1001 		OUT_RING  (chan, upper_32_bits(dst_offset));
1002 		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
1003 		OUT_RING  (chan, lower_32_bits(src_offset));
1004 		OUT_RING  (chan, lower_32_bits(dst_offset));
1005 		OUT_RING  (chan, stride);
1006 		OUT_RING  (chan, stride);
1007 		OUT_RING  (chan, stride);
1008 		OUT_RING  (chan, height);
1009 		OUT_RING  (chan, 0x00000101);
1010 		OUT_RING  (chan, 0x00000000);
1011 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1012 		OUT_RING  (chan, 0);
1013 
1014 		length -= amount;
1015 		src_offset += amount;
1016 		dst_offset += amount;
1017 	}
1018 
1019 	return 0;
1020 }
1021 
1022 static int
1023 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
1024 {
1025 	int ret = RING_SPACE(chan, 4);
1026 	if (ret == 0) {
1027 		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
1028 		OUT_RING  (chan, handle);
1029 		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
1030 		OUT_RING  (chan, chan->drm->ntfy.handle);
1031 	}
1032 
1033 	return ret;
1034 }
1035 
1036 static inline uint32_t
1037 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
1038 		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
1039 {
1040 	if (reg->mem_type == TTM_PL_TT)
1041 		return NvDmaTT;
1042 	return chan->vram.handle;
1043 }
1044 
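/*
 * The helper above picks the DMA context object for each end of an NV04
 * M2MF copy: a region in GART (TTM_PL_TT) uses the NvDmaTT handle, while
 * anything else uses the channel's VRAM ctxdma (chan->vram.handle).
 */
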
1045 static int
1046 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
1047 		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
1048 {
1049 	u32 src_offset = old_reg->start << PAGE_SHIFT;
1050 	u32 dst_offset = new_reg->start << PAGE_SHIFT;
1051 	u32 page_count = new_reg->num_pages;
1052 	int ret;
1053 
1054 	ret = RING_SPACE(chan, 3);
1055 	if (ret)
1056 		return ret;
1057 
1058 	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
1059 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
1060 	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
1061 
1062 	page_count = new_reg->num_pages;
1063 	while (page_count) {
1064 		int line_count = (page_count > 2047) ? 2047 : page_count;
1065 
1066 		ret = RING_SPACE(chan, 11);
1067 		if (ret)
1068 			return ret;
1069 
1070 		BEGIN_NV04(chan, NvSubCopy,
1071 				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
1072 		OUT_RING  (chan, src_offset);
1073 		OUT_RING  (chan, dst_offset);
1074 		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
1075 		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
1076 		OUT_RING  (chan, PAGE_SIZE); /* line_length */
1077 		OUT_RING  (chan, line_count);
1078 		OUT_RING  (chan, 0x00000101);
1079 		OUT_RING  (chan, 0x00000000);
1080 		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1081 		OUT_RING  (chan, 0);
1082 
1083 		page_count -= line_count;
1084 		src_offset += (PAGE_SIZE * line_count);
1085 		dst_offset += (PAGE_SIZE * line_count);
1086 	}
1087 
1088 	return 0;
1089 }
1090 
1091 static int
1092 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
1093 		     struct ttm_mem_reg *reg)
1094 {
1095 	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
1096 	struct nouveau_mem *new_mem = nouveau_mem(reg);
1097 	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
1098 	int ret;
1099 
1100 	ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
1101 			   old_mem->mem.size, &old_mem->vma[0]);
1102 	if (ret)
1103 		return ret;
1104 
1105 	ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
1106 			   new_mem->mem.size, &old_mem->vma[1]);
1107 	if (ret)
1108 		goto done;
1109 
1110 	ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
1111 	if (ret)
1112 		goto done;
1113 
1114 	ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
1115 done:
1116 	if (ret) {
1117 		nvif_vmm_put(vmm, &old_mem->vma[1]);
1118 		nvif_vmm_put(vmm, &old_mem->vma[0]);
1119 	}
1120 	return ret;
1121 }
1122 
1123 static int
1124 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1125 		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1126 {
1127 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1128 	struct nouveau_channel *chan = drm->ttm.chan;
1129 	struct nouveau_cli *cli = (void *)chan->user.client;
1130 	struct nouveau_fence *fence;
1131 	int ret;
1132 
1133 	/* create temporary vmas for the transfer and attach them to the
1134 	 * old nvkm_mem node, these will get cleaned up after ttm has
1135 	 * destroyed the ttm_mem_reg
1136 	 */
1137 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1138 		ret = nouveau_bo_move_prep(drm, bo, new_reg);
1139 		if (ret)
1140 			return ret;
1141 	}
1142 
1143 	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1144 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1145 	if (ret == 0) {
1146 		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
1147 		if (ret == 0) {
1148 			ret = nouveau_fence_new(chan, false, &fence);
1149 			if (ret == 0) {
1150 				ret = ttm_bo_move_accel_cleanup(bo,
1151 								&fence->base,
1152 								evict,
1153 								new_reg);
1154 				nouveau_fence_unref(&fence);
1155 			}
1156 		}
1157 	}
1158 	mutex_unlock(&cli->mutex);
1159 	return ret;
1160 }
1161 
1162 void
1163 nouveau_bo_move_init(struct nouveau_drm *drm)
1164 {
1165 	static const struct {
1166 		const char *name;
1167 		int engine;
1168 		s32 oclass;
1169 		int (*exec)(struct nouveau_channel *,
1170 			    struct ttm_buffer_object *,
1171 			    struct ttm_mem_reg *, struct ttm_mem_reg *);
1172 		int (*init)(struct nouveau_channel *, u32 handle);
1173 	} _methods[] = {
1174 		{  "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
1175 		{  "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
1176 		{  "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
1177 		{  "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
1178 		{  "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1179 		{  "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1180 		{  "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1181 		{  "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1182 		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1183 		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1184 		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1185 		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1186 		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1187 		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1188 		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1189 		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1190 		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1191 		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1192 		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1193 		{},
1194 		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1195 	}, *mthd = _methods;
1196 	const char *name = "CPU";
1197 	int ret;
1198 
1199 	do {
1200 		struct nouveau_channel *chan;
1201 
1202 		if (mthd->engine)
1203 			chan = drm->cechan;
1204 		else
1205 			chan = drm->channel;
1206 		if (chan == NULL)
1207 			continue;
1208 
1209 		ret = nvif_object_init(&chan->user,
1210 				       mthd->oclass | (mthd->engine << 16),
1211 				       mthd->oclass, NULL, 0,
1212 				       &drm->ttm.copy);
1213 		if (ret == 0) {
1214 			ret = mthd->init(chan, drm->ttm.copy.handle);
1215 			if (ret) {
1216 				nvif_object_fini(&drm->ttm.copy);
1217 				continue;
1218 			}
1219 
1220 			drm->ttm.move = mthd->exec;
1221 			drm->ttm.chan = chan;
1222 			name = mthd->name;
1223 			break;
1224 		}
1225 	} while ((++mthd)->exec);
1226 
1227 	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1228 }
1229 
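/*
 * nouveau_bo_move_init() walks its method table in order: each entry is
 * tried on the matching channel (drm->cechan when mthd->engine is non-zero,
 * drm->channel otherwise), and the first class that can be allocated and
 * initialised becomes drm->ttm.move.  Note that the empty terminator entry
 * placed before the 0x88b4 CRYPT line means the loop never reaches it.
 */
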
1230 static int
1231 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1232 		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1233 {
1234 	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1235 	struct ttm_place placement_memtype = {
1236 		.fpfn = 0,
1237 		.lpfn = 0,
1238 		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1239 	};
1240 	struct ttm_placement placement;
1241 	struct ttm_mem_reg tmp_reg;
1242 	int ret;
1243 
1244 	placement.num_placement = placement.num_busy_placement = 1;
1245 	placement.placement = placement.busy_placement = &placement_memtype;
1246 
1247 	tmp_reg = *new_reg;
1248 	tmp_reg.mm_node = NULL;
1249 	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1250 	if (ret)
1251 		return ret;
1252 
1253 	ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
1254 	if (ret)
1255 		goto out;
1256 
1257 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
1258 	if (ret)
1259 		goto out;
1260 
1261 	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
1262 out:
1263 	ttm_bo_mem_put(bo, &tmp_reg);
1264 	return ret;
1265 }
1266 
1267 static int
1268 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1269 		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1270 {
1271 	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1272 	struct ttm_place placement_memtype = {
1273 		.fpfn = 0,
1274 		.lpfn = 0,
1275 		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1276 	};
1277 	struct ttm_placement placement;
1278 	struct ttm_mem_reg tmp_reg;
1279 	int ret;
1280 
1281 	placement.num_placement = placement.num_busy_placement = 1;
1282 	placement.placement = placement.busy_placement = &placement_memtype;
1283 
1284 	tmp_reg = *new_reg;
1285 	tmp_reg.mm_node = NULL;
1286 	ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1287 	if (ret)
1288 		return ret;
1289 
1290 	ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
1291 	if (ret)
1292 		goto out;
1293 
1294 	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1295 	if (ret)
1296 		goto out;
1297 
1298 out:
1299 	ttm_bo_mem_put(bo, &tmp_reg);
1300 	return ret;
1301 }
1302 
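/*
 * The two helpers above bounce a buffer through a temporary GART (TT)
 * placement when moving between VRAM and system memory: _flipd copies
 * VRAM -> temporary TT with the copy engine and lets ttm_bo_move_ttm()
 * finish the trip to system pages, while _flips performs the same two
 * steps in the opposite order for system -> VRAM moves.
 */
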
1303 static void
1304 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1305 		     struct ttm_mem_reg *new_reg)
1306 {
1307 	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
1308 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1309 	struct nouveau_vma *vma;
1310 
1311 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
1312 	if (bo->destroy != nouveau_bo_del_ttm)
1313 		return;
1314 
1315 	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
1316 	    mem->mem.page == nvbo->page) {
1317 		list_for_each_entry(vma, &nvbo->vma_list, head) {
1318 			nouveau_vma_map(vma, mem);
1319 		}
1320 	} else {
1321 		list_for_each_entry(vma, &nvbo->vma_list, head) {
1322 			WARN_ON(ttm_bo_wait(bo, false, false));
1323 			nouveau_vma_unmap(vma);
1324 		}
1325 	}
1326 }
1327 
1328 static int
1329 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
1330 		   struct nouveau_drm_tile **new_tile)
1331 {
1332 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1333 	struct drm_device *dev = drm->dev;
1334 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1335 	u64 offset = new_reg->start << PAGE_SHIFT;
1336 
1337 	*new_tile = NULL;
1338 	if (new_reg->mem_type != TTM_PL_VRAM)
1339 		return 0;
1340 
1341 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1342 		*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
1343 					       nvbo->mode, nvbo->zeta);
1344 	}
1345 
1346 	return 0;
1347 }
1348 
1349 static void
1350 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1351 		      struct nouveau_drm_tile *new_tile,
1352 		      struct nouveau_drm_tile **old_tile)
1353 {
1354 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1355 	struct drm_device *dev = drm->dev;
1356 	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
1357 
1358 	nv10_bo_put_tile_region(dev, *old_tile, fence);
1359 	*old_tile = new_tile;
1360 }
1361 
1362 static int
1363 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
1364 		struct ttm_operation_ctx *ctx,
1365 		struct ttm_mem_reg *new_reg)
1366 {
1367 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1368 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1369 	struct ttm_mem_reg *old_reg = &bo->mem;
1370 	struct nouveau_drm_tile *new_tile = NULL;
1371 	int ret = 0;
1372 
1373 	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1374 	if (ret)
1375 		return ret;
1376 
1377 	if (nvbo->pin_refcnt)
1378 		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1379 
1380 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1381 		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1382 		if (ret)
1383 			return ret;
1384 	}
1385 
1386 	/* Fake bo copy. */
1387 	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1388 		BUG_ON(bo->mem.mm_node != NULL);
1389 		bo->mem = *new_reg;
1390 		new_reg->mm_node = NULL;
1391 		goto out;
1392 	}
1393 
1394 	/* Hardware assisted copy. */
1395 	if (drm->ttm.move) {
1396 		if (new_reg->mem_type == TTM_PL_SYSTEM)
1397 			ret = nouveau_bo_move_flipd(bo, evict,
1398 						    ctx->interruptible,
1399 						    ctx->no_wait_gpu, new_reg);
1400 		else if (old_reg->mem_type == TTM_PL_SYSTEM)
1401 			ret = nouveau_bo_move_flips(bo, evict,
1402 						    ctx->interruptible,
1403 						    ctx->no_wait_gpu, new_reg);
1404 		else
1405 			ret = nouveau_bo_move_m2mf(bo, evict,
1406 						   ctx->interruptible,
1407 						   ctx->no_wait_gpu, new_reg);
1408 		if (!ret)
1409 			goto out;
1410 	}
1411 
1412 	/* Fallback to software copy. */
1413 	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1414 	if (ret == 0)
1415 		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
1416 
1417 out:
1418 	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1419 		if (ret)
1420 			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1421 		else
1422 			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1423 	}
1424 
1425 	return ret;
1426 }
1427 
1428 static int
1429 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1430 {
1431 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1432 
1433 	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
1434 					  filp->private_data);
1435 }
1436 
1437 static int
1438 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1439 {
1440 	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
1441 	struct nouveau_drm *drm = nouveau_bdev(bdev);
1442 	struct nvkm_device *device = nvxx_device(&drm->client.device);
1443 	struct nouveau_mem *mem = nouveau_mem(reg);
1444 
1445 	reg->bus.addr = NULL;
1446 	reg->bus.offset = 0;
1447 	reg->bus.size = reg->num_pages << PAGE_SHIFT;
1448 	reg->bus.base = 0;
1449 	reg->bus.is_iomem = false;
1450 	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1451 		return -EINVAL;
1452 	switch (reg->mem_type) {
1453 	case TTM_PL_SYSTEM:
1454 		/* System memory */
1455 		return 0;
1456 	case TTM_PL_TT:
1457 #if IS_ENABLED(CONFIG_AGP)
1458 		if (drm->agp.bridge) {
1459 			reg->bus.offset = reg->start << PAGE_SHIFT;
1460 			reg->bus.base = drm->agp.base;
1461 			reg->bus.is_iomem = !drm->agp.cma;
1462 		}
1463 #endif
1464 		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
1465 			/* untiled */
1466 			break;
1467 		/* fall through - tiled memory */
1468 	case TTM_PL_VRAM:
1469 		reg->bus.offset = reg->start << PAGE_SHIFT;
1470 		reg->bus.base = device->func->resource_addr(device, 1);
1471 		reg->bus.is_iomem = true;
1472 		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1473 			union {
1474 				struct nv50_mem_map_v0 nv50;
1475 				struct gf100_mem_map_v0 gf100;
1476 			} args;
1477 			u64 handle, length;
1478 			u32 argc = 0;
1479 			int ret;
1480 
1481 			switch (mem->mem.object.oclass) {
1482 			case NVIF_CLASS_MEM_NV50:
1483 				args.nv50.version = 0;
1484 				args.nv50.ro = 0;
1485 				args.nv50.kind = mem->kind;
1486 				args.nv50.comp = mem->comp;
1487 				argc = sizeof(args.nv50);
1488 				break;
1489 			case NVIF_CLASS_MEM_GF100:
1490 				args.gf100.version = 0;
1491 				args.gf100.ro = 0;
1492 				args.gf100.kind = mem->kind;
1493 				argc = sizeof(args.gf100);
1494 				break;
1495 			default:
1496 				WARN_ON(1);
1497 				break;
1498 			}
1499 
1500 			ret = nvif_object_map_handle(&mem->mem.object,
1501 						     &args, argc,
1502 						     &handle, &length);
1503 			if (ret != 1)
1504 				return ret ? ret : -EINVAL;
1505 
1506 			reg->bus.base = 0;
1507 			reg->bus.offset = handle;
1508 		}
1509 		break;
1510 	default:
1511 		return -EINVAL;
1512 	}
1513 	return 0;
1514 }
1515 
1516 static void
1517 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1518 {
1519 	struct nouveau_drm *drm = nouveau_bdev(bdev);
1520 	struct nouveau_mem *mem = nouveau_mem(reg);
1521 
1522 	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1523 		switch (reg->mem_type) {
1524 		case TTM_PL_TT:
1525 			if (mem->kind)
1526 				nvif_object_unmap_handle(&mem->mem.object);
1527 			break;
1528 		case TTM_PL_VRAM:
1529 			nvif_object_unmap_handle(&mem->mem.object);
1530 			break;
1531 		default:
1532 			break;
1533 		}
1534 	}
1535 }
1536 
1537 static int
1538 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1539 {
1540 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1541 	struct nouveau_bo *nvbo = nouveau_bo(bo);
1542 	struct nvkm_device *device = nvxx_device(&drm->client.device);
1543 	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1544 	int i, ret;
1545 
1546 	/* as long as the bo isn't in vram, and isn't tiled, we've got
1547 	 * nothing to do here.
1548 	 */
1549 	if (bo->mem.mem_type != TTM_PL_VRAM) {
1550 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1551 		    !nvbo->kind)
1552 			return 0;
1553 
1554 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1555 			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1556 
1557 			ret = nouveau_bo_validate(nvbo, false, false);
1558 			if (ret)
1559 				return ret;
1560 		}
1561 		return 0;
1562 	}
1563 
1564 	/* make sure bo is in mappable vram */
1565 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1566 	    bo->mem.start + bo->mem.num_pages < mappable)
1567 		return 0;
1568 
1569 	for (i = 0; i < nvbo->placement.num_placement; ++i) {
1570 		nvbo->placements[i].fpfn = 0;
1571 		nvbo->placements[i].lpfn = mappable;
1572 	}
1573 
1574 	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1575 		nvbo->busy_placements[i].fpfn = 0;
1576 		nvbo->busy_placements[i].lpfn = mappable;
1577 	}
1578 
1579 	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1580 	return nouveau_bo_validate(nvbo, false, false);
1581 }
1582 
1583 static int
1584 nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1585 {
1586 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1587 	struct nouveau_drm *drm;
1588 	struct device *dev;
1589 	unsigned i;
1590 	int r;
1591 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1592 
1593 	if (ttm->state != tt_unpopulated)
1594 		return 0;
1595 
1596 	if (slave && ttm->sg) {
1597 		/* make userspace faulting work */
1598 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1599 						 ttm_dma->dma_address, ttm->num_pages);
1600 		ttm->state = tt_unbound;
1601 		return 0;
1602 	}
1603 
1604 	drm = nouveau_bdev(ttm->bdev);
1605 	dev = drm->dev->dev;
1606 
1607 #if IS_ENABLED(CONFIG_AGP)
1608 	if (drm->agp.bridge) {
1609 		return ttm_agp_tt_populate(ttm, ctx);
1610 	}
1611 #endif
1612 
1613 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1614 	if (swiotlb_nr_tbl()) {
1615 		return ttm_dma_populate((void *)ttm, dev, ctx);
1616 	}
1617 #endif
1618 
1619 	r = ttm_pool_populate(ttm, ctx);
1620 	if (r) {
1621 		return r;
1622 	}
1623 
1624 	for (i = 0; i < ttm->num_pages; i++) {
1625 		dma_addr_t addr;
1626 
1627 		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
1628 				    DMA_BIDIRECTIONAL);
1629 
1630 		if (dma_mapping_error(dev, addr)) {
1631 			while (i--) {
1632 				dma_unmap_page(dev, ttm_dma->dma_address[i],
1633 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
1634 				ttm_dma->dma_address[i] = 0;
1635 			}
1636 			ttm_pool_unpopulate(ttm);
1637 			return -EFAULT;
1638 		}
1639 
1640 		ttm_dma->dma_address[i] = addr;
1641 	}
1642 	return 0;
1643 }
1644 
1645 static void
1646 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1647 {
1648 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
1649 	struct nouveau_drm *drm;
1650 	struct device *dev;
1651 	unsigned i;
1652 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1653 
1654 	if (slave)
1655 		return;
1656 
1657 	drm = nouveau_bdev(ttm->bdev);
1658 	dev = drm->dev->dev;
1659 
1660 #if IS_ENABLED(CONFIG_AGP)
1661 	if (drm->agp.bridge) {
1662 		ttm_agp_tt_unpopulate(ttm);
1663 		return;
1664 	}
1665 #endif
1666 
1667 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1668 	if (swiotlb_nr_tbl()) {
1669 		ttm_dma_unpopulate((void *)ttm, dev);
1670 		return;
1671 	}
1672 #endif
1673 
1674 	for (i = 0; i < ttm->num_pages; i++) {
1675 		if (ttm_dma->dma_address[i]) {
1676 			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
1677 				       DMA_BIDIRECTIONAL);
1678 		}
1679 	}
1680 
1681 	ttm_pool_unpopulate(ttm);
1682 }
1683 
1684 void
1685 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1686 {
1687 	struct dma_resv *resv = nvbo->bo.base.resv;
1688 
1689 	if (exclusive)
1690 		dma_resv_add_excl_fence(resv, &fence->base);
1691 	else if (fence)
1692 		dma_resv_add_shared_fence(resv, &fence->base);
1693 }
1694 
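/*
 * Minimal usage sketch (names as in this file): after submitting GPU work
 * that writes the buffer, nouveau_bo_fence(nvbo, fence, true) attaches the
 * fence as the exclusive fence of the buffer's reservation object, so later
 * users must wait for it; exclusive == false adds it as a shared (read)
 * fence instead.
 */
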
1695 struct ttm_bo_driver nouveau_bo_driver = {
1696 	.ttm_tt_create = &nouveau_ttm_tt_create,
1697 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
1698 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1699 	.invalidate_caches = nouveau_bo_invalidate_caches,
1700 	.init_mem_type = nouveau_bo_init_mem_type,
1701 	.eviction_valuable = ttm_bo_eviction_valuable,
1702 	.evict_flags = nouveau_bo_evict_flags,
1703 	.move_notify = nouveau_bo_move_ntfy,
1704 	.move = nouveau_bo_move,
1705 	.verify_access = nouveau_bo_verify_access,
1706 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1707 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1708 	.io_mem_free = &nouveau_ttm_io_mem_free,
1709 };
1710