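/*
 * TTM scatter/gather DMA backends for nouveau.  Pre-Tesla (nv04-class)
 * chips map and unmap GPU pages in the bind/unbind hooks; Tesla and
 * later (nv50-class) chips defer both to the driver's move_notify()
 * hook.
 */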
#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drv.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
	/* this has to be the first field so the populate/unpopulate hooks
	 * in nouveau_bo.c work properly; otherwise they would have to
	 * move here
	 */
	struct ttm_dma_tt ttm;
	struct nvkm_mem *node;
};

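/*
 * Common destructor for both backends: tear down the DMA-aware TTM
 * state and free the backend object itself.
 */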
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	if (ttm) {
		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}

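/*
 * Pre-Tesla bind: record where the backing pages live (an sg table for
 * imported buffers, otherwise the DMA addresses of our own pages) and
 * map them into the GPU's address space right away.
 */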
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = mem->mm_node;

	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	/* CPU pages -> bytes -> 4 KiB GPU pages */
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

	nvkm_vm_map(&node->vma[0], node);
	nvbe->node = node;
	return 0;
}

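/*
 * Pre-Tesla unbind: drop the GPU mapping created by nv04_sgdma_bind().
 */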
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	nvkm_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}

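/* Backend hooks for pre-Tesla (nv04-class) GPUs. */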
static struct ttm_backend_func nv04_sgdma_backend = {
	.bind			= nv04_sgdma_bind,
	.unbind			= nv04_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

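/*
 * Tesla+ bind: record the backing pages exactly as the nv04 path does,
 * but leave the GPU VM alone; the driver's move_notify() hook performs
 * the actual mapping.
 */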
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nvkm_mem *node = mem->mm_node;

	/* noop: bound in move_notify() */
	if (ttm->sg) {
		node->sg    = ttm->sg;
		node->pages = NULL;
	} else {
		node->sg    = NULL;
		node->pages = nvbe->ttm.dma_address;
	}
	/* CPU pages -> bytes -> 4 KiB GPU pages */
	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
	return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	/* noop: unbound in move_notify() */
	return 0;
}

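/* Backend hooks for Tesla and later (nv50-class) GPUs. */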
static struct ttm_backend_func nv50_sgdma_backend = {
	.bind			= nv50_sgdma_bind,
	.unbind			= nv50_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};

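/*
 * Allocate a TTM for a buffer object, picking the backend that matches
 * the chipset generation.
 */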
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe;

	nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
	if (!nvbe)
		return NULL;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
	else
		nvbe->ttm.ttm.func = &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
		/*
		 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
		 * and thus our nouveau_sgdma_destroy() hook, so we don't need
		 * to free nvbe here.
		 */
		return NULL;
	return &nvbe->ttm.ttm;
}
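/*
 * Illustrative sketch (an assumption, not part of this file): in this
 * kernel era, the function above is typically wired up as the
 * ttm_tt_create hook of the driver's ttm_bo_driver in nouveau_bo.c,
 * roughly as below; the remaining hooks are elided.
 */
#if 0
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_sgdma_create_ttm,
	/* ... populate/unpopulate and move hooks ... */
};
#endif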