/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/gpuobj.h>

#define NV04_PDMA_SIZE (128 * 1024 * 1024)
#define NV04_PDMA_PAGE (  4 * 1024)

/*******************************************************************************
 * VM map/unmap callbacks
 ******************************************************************************/

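/*
 * Fill page-table entries for a scatterlist of DMA addresses.  Each CPU
 * page is split into PAGE_SIZE / NV04_PDMA_PAGE entries of 4KiB each, and
 * each PTE is written as the page address with the low bits (3) set to
 * mark the entry present.  The 0x00008 offset skips the two 32-bit words
 * of the DMA object header at the start of the page table (see
 * nv04_mmu_oneinit() below).
 */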
static void
nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
	       struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt) {
		u32 page = PAGE_SIZE / NV04_PDMA_PAGE;
		u32 phys = (u32)*list++;
		while (cnt && page--) {
			nvkm_wo32(pgt, pte, phys | 3);
			phys += NV04_PDMA_PAGE;
			pte += 4;
			cnt -= 1;
		}
	}
	nvkm_done(pgt);
}

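/*
 * Clear 'cnt' page-table entries starting at 'pte', again skipping the
 * 8-byte DMA object header at the start of the page table.
 */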
static void
nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
	pte = 0x00008 + (pte * 4);
	nvkm_kmap(pgt);
	while (cnt--) {
		nvkm_wo32(pgt, pte, 0x00000000);
		pte += 4;
	}
	nvkm_done(pgt);
}

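/*
 * Deliberately a no-op: on this generation the GART is a plain DMA object
 * walked on every access, so there is no TLB state to invalidate and PTE
 * writes take effect as soon as they land.
 */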
static void
nv04_vm_flush(struct nvkm_vm *vm)
{
}

/*******************************************************************************
 * MMU subdev
 ******************************************************************************/

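/*
 * Create the single VM covering the NV04_PDMA_SIZE aperture and build its
 * page table: an 8-byte DMA object header followed by one 32-bit PTE per
 * 4KiB page, hence the (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8 sizing.
 */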
static int
nv04_mmu_oneinit(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	struct nvkm_memory *dma;
	int ret;

	ret = nvkm_vm_create(&mmu->base, 0, NV04_PDMA_SIZE, 0, 4096, NULL,
			     &mmu->vm);
	if (ret)
		return ret;

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      (NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
			      16, true, &dma);
	if (ret)
		return ret;

	/* Publish the page table only once allocation has succeeded, so the
	 * destructor never sees an uninitialised pointer on the error path.
	 */
	mmu->vm->pgt[0].mem[0] = dma;
	mmu->vm->pgt[0].refcount[0] = 1;

	nvkm_kmap(dma);
	nvkm_wo32(dma, 0x00000, 0x0002103d); /* PCI, RW, PT, !LN */
	nvkm_wo32(dma, 0x00004, NV04_PDMA_SIZE - 1); /* limit */
	nvkm_done(dma);
	return 0;
}

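/*
 * Tear down in reverse order of creation: drop the page table, release the
 * VM, and free the scratch page if a chipset-specific constructor (NV44,
 * for example) allocated one.
 */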
void *
nv04_mmu_dtor(struct nvkm_mmu *base)
{
	struct nv04_mmu *mmu = nv04_mmu(base);
	struct nvkm_device *device = mmu->base.subdev.device;
	if (mmu->vm) {
		nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
		nvkm_vm_ref(NULL, &mmu->vm, NULL);
	}
	if (mmu->nullp) {
		dma_free_coherent(device->dev, 16 * 1024,
				  mmu->nullp, mmu->null);
	}
	return mmu;
}

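/*
 * Common constructor shared by the NV04-family MMU implementations:
 * allocates the nv04_mmu wrapper and initialises the embedded nvkm_mmu
 * with the chipset's function table.
 */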
int
nv04_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	struct nv04_mmu *mmu;
	if (!(mmu = kzalloc(sizeof(*mmu), GFP_KERNEL)))
		return -ENOMEM;
	*pmmu = &mmu->base;
	nvkm_mmu_ctor(func, device, index, &mmu->base);
	return 0;
}

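/*
 * NV04 supports only 4KiB pages, so the small- and large-page shifts are
 * identical, and a single page table describes the whole 32-bit,
 * 4KiB-paged address space (pgt_bits = 32 - 12).
 */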
const struct nvkm_mmu_func
nv04_mmu = {
	.oneinit = nv04_mmu_oneinit,
	.dtor = nv04_mmu_dtor,
	.limit = NV04_PDMA_SIZE,
	.dma_bits = 32,
	.pgt_bits = 32 - 12,
	.spg_shift = 12,
	.lpg_shift = 12,
	.map_sg = nv04_vm_map_sg,
	.unmap = nv04_vm_unmap,
	.flush = nv04_vm_flush,
};

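/* Entry point used by the device tables to instantiate an MMU driven by
 * the nv04 function table above.
 */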
int
nv04_mmu_new(struct nvkm_device *device, int index, struct nvkm_mmu **pmmu)
{
	return nv04_mmu_new_(&nv04_mmu, device, index, pmmu);
}