/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <core/gpuobj.h>
#include <core/mm.h>

#include <subdev/fb.h>
#include <subdev/vm.h>

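/*
 * Fill the GPU page tables so that @vma, starting @delta bytes in, points
 * at the memory regions described by @node.  Work is split at page-table
 * (PDE) boundaries; shifts by 12 reflect the minimum 4KiB GPU page size.
 */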
void
nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_mm_node *r;
	int big = vma->node->type != vmm->spg_shift; /* 0: small, 1: large pages */
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits); /* PTEs per page table */
	u32 end, len;

	/* from here on, delta is reused as a running count of bytes
	 * mapped so far, passed through to the backend */
	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

			/* clamp to the end of the current page table */
			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			vmm->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	vmm->flush(vm);
}

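/* Convenience wrapper: map @node at offset zero of @vma. */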
void
nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
{
	nouveau_vm_map_at(vma, 0, node);
}

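/*
 * Map a scatter-gather table into @vma one PAGE_SIZE page at a time.
 * An sg entry that straddles a page-table boundary is split, with its
 * tail mapped into the following page table.
 */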
void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
			struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		/* sg entry straddled a page table: map the tail into the
		 * next table (pgt must be refetched, pde moved above) */
		if (m < sglen) {
			pgt = vm->pgt[pde].obj[big];
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				vmm->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}
	}
finish:
	vmm->flush(vm);
}

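/*
 * Map a flat array of DMA addresses (mem->pages) into @vma, passing the
 * backend as many PTEs per call as fit in the current page table.
 */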
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
		  struct nouveau_mem *mem)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vmm->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vmm->flush(vm);
}

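/*
 * Clear the PTEs covering @length bytes of @vma starting at @delta, one
 * page table at a time, then flush the TLB.
 */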
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	int big = vma->node->type != vmm->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> vmm->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << vmm->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (vmm->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		vmm->unmap(pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	vmm->flush(vm);
}

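/* Unmap the entire range covered by @vma. */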
void
nouveau_vm_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

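/*
 * Drop one reference on each page table from @fpde to @lpde.  A table
 * whose refcount reaches zero is unbound from every attached page
 * directory and released; the mm mutex is dropped around the final
 * unref, presumably because the gpuobj destruction path must not run
 * under it.
 */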
static void
nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_vm_pgt *vpgt;
	struct nouveau_gpuobj *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->obj[big];
		vpgt->obj[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
		}

		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
	}
}

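/*
 * Allocate the page table backing @pde and point every attached page
 * directory at it.  The mm mutex is dropped around the allocation, so a
 * racing caller may fill the PDE first; if so, the new table is freed
 * and the existing one kept.
 */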
static int
nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nouveau_vm_pgd *vpgd;
	struct nouveau_gpuobj *pgt;
	int big = (type != vmm->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (vmm->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	mutex_unlock(&vm->mm.mutex);
	ret = nouveau_gpuobj_new(nv_object(vm->vmm), NULL, pgt_size, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
	mutex_lock(&vm->mm.mutex);
	if (unlikely(ret))
		return ret;

	/* someone beat us to filling the PDE while we didn't have the lock */
	if (unlikely(vpgt->refcount[big]++)) {
		mutex_unlock(&vm->mm.mutex);
		nouveau_gpuobj_ref(NULL, &pgt);
		mutex_lock(&vm->mm.mutex);
		return 0;
	}

	vpgt->obj[big] = pgt;
	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		vmm->map_pgt(vpgd->obj, pde, vpgt->obj);
	}

	return 0;
}

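/*
 * Carve @size bytes of address space out of @vm, aligned for pages of
 * (1 << page_shift) bytes, taking a reference on (or allocating) every
 * page table the range touches.  On failure, references taken so far
 * are dropped again.  A rough usage sketch (error handling elided, and
 * NV_MEM_ACCESS_RW shown only as an illustrative access flag):
 *
 *	struct nouveau_vma vma = {};
 *	ret = nouveau_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &vma);
 *	nouveau_vm_map(&vma, node);
 *	...
 *	nouveau_vm_unmap(&vma);
 *	nouveau_vm_put(&vma);
 */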
int
nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
	       u32 access, struct nouveau_vma *vma)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mm.mutex);
	ret = nouveau_mm_head(&vm->mm, page_shift, msize, msize, align,
			      &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mm.mutex);
		return ret;
	}

	fpde = (vma->node->offset >> vmm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != vmm->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nouveau_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mm.mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	vma->vm     = vm;
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

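/*
 * Return the range held by @vma to the allocator and drop the page
 * table references taken by nouveau_vm_get().  A vma without a node is
 * ignored.
 */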
void
nouveau_vm_put(struct nouveau_vma *vma)
{
	struct nouveau_vm *vm = vma->vm;
	struct nouveau_vmmgr *vmm = vm->vmm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	fpde = (vma->node->offset >> vmm->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> vmm->pgt_bits;

	mutex_lock(&vm->mm.mutex);
	nouveau_vm_unmap_pgt(vm, vma->node->type != vmm->spg_shift, fpde, lpde);
	nouveau_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mm.mutex);
}

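/*
 * Create an address space covering [@offset, @offset + @length).  Only
 * the region from @mm_offset up is handed to the range allocator, in
 * units of @block bytes; addresses below that are left to the caller.
 */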
int
nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
		  u64 mm_offset, u32 block, struct nouveau_vm **pvm)
{
	struct nouveau_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	INIT_LIST_HEAD(&vm->pgd_list);
	vm->vmm = vmm;
	vm->refcount = 1;
	vm->fpde = offset >> (vmm->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);

	vm->pgt  = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			      block >> 12);
	if (ret) {
		kfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;

	return 0;
}

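/* Instantiate an address space via the device's vmm backend. */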
int
nouveau_vm_new(struct nouveau_device *device, u64 offset, u64 length,
	       u64 mm_offset, struct nouveau_vm **pvm)
{
	struct nouveau_vmmgr *vmm = nouveau_vmmgr(device);
	return vmm->create(vmm, offset, length, mm_offset, pvm);
}

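/*
 * Bind page directory @pgd to @vm: write all currently allocated page
 * tables into it, and keep it on pgd_list so later allocations are
 * mirrored into it as well.
 */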
static int
nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
{
	struct nouveau_vmmgr *vmm = vm->vmm;
	struct nouveau_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	nouveau_gpuobj_ref(pgd, &vpgd->obj);

	mutex_lock(&vm->mm.mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		vmm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mm.mutex);
	return 0;
}

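/*
 * Remove page directory @mpgd from @vm's pgd_list and drop the
 * reference taken when it was linked.
 */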
static void
nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
{
	struct nouveau_vm_pgd *vpgd, *tmp;
	struct nouveau_gpuobj *pgd = NULL;

	if (!mpgd)
		return;

	mutex_lock(&vm->mm.mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			pgd = vpgd->obj;
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mm.mutex);

	nouveau_gpuobj_ref(NULL, &pgd);
}

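/* Final teardown once the last reference is gone. */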
static void
nouveau_vm_del(struct nouveau_vm *vm)
{
	struct nouveau_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nouveau_vm_unlink(vm, vpgd->obj);
	}

	nouveau_mm_fini(&vm->mm);
	kfree(vm->pgt);
	kfree(vm);
}

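/*
 * Repoint *@ptr at @ref, adjusting refcounts: @ref (if non-NULL) gains
 * a reference and has @pgd linked into it; the VM previously at *@ptr
 * (if any) has @pgd unlinked and is destroyed when its refcount drops
 * to zero.  Passing ref == NULL simply releases *@ptr.
 */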
int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
	       struct nouveau_gpuobj *pgd)
{
	struct nouveau_vm *vm;
	int ret;

	vm = ref;
	if (vm) {
		ret = nouveau_vm_link(vm, pgd);
		if (ret)
			return ret;

		vm->refcount++;
	}

	vm = *ptr;
	*ptr = ref;

	if (vm) {
		nouveau_vm_unlink(vm, pgd);

		if (--vm->refcount == 0)
			nouveau_vm_del(vm);
	}

	return 0;
}