/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/fb.h>

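/* Map an nvkm_mem described by nvkm_mm_node regions (typically VRAM) into
 * the VMA, starting 'delta' bytes in: PTEs are written one page table at a
 * time, crossing PDE boundaries as needed, then the MMU is flushed.
 */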
void
nvkm_vm_map_at(struct nvkm_vma *vma, u64 delta, struct nvkm_mem *node)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_mm_node *r;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	delta = 0;
	list_for_each_entry(r, &node->regions, rl_entry) {
		u64 phys = (u64)r->offset << 12;
		u32 num  = r->length >> bits;

		while (num) {
			struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

			end = (pte + num);
			if (unlikely(end >= max))
				end = max;
			len = end - pte;

			mmu->func->map(vma, pgt, node, pte, len, phys, delta);

			num -= len;
			pte += len;
			if (unlikely(end >= max)) {
				phys += len << (bits + 12);
				pde++;
				pte = 0;
			}

			delta += (u64)len << vma->node->type;
		}
	}

	mmu->func->flush(vm);
}

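/* Map an nvkm_mem backed by an sg_table.  Each scatterlist segment is
 * written one PTE at a time so that segments straddling a page-table
 * boundary are split correctly.
 */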
static void
nvkm_vm_map_sg_table(struct nvkm_vma *vma, u64 delta, u64 length,
		     struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	unsigned m, sglen;
	u32 end, len;
	int i;
	struct scatterlist *sg;

	for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
		sglen = sg_dma_len(sg) >> PAGE_SHIFT;

		end = pte + sglen;
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

			mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
			num--;
			pte++;

			if (num == 0)
				goto finish;
		}
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
		if (m < sglen) {
			for (; m < sglen; m++) {
				dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);

				mmu->func->map_sg(vma, pgt, mem, pte, 1, &addr);
				num--;
				pte++;
				if (num == 0)
					goto finish;
			}
		}

	}
finish:
	mmu->func->flush(vm);
}

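/* Map an nvkm_mem described by a flat array of DMA addresses (mem->pages),
 * filling whole page-table spans per iteration.
 */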
static void
nvkm_vm_map_sg(struct nvkm_vma *vma, u64 delta, u64 length,
	       struct nvkm_mem *mem)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	dma_addr_t *list = mem->pages;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->map_sg(vma, pgt, mem, pte, len, list);

		num  -= len;
		pte  += len;
		list += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

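/* Map an entire nvkm_mem at the start of the VMA, picking the helper that
 * matches how the memory is described (sg table, page list, or regions).
 */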
void
nvkm_vm_map(struct nvkm_vma *vma, struct nvkm_mem *node)
{
	if (node->sg)
		nvkm_vm_map_sg_table(vma, 0, node->size << 12, node);
	else
	if (node->pages)
		nvkm_vm_map_sg(vma, 0, node->size << 12, node);
	else
		nvkm_vm_map_at(vma, 0, node);
}

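/* Clear the PTEs covering 'length' bytes of the VMA starting 'delta' bytes
 * in, then flush the MMU.
 */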
void
nvkm_vm_unmap_at(struct nvkm_vma *vma, u64 delta, u64 length)
{
	struct nvkm_vm *vm = vma->vm;
	struct nvkm_mmu *mmu = vm->mmu;
	int big = vma->node->type != mmu->func->spg_shift;
	u32 offset = vma->node->offset + (delta >> 12);
	u32 bits = vma->node->type - 12;
	u32 num  = length >> vma->node->type;
	u32 pde  = (offset >> mmu->func->pgt_bits) - vm->fpde;
	u32 pte  = (offset & ((1 << mmu->func->pgt_bits) - 1)) >> bits;
	u32 max  = 1 << (mmu->func->pgt_bits - bits);
	u32 end, len;

	while (num) {
		struct nvkm_memory *pgt = vm->pgt[pde].mem[big];

		end = (pte + num);
		if (unlikely(end >= max))
			end = max;
		len = end - pte;

		mmu->func->unmap(vma, pgt, pte, len);

		num -= len;
		pte += len;
		if (unlikely(end >= max)) {
			pde++;
			pte = 0;
		}
	}

	mmu->func->flush(vm);
}

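/* Unmap the entire VMA. */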
void
nvkm_vm_unmap(struct nvkm_vma *vma)
{
	nvkm_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
}

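/* Drop one reference on each page table covering PDEs fpde..lpde for the
 * given page size; any table that reaches zero is unhooked from every
 * linked PGD and freed.
 */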
static void
nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	struct nvkm_vm_pgt *vpgt;
	struct nvkm_memory *pgt;
	u32 pde;

	for (pde = fpde; pde <= lpde; pde++) {
		vpgt = &vm->pgt[pde - vm->fpde];
		if (--vpgt->refcount[big])
			continue;

		pgt = vpgt->mem[big];
		vpgt->mem[big] = NULL;

		list_for_each_entry(vpgd, &vm->pgd_list, head) {
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
		}

		mmu->func->flush(vm);

		nvkm_memory_del(&pgt);
	}
}

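/* Allocate a page table for PDE 'pde', sized for the given page type, and
 * point every PGD linked to this VM at it.
 */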
static int
nvkm_vm_map_pgt(struct nvkm_vm *vm, u32 pde, u32 type)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
	struct nvkm_vm_pgd *vpgd;
	int big = (type != mmu->func->spg_shift);
	u32 pgt_size;
	int ret;

	pgt_size  = (1 << (mmu->func->pgt_bits + 12)) >> type;
	pgt_size *= 8;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      pgt_size, 0x1000, true, &vpgt->mem[big]);
	if (unlikely(ret))
		return ret;

	list_for_each_entry(vpgd, &vm->pgd_list, head) {
		mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
	}

	vpgt->refcount[big]++;
	return 0;
}

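/* Allocate 'size' bytes of address space with the requested page size,
 * taking references on (or creating) the page tables that back the range,
 * and initialise 'vma' to describe it.
 *
 * Rough sketch of the usual lifecycle from a caller's point of view
 * (illustrative only, error handling omitted):
 *
 *	struct nvkm_vma vma = {};
 *	nvkm_vm_get(vm, size, page_shift, access, &vma);
 *	nvkm_vm_map(&vma, mem);
 *	...
 *	nvkm_vm_unmap(&vma);
 *	nvkm_vm_put(&vma);
 */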
int
nvkm_vm_get(struct nvkm_vm *vm, u64 size, u32 page_shift, u32 access,
	    struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu = vm->mmu;
	u32 align = (1 << page_shift) >> 12;
	u32 msize = size >> 12;
	u32 fpde, lpde, pde;
	int ret;

	mutex_lock(&vm->mutex);
	ret = nvkm_mm_head(&vm->mm, 0, page_shift, msize, msize, align,
			   &vma->node);
	if (unlikely(ret != 0)) {
		mutex_unlock(&vm->mutex);
		return ret;
	}

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	for (pde = fpde; pde <= lpde; pde++) {
		struct nvkm_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
		int big = (vma->node->type != mmu->func->spg_shift);

		if (likely(vpgt->refcount[big])) {
			vpgt->refcount[big]++;
			continue;
		}

		ret = nvkm_vm_map_pgt(vm, pde, vma->node->type);
		if (ret) {
			if (pde != fpde)
				nvkm_vm_unmap_pgt(vm, big, fpde, pde - 1);
			nvkm_mm_free(&vm->mm, &vma->node);
			mutex_unlock(&vm->mutex);
			return ret;
		}
	}
	mutex_unlock(&vm->mutex);

	vma->vm = NULL;
	nvkm_vm_ref(vm, &vma->vm, NULL);
	vma->offset = (u64)vma->node->offset << 12;
	vma->access = access;
	return 0;
}

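/* Release the address space held by 'vma': drop the page-table references
 * covering it, return the range to the allocator and drop the VM reference.
 */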
void
nvkm_vm_put(struct nvkm_vma *vma)
{
	struct nvkm_mmu *mmu;
	struct nvkm_vm *vm;
	u32 fpde, lpde;

	if (unlikely(vma->node == NULL))
		return;
	vm = vma->vm;
	mmu = vm->mmu;

	fpde = (vma->node->offset >> mmu->func->pgt_bits);
	lpde = (vma->node->offset + vma->node->length - 1) >> mmu->func->pgt_bits;

	mutex_lock(&vm->mutex);
	nvkm_vm_unmap_pgt(vm, vma->node->type != mmu->func->spg_shift, fpde, lpde);
	nvkm_mm_free(&vm->mm, &vma->node);
	mutex_unlock(&vm->mutex);

	nvkm_vm_ref(NULL, &vma->vm, NULL);
}

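/* Allocate a small-page page table covering 'size' bytes, install it as the
 * VM's first page table and let the backing memory boot-map itself through
 * it via nvkm_memory_boot().
 */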
int
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_memory *pgt;
	int ret;

	ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
			      (size >> mmu->func->spg_shift) * 8, 0x1000, true, &pgt);
	if (ret == 0) {
		vm->pgt[0].refcount[0] = 1;
		vm->pgt[0].mem[0] = pgt;
		nvkm_memory_boot(pgt, vm);
	}

	return ret;
}

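/* Create a VM covering [offset, offset + length): set up its mutex, PGD
 * list, page-table array, and the address-space allocator starting at
 * mm_offset.
 */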
int
nvkm_vm_create(struct nvkm_mmu *mmu, u64 offset, u64 length, u64 mm_offset,
	       u32 block, struct lock_class_key *key, struct nvkm_vm **pvm)
{
	static struct lock_class_key _key;
	struct nvkm_vm *vm;
	u64 mm_length = (offset + length) - mm_offset;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	__mutex_init(&vm->mutex, "&vm->mutex", key ? key : &_key);
	INIT_LIST_HEAD(&vm->pgd_list);
	vm->mmu = mmu;
	kref_init(&vm->refcount);
	vm->fpde = offset >> (mmu->func->pgt_bits + 12);
	vm->lpde = (offset + length - 1) >> (mmu->func->pgt_bits + 12);

	vm->pgt  = vzalloc((vm->lpde - vm->fpde + 1) * sizeof(*vm->pgt));
	if (!vm->pgt) {
		kfree(vm);
		return -ENOMEM;
	}

	ret = nvkm_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
			   block >> 12);
	if (ret) {
		vfree(vm->pgt);
		kfree(vm);
		return ret;
	}

	*pvm = vm;

	return 0;
}

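/* Create a VM through the MMU backend's create hook, if it provides one. */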
int
nvkm_vm_new(struct nvkm_device *device, u64 offset, u64 length, u64 mm_offset,
	    struct lock_class_key *key, struct nvkm_vm **pvm)
{
	struct nvkm_mmu *mmu = device->mmu;
	if (!mmu->func->create)
		return -EINVAL;
	return mmu->func->create(mmu, offset, length, mm_offset, key, pvm);
}

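/* Attach a page directory to the VM: write the VM's current page tables
 * into it and add it to the PGD list so later changes are propagated.
 */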
static int
nvkm_vm_link(struct nvkm_vm *vm, struct nvkm_gpuobj *pgd)
{
	struct nvkm_mmu *mmu = vm->mmu;
	struct nvkm_vm_pgd *vpgd;
	int i;

	if (!pgd)
		return 0;

	vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
	if (!vpgd)
		return -ENOMEM;

	vpgd->obj = pgd;

	mutex_lock(&vm->mutex);
	for (i = vm->fpde; i <= vm->lpde; i++)
		mmu->func->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
	list_add(&vpgd->head, &vm->pgd_list);
	mutex_unlock(&vm->mutex);
	return 0;
}

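/* Detach a previously linked page directory from the VM. */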
static void
nvkm_vm_unlink(struct nvkm_vm *vm, struct nvkm_gpuobj *mpgd)
{
	struct nvkm_vm_pgd *vpgd, *tmp;

	if (!mpgd)
		return;

	mutex_lock(&vm->mutex);
	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		if (vpgd->obj == mpgd) {
			list_del(&vpgd->head);
			kfree(vpgd);
			break;
		}
	}
	mutex_unlock(&vm->mutex);
}

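/* Final kref release: unlink any remaining page directories, then free the
 * VM's allocator, page-table array and the VM itself.
 */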
static void
nvkm_vm_del(struct kref *kref)
{
	struct nvkm_vm *vm = container_of(kref, typeof(*vm), refcount);
	struct nvkm_vm_pgd *vpgd, *tmp;

	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
		nvkm_vm_unlink(vm, vpgd->obj);
	}

	nvkm_mm_fini(&vm->mm);
	vfree(vm->pgt);
	kfree(vm);
}

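/* Point '*ptr' at 'ref' (which may be NULL), linking/unlinking 'pgd' and
 * adjusting VM reference counts accordingly.
 */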
int
nvkm_vm_ref(struct nvkm_vm *ref, struct nvkm_vm **ptr, struct nvkm_gpuobj *pgd)
{
	if (ref) {
		int ret = nvkm_vm_link(ref, pgd);
		if (ret)
			return ret;

		kref_get(&ref->refcount);
	}

	if (*ptr) {
		nvkm_vm_unlink(*ptr, pgd);
		kref_put(&(*ptr)->refcount, nvkm_vm_del);
	}

	*ptr = ref;
	return 0;
}

static int
nvkm_mmu_oneinit(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->oneinit)
		return mmu->func->oneinit(mmu);
	return 0;
}

static int
nvkm_mmu_init(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->init)
		mmu->func->init(mmu);
	return 0;
}

static void *
nvkm_mmu_dtor(struct nvkm_subdev *subdev)
{
	struct nvkm_mmu *mmu = nvkm_mmu(subdev);
	if (mmu->func->dtor)
		return mmu->func->dtor(mmu);
	return mmu;
}

static const struct nvkm_subdev_func
nvkm_mmu = {
	.dtor = nvkm_mmu_dtor,
	.oneinit = nvkm_mmu_oneinit,
	.init = nvkm_mmu_init,
};

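/* Common constructor for MMU implementations: register the subdev and copy
 * the per-implementation limits (address limit, DMA bits, large-page shift)
 * from the function table.
 */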
void
nvkm_mmu_ctor(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu *mmu)
{
	nvkm_subdev_ctor(&nvkm_mmu, device, index, 0, &mmu->subdev);
	mmu->func = func;
	mmu->limit = func->limit;
	mmu->dma_bits = func->dma_bits;
	mmu->lpg_shift = func->lpg_shift;
}

int
nvkm_mmu_new_(const struct nvkm_mmu_func *func, struct nvkm_device *device,
	      int index, struct nvkm_mmu **pmmu)
{
	if (!(*pmmu = kzalloc(sizeof(**pmmu), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_mmu_ctor(func, device, index, *pmmu);
	return 0;
}