// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/seqlock.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	/*
	 * A large mapping is backed by 'cnt' replicated PTEs. Each PTE is
	 * 8 bytes, so masking the PTE address down to the size of that
	 * naturally aligned group yields a pointer to its first entry.
	 */
	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

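/* Queue a page-table page on @freelist so it can be freed later in one go. */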
static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}

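/*
 * Recursively queue a page-table level and all lower-level tables it points
 * to on @freelist. Leaf and large PTEs are skipped since they do not
 * reference page-table pages.
 */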
static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}

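/* Queue an entire sub-page-table for freeing, based on its page mode. */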
static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits to a size up
 * to 64 bits.
 */
static bool increase_address_space(struct amd_io_pgtable *pgtable,
				   unsigned long address,
				   unsigned int page_size_level,
				   gfp_t gfp)
{
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	struct protection_domain *domain =
		container_of(pgtable, struct protection_domain, iop);
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = iommu_alloc_page_node(cfg->amd.nid, gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	/*
	 * Re-check under the domain lock: the address space may already be
	 * large enough, e.g. because another caller grew it in the meantime.
	 */
	if (address <= PM_LEVEL_SIZE(pgtable->mode) &&
	    pgtable->mode - 1 >= page_size_level)
		goto out;

	ret = false;
	if (WARN_ON_ONCE(pgtable->mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(pgtable->mode, iommu_virt_to_phys(pgtable->root));

	write_seqcount_begin(&pgtable->seqcount);
	pgtable->root  = pte;
	pgtable->mode += 1;
	write_seqcount_end(&pgtable->seqcount);

	amd_iommu_update_and_flush_device_table(domain);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	iommu_free_page(pte);

	return ret;
}

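/*
 * Return a pointer to the PTE that maps @address at the level required for
 * @page_size, allocating intermediate page-table pages as needed. The address
 * space is grown first if @address is not covered by the current page-table
 * mode. *@updated is set when live PTEs were replaced, so the caller knows a
 * flush is required.
 */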
static u64 *alloc_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	unsigned long last_addr = address + (page_size - 1);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	unsigned int seqcount;
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (last_addr > PM_LEVEL_SIZE(pgtable->mode) ||
	       pgtable->mode - 1 < PAGE_SIZE_LEVEL(page_size)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(pgtable, last_addr,
					    PAGE_SIZE_LEVEL(page_size), gfp))
			return NULL;
	}

	/*
	 * Read root and mode as a consistent pair; retry if a concurrent
	 * increase_address_space() changed them underneath us.
	 */
	do {
		seqcount = read_seqcount_begin(&pgtable->seqcount);

		level   = pgtable->mode - 1;
		pte     = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	} while (read_seqcount_retry(&pgtable->seqcount, seqcount));

	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = iommu_alloc_page_node(cfg->amd.nid, gfp);

			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	unsigned int seqcount;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	do {
		seqcount = read_seqcount_begin(&pgtable->seqcount);
		level	   =  pgtable->mode - 1;
		pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	} while (read_seqcount_retry(&pgtable->seqcount, seqcount));

	*page_size =  PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
		    PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte	   = IOMMU_PTE_PAGE(*pte);
		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}

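/*
 * Atomically clear a PTE. If it pointed to a lower-level page table, queue
 * that sub-table on @freelist so it can be freed once the IOMMU has been
 * flushed.
 */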
static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space and allocates the page table pages if necessary.
 * In the future it can be extended to support all features of AMD IOMMU
 * page tables like level skipping and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;
	size_t size = pgcount << __ffs(pgsize);
	unsigned long o_iova = iova;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(pgtable, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_pages(dom, o_iova, size);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	iommu_put_pages_list(&freelist);

	return ret;
}

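/*
 * Clear the PTEs covering the requested IOVA range. Returns the number of
 * bytes actually unmapped, which may be less than requested if no mapping
 * is found at some point in the range.
 */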
static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

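/* Walk the page table and translate @iova to the physical address it maps. */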
static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

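/*
 * Check whether any of the replicated PTEs backing a mapping has the Host
 * Dirty bit set. Unless IOMMU_DIRTY_NO_CLEAR is passed, the bits are also
 * cleared.
 */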
static bool pte_test_and_clear_dirty(u64 *ptep, unsigned long size,
				     unsigned long flags)
{
	bool test_only = flags & IOMMU_DIRTY_NO_CLEAR;
	bool dirty = false;
	int i, count;

	/*
	 * 2.2.3.2 Host Dirty Support
	 * When a non-default page size is used, software must OR the
	 * Dirty bits in all of the replicated host PTEs used to map
	 * the page. The IOMMU does not guarantee the Dirty bits are
	 * set in all of the replicated PTEs. Any portion of the page
	 * may have been written even if the Dirty bit is set in only
	 * one of the replicated PTEs.
	 */
	count = PAGE_SIZE_PTE_COUNT(size);
	for (i = 0; i < count && test_only; i++) {
		if (test_bit(IOMMU_PTE_HD_BIT, (unsigned long *)&ptep[i])) {
			dirty = true;
			break;
		}
	}

	for (i = 0; i < count && !test_only; i++) {
		if (test_and_clear_bit(IOMMU_PTE_HD_BIT,
				       (unsigned long *)&ptep[i])) {
			dirty = true;
		}
	}

	return dirty;
}

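/*
 * Walk the IOVA range, record every dirty mapping in @dirty and, unless
 * the test-only flag is set, clear the dirty bits as we go.
 */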
static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long end = iova + size - 1;

	do {
		unsigned long pgsize = 0;
		u64 *ptep, pte;

		ptep = fetch_pte(pgtable, iova, &pgsize);
		if (ptep)
			pte = READ_ONCE(*ptep);
		if (!ptep || !IOMMU_PTE_PRESENT(pte)) {
			pgsize = pgsize ?: PTE_LEVEL_PAGE_SIZE(0);
			iova += pgsize;
			continue;
		}

		/*
		 * Mark the whole IOVA range as dirty even if only one of
		 * the replicated PTEs were marked dirty.
		 */
		if (pte_test_and_clear_dirty(ptep, pgsize, flags))
			iommu_dirty_bitmap_record(dirty, iova, pgsize);
		iova += pgsize;
	} while (iova < end);

	return 0;
}

/*
 * ----------------------------------------------------
 */
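/* Tear down a v1 page table and release all of its page-table pages. */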
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);
	iommu_put_pages_list(&freelist);
}

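/*
 * Allocate and initialise a v1 page table: a single root page in 3-level
 * mode, wired up with the v1 map/unmap/iova_to_phys/dirty-tracking callbacks.
 */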
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	pgtable->root = iommu_alloc_page_node(cfg->amd.nid, GFP_KERNEL);
	if (!pgtable->root)
		return NULL;
	pgtable->mode = PAGE_MODE_3_LEVEL;
	seqcount_init(&pgtable->seqcount);

	cfg->pgsize_bitmap  = amd_iommu_pgsize_bitmap;
	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE;
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE;

	pgtable->pgtbl.ops.map_pages    = iommu_v1_map_pages;
	pgtable->pgtbl.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys;
	pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;

	return &pgtable->pgtbl;
}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};