// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
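
/*
 * Worked example, derived from the macros above for a 4K granule:
 * arm_lpae_iopte is 8 bytes, so ilog2(sizeof(arm_lpae_iopte)) = 3 and
 * bits_per_level = 12 - 3 = 9. ARM_LPAE_LVL_SHIFT(l,d) is then 12 at
 * level 3, 21 at level 2 and 30 at level 1, so ARM_LPAE_BLOCK_SIZE
 * yields the familiar 4K pages and 2M/1G blocks, while ARM_LPAE_LVL_IDX
 * extracts 9 IOVA bits per level (plus any extra pgd_bits at the start
 * level).
 */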

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
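
/*
 * Worked example of the rotation above, for a 64K granule with 52-bit
 * OAS: physical address bits 51:48 are shifted down by 36 into PTE bits
 * 15:12, which are otherwise zero for a 64K-aligned address, and
 * iopte_to_paddr() rotates them back up. With smaller granules bits
 * 51:48 are zero anyway, so both helpers reduce to a plain mask with
 * ARM_LPAE_PTE_ADDR_MASK.
 */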

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
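
/*
 * Note the return convention: arm_lpae_install_table() hands back the PTE
 * it found at @ptep. If that equals @curr the caller's table was installed
 * (and, for non-coherent walks, tagged with ARM_LPAE_PTE_SW_SYNC once the
 * sync has been issued); otherwise a concurrent updater won the cmpxchg
 * and the caller must free its own table, as __arm_lpae_map() does below.
 */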

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size)
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
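
/*
 * For example, a stage-1 request of IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 * comes out as nG | AP_UNPRIV | ATTRINDX(CACHE) | SH_IS | AF: writeable,
 * accessible to unprivileged transactions, inner-shareable write-back
 * cacheable, with the Access Flag preset so no AF faults are taken.
 */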

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}
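
/*
 * Example: unmapping 4K from the middle of a 2M block with a 4K granule.
 * __arm_lpae_unmap() finds the leaf at level 2 and calls the function
 * above with lvl = 3, so split_sz = 4K: a new level-3 table is filled
 * with 511 page entries inheriting the block's attributes, the entry
 * covering the target IOVA is left empty, and the table is swapped in
 * over the old block PTE via arm_lpae_install_table().
 */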

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
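
/*
 * E.g. when the walk stops at a 2M block entry at level 2, the low
 * 21 bits of the IOVA are kept as the offset and OR-ed onto the
 * block's output address, so lookups within a block still resolve
 * to the exact physical address.
 */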

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
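
/*
 * For instance, on a 4K-page kernel a caller advertising pgsize_bitmap =
 * SZ_4K | SZ_64K | SZ_2M | SZ_1G selects the 4K granule (it matches
 * PAGE_SIZE), and the bitmap is trimmed to SZ_4K | SZ_2M | SZ_1G: the
 * stray 64K entry is dropped since it is not a valid block size for a
 * 4K-granule walk.
 */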

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
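
/*
 * Worked geometry example: ias = 48 with a 4K granule gives pg_shift = 12,
 * bits_per_level = 9, va_bits = 36 and levels = DIV_ROUND_UP(36, 9) = 4,
 * hence start_level = 0 and pgd_bits = 36 - 27 = 9 (a single 4K PGD).
 * A 40-bit IAS instead gives pgd_bits = 1: a two-entry level-0 table,
 * which the stage-2 path below may concatenate away entirely.
 */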

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT |
			    IO_PGTABLE_QUIRK_ARM_TTBR1))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		tcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
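
	/*
	 * E.g. with a 40-bit IAS and 4K granule the level-0 table holds
	 * only two entries, so the walk starts instead at two concatenated
	 * level-1 tables (pgd_bits goes from 1 to 10): one level fewer per
	 * translation, at the cost of a larger PGD allocation.
	 */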

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
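
	/*
	 * SL0 is an inverted encoding of the starting level, and the 4K
	 * granule uses a different base for that encoding than 16K/64K,
	 * hence the sl++ above (see the VTCR_EL2.SL0 description in the
	 * ARM ARM for the exact values).
	 */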

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};
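
/*
 * Minimal usage sketch, mirroring the selftest below (the cfg values and
 * identifiers such as my_tlb_ops are illustrative only): an IOMMU driver
 * fills in an io_pgtable_cfg and the io-pgtable core dispatches to one of
 * the init_fns above.
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (!ops)
 *		return -ENOMEM;
 *	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE,
 *		 GFP_KERNEL);
 *	...
 *	free_io_pgtable_ops(ops);
 */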

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif