// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4
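/*
 * Note: 16 is the architectural limit on the number of concatenated
 * translation tables at the initial stage-2 lookup level, and four
 * levels suffice for any supported granule/IAS combination.
 */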

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
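/*
 * e.g. with a 4K granule (bits_per_level == 9) and 8-byte PTEs, this
 * gives shifts of 12, 21, 30 and 39 for levels 3, 2, 1 and 0.
 */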

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))
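/* e.g. bits_per_level == 9 gives an 8 << 9 == 4K table of 512 PTEs */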

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
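/*
 * e.g. iova 0x40201000 (1GB + 2MB + 4KB) with a 4K granule yields
 * index 1 at each of levels 1, 2 and 3.
 */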

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
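/* i.e. 4KB/2MB/1GB at levels 3/2/1 for the 4K granule */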

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA	0xe4ULL
#define ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA	0xefULL
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA	4
#define ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA	5

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

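/*
 * With the 64K granule and 52-bit output addressing, PA bits 51:48 of
 * a PTE live in bits 15:12; the helpers below pack and unpack them.
 */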
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

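/*
 * Atomically install a next-level table PTE. The cmpxchg() returns the
 * previous value of *ptep, so a result other than @curr means the
 * caller lost a race and must free its newly-allocated table.
 */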
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret && mapped)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if ((prot & IOMMU_CACHE) && (prot & IOMMU_SYS_CACHE_NWA))
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		/* IOMMU_CACHE + IOMMU_SYS_CACHE equivalent to IOMMU_CACHE */
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE_NWA)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
				  NULL);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
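	/*
	 * e.g. ias == 48 with a 4K granule: va_bits == 36, levels == 4,
	 * start_level == 0, and a 9-bit (one page) pgd.
	 */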

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_pages	= arm_lpae_map_pages,
		.unmap		= arm_lpae_unmap,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRANWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA)) |
	      (ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
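	/*
	 * e.g. ias == 40 with a 4K granule would otherwise need a 2-entry
	 * level-0 table; two concatenated level-1 pages fit comfortably
	 * within the 16-page limit, so we start the walk at level 1 with
	 * a 10-bit pgd instead.
	 */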

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
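	/*
	 * The inversion yields the architectural SL0 encoding, in which
	 * numerically larger values select shallower (earlier) start
	 * levels.
	 */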

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif