// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
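
/*
 * Worked example (assuming a 4K granule and 48-bit IAS): pg_shift = 12,
 * bits_per_level = 9 and levels = 4, so the walk starts at level 0 and
 * ARM_LPAE_LVL_SHIFT(l,d) evaluates to 39, 30, 21 and 12 for levels 0-3,
 * matching the VMSAv8-64 4K translation regime.
 */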

#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
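
/*
 * Worked example (4K granule): ilog2(sizeof(arm_lpae_iopte)) = 3 and
 * bits_per_level = 9, so ARM_LPAE_BLOCK_SIZE(l,d) is 1 << 30 = 1G at
 * level 1, 1 << 21 = 2M at level 2 and 1 << 12 = 4K at level 3.
 */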

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};
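
/*
 * Example geometry (4K granule, 48-bit IAS, stage 1): levels = 4,
 * pg_shift = 12, bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte))
 * = 9, and pgd_size = SZ_4K, i.e. a single, non-concatenated top-level
 * table. See arm_lpae_alloc_pgtable() for how these are derived.
 */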

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
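
/*
 * Worked example of the packing above (64K granule, 52-bit PAs): a
 * 64K-aligned paddr has bits 15:12 clear, so paddr_to_iopte() can rotate
 * PA[51:48] down into PTE[15:12] via the "pte >> (48 - 12)" term before
 * masking; iopte_to_paddr() shifts those four bits back up whenever
 * pg_shift >= 16, and simply returns the masked address for smaller
 * granules.
 */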

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg, void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	pages = io_pgtable_alloc_pages(cfg, cookie, order, gfp | __GFP_ZERO);
	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	io_pgtable_free_pages(cfg, cookie, pages, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg, void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	io_pgtable_free_pages(cfg, cookie, pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;
	pte |= ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
			WARN_ON(1);
			return -EINVAL;
		}
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	void *cookie = data->iop.cookie;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg, cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
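
/*
 * Example walk (hypothetical values, 4K granule, 48-bit IAS): mapping
 * SZ_2M at iova 0x40200000 enters at level 0, installs or dereferences
 * table entries down to level 2, and stops there because
 * size == ARM_LPAE_BLOCK_SIZE(2, data), installing a block PTE; a SZ_4K
 * mapping recurses once more and installs a page PTE at level 3.
 */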

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_QCOM_SYS_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
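
/*
 * Example (stage 1): IOMMU_READ | IOMMU_CACHE yields nG | AP_RDONLY |
 * AP_UNPRIV with ATTRINDX = ARM_LPAE_MAIR_ATTR_IDX_CACHE, i.e. a
 * non-global, read-only, unprivileged, write-back cacheable entry;
 * adding IOMMU_NOEXEC would also set the XN bits.
 */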

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
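
/*
 * Minimal caller sketch (hedged; "cfg", "cookie", "iova" and "paddr" are
 * assumed to be set up elsewhere, and error handling is elided):
 *
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */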

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;
	void *cookie = data->iop.cookie;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;
	void *cookie = data->iop.cookie;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, cookie);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg, cookie);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
		return size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_flush_walk(iop, iova, size,
						  ARM_LPAE_GRANULE(data));
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_page(iop, gather, iova, size);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}
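
/*
 * Example lookup: for a 2M block installed at level 2, the loop above
 * stops at lvl == 2 via iopte_leaf() and returns the block's physical
 * base ORed with iova & (SZ_2M - 1), the offset within the block.
 */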

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
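
/*
 * Worked example: with PAGE_SIZE == SZ_4K and a hardware pgsize_bitmap of
 * (SZ_4K | SZ_64K | SZ_2M | SZ_1G), the 4K granule is chosen and the
 * bitmap is restricted to (SZ_4K | SZ_2M | SZ_1G); SZ_64K is dropped
 * because it is not a valid page or block size for that granule.
 */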

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
	} else {
		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
		      (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
	}

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
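	/* Worked example: cfg->ias == 48 gives T0SZ == 16, a 2^48-byte input range */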

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
					   cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
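
	/*
	 * Worked example (4K granule, 40-bit IAS): the initial geometry is
	 * 4 levels with a 16-byte level-0 pgd, i.e. pgd_pages == 2. That is
	 * within ARM_LPAE_S2_MAX_CONCAT_PAGES, so the pgd grows to two
	 * concatenated 4K level-1 tables (pgd_size = SZ_8K) and the walk
	 * starts at level 1 instead of level 0.
	 */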

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
					   cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->levels < ARM_LPAE_MAX_LEVELS) {
		data->levels = ARM_LPAE_MAX_LEVELS;
		data->pgd_size = sizeof(arm_lpae_iopte);
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
					   cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
			    void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t granule, void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_flush_leaf	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif