// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))
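/*
 * For example, with a 4K granule bits_per_level is 9 and
 * sizeof(arm_lpae_iopte) is 8, so the shift works out as
 *	ARM_LPAE_LVL_SHIFT(3,d) = ((4 - 3) * 9) + 3 = 12,
 *	ARM_LPAE_LVL_SHIFT(2,d) = 21, ARM_LPAE_LVL_SHIFT(1,d) = 30,
 * matching the 4K/2M/1G translation boundaries.
 */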

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
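/*
 * Continuing the 4K-granule example above: ARM_LPAE_BLOCK_SIZE(3,d) = SZ_4K,
 * ARM_LPAE_BLOCK_SIZE(2,d) = SZ_2M and ARM_LPAE_BLOCK_SIZE(1,d) = SZ_1G,
 * while ARM_LPAE_LVL_IDX() extracts the corresponding 9-bit table index
 * (widened by ARM_LPAE_PGD_IDX() at the start level for oversized PGDs).
 */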

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_INC_OWBRANWA		0xe4ULL
#define ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA	0xefULL
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA	4
#define ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA	5

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

#define APPLE_DART_PTE_PROT_NO_WRITE	(1<<7)
#define APPLE_DART_PTE_PROT_NO_READ	(1<<8)

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

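/*
 * PA-packing helpers: PTE bits 47:12 normally hold the output address
 * directly. With a 64K granule and a 52-bit OAS, PA bits 51:48 are instead
 * packed into PTE bits 15:12 (which are RES0 for that granule);
 * iopte_to_paddr() performs the inverse rotation.
 */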
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

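/*
 * Table memory management. For non-coherent table walks, tables are
 * streaming-mapped DMA_TO_DEVICE at allocation time and each PTE update is
 * pushed out with dma_sync_single_for_device(); we also rely on the IOMMU
 * seeing tables at their physical address, hence the dma == phys check below.
 */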
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

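/*
 * Installing a new table entry is lock-free: the table contents are made
 * visible with dma_wmb() before a cmpxchg() publishes the PTE. On a
 * non-coherent walk, whichever thread observes the PTE without
 * ARM_LPAE_PTE_SW_SYNC cleans it to the point of coherency and (if it won
 * the race) sets the sync bit so that others need not clean it again.
 */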
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

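/*
 * __arm_lpae_map() walks down from the start level, one recursion per level:
 * if the requested size equals the block size at this level it installs up
 * to 'pgcount' leaf entries in one go; otherwise it ensures a next-level
 * table exists (allocating one and racing to install it if necessary) and
 * recurses into it.
 */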
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret && mapped)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

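/*
 * A worked example of the attribute encoding below: for a stage-1 format,
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields
 *	ARM_LPAE_PTE_nG | ARM_LPAE_PTE_AP_UNPRIV |
 *	(ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT) |
 *	ARM_LPAE_PTE_SH_IS | ARM_LPAE_PTE_AF,
 * i.e. a non-global, unprivileged, writeable, Inner-Shareable, write-back
 * cacheable entry with the Access Flag preset.
 */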
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == APPLE_DART) {
		pte = 0;
		if (!(prot & IOMMU_WRITE))
			pte |= APPLE_DART_PTE_PROT_NO_WRITE;
		if (!(prot & IOMMU_READ))
			pte |= APPLE_DART_PTE_PROT_NO_READ;
		return pte;
	}

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if ((prot & IOMMU_CACHE) && (prot & IOMMU_SYS_CACHE_NWA))
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		/* IOMMU_CACHE + IOMMU_SYS_CACHE equivalent to IOMMU_CACHE */
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_SYS_CACHE_NWA)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
	return arm_lpae_map_pages(ops, iova, paddr, size, 1, iommu_prot, gfp,
				  NULL);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

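/*
 * Splitting a block on partial unmap: rather than editing the live block
 * entry, a replacement next-level table is built off to the side, populated
 * with entries covering everything except the region being unmapped, and
 * then swapped in atomically via arm_lpae_install_table(). If we lose that
 * race to a concurrent splitter, we free our table and retry in theirs.
 */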
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	return arm_lpae_unmap_pages(ops, iova, size, 1, gather);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

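/*
 * For example: with PAGE_SIZE = SZ_4K and a pgsize_bitmap advertising 4K
 * support, the granule below resolves to 4K and the bitmap is clipped to
 * SZ_4K | SZ_2M | SZ_1G; a 64K granule keeps SZ_64K | SZ_512M and, when
 * oas > 48, additionally permits 4TB blocks.
 */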
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));
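	/*
	 * Worked example: ias = 48 with a 4K granule gives pg_shift = 12,
	 * bits_per_level = 9, va_bits = 36, levels = 4, start_level = 0 and
	 * pgd_bits = 9 (i.e. a single 4K PGD page).
	 */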

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.map_pages	= arm_lpae_map_pages,
		.unmap		= arm_lpae_unmap,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRANWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE_NWA)) |
	      (ARM_LPAE_MAIR_ATTR_IWBRWA_OWBRANWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_ICACHE_OCACHE_NWA));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

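	/*
	 * SL0 is an inverted encoding of the starting level (~sl & 3 is
	 * 3 - sl for sl in 0..3); the increment above accounts for the 4K
	 * granule's SL0 encoding differing from that of 16K/64K.
	 */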
	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
apple_dart_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;
	int i;

	if (cfg->oas > 36)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * The table format itself always uses two levels, but the total VA
	 * space is mapped by four separate tables, making the MMIO registers
	 * an effective "level 1". For simplicity, though, we treat this
	 * equivalently to LPAE stage 2 concatenation at level 2, with the
	 * additional TTBRs each just pointing at consecutive pages.
	 */
	if (data->start_level < 1)
		goto out_free_data;
	if (data->start_level == 1 && data->pgd_bits > 2)
		goto out_free_data;
	if (data->start_level > 1)
		data->pgd_bits = 0;
	data->start_level = 2;
	cfg->apple_dart_cfg.n_ttbrs = 1 << data->pgd_bits;
	data->pgd_bits += data->bits_per_level;
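	/*
	 * The "PGD" is thus a physically contiguous run of n_ttbrs
	 * granule-sized level-2 tables, and each TTBR below simply points
	 * at one granule within it.
	 */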

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg);
	if (!data->pgd)
		goto out_free_data;

	for (i = 0; i < cfg->apple_dart_cfg.n_ttbrs; ++i)
		cfg->apple_dart_cfg.ttbr[i] =
			virt_to_phys(data->pgd + i * ARM_LPAE_GRANULE(data));

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns = {
	.alloc	= apple_dart_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE, GFP_KERNEL))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif