/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/barrier.h>

#include "io-pgtable.h"
32
33 #define ARM_LPAE_MAX_ADDR_BITS 48
34 #define ARM_LPAE_S2_MAX_CONCAT_PAGES 16
35 #define ARM_LPAE_MAX_LEVELS 4
36
37 /* Struct accessors */
38 #define io_pgtable_to_data(x) \
39 container_of((x), struct arm_lpae_io_pgtable, iop)
40
41 #define io_pgtable_ops_to_pgtable(x) \
42 container_of((x), struct io_pgtable, ops)
43
44 #define io_pgtable_ops_to_data(x) \
45 io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
46
47 /*
48 * For consistency with the architecture, we always consider
49 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0
50 */
51 #define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels)
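/*
 * Worked example (illustrative, not part of the original file): with a
 * 4K granule and a 39-bit input address space, pg_shift = 12 and
 * bits_per_level = 9, so levels = DIV_ROUND_UP(39 - 12, 9) = 3 and the
 * walk starts at level 4 - 3 = 1, proceeding through levels 1..3.
 */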

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
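/*
 * Worked example (illustrative): for the 4K/39-bit configuration above,
 * ARM_LPAE_LVL_SHIFT yields 30, 21 and 12 for levels 1, 2 and 3, so
 * ARM_LPAE_LVL_IDX(a,3,d) reduces to ((a) >> 12) & 0x1ff.
 */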

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
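/*
 * With 8-byte PTEs and 9 bits per level (the 4K granule case), this
 * gives 4K at level 3, 2M at level 2 and 1G at level 1 (illustrative
 * example values).
 */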

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(((l) == (ARM_LPAE_MAX_LEVELS - 1)) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
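/*
 * Note (added for clarity): the output address in a PTE occupies bits
 * [47:pg_shift], hence the ARM_LPAE_MAX_ADDR_BITS (48) mask in the
 * accessors above; the low-order attribute bits and the high-order
 * attribute/XN bits fall outside that field.
 */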

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!selftest_running) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!selftest_running)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

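/*
 * PTE updates are pushed out to a (potentially non-coherent) table
 * walker via the streaming DMA API: the CPU store below must reach
 * memory before the device is allowed to walk to the new entry.
 */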
static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!selftest_running)
		dma_sync_single_for_device(cfg->iommu_dev,
					   __arm_lpae_dma_addr(ptep),
					   sizeof(pte), DMA_TO_DEVICE);
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

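/*
 * Install a leaf (page or block) entry at level lvl. An existing leaf
 * requires an explicit unmap first; an existing table entry is torn
 * down and freed before being overwritten by the new block mapping.
 */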
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	if (iopte_leaf(*ptep, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, cfg);
	return 0;
}

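/*
 * Recursively descend the table, allocating intermediate levels on
 * demand, until the requested size matches the block/page size at the
 * current level.
 */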
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
					       GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		__arm_lpae_set_pte(ptep, pte, cfg);
	} else if (!iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

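/*
 * Unmap a region smaller than the block entry covering it: replace the
 * block entry with a next-level table that remaps everything except the
 * range being unmapped, then flush the old block entry from the TLB.
 */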
static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	__arm_lpae_set_pte(ptep, table, cfg);
	iova &= ~(blk_size - 1);
	cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		__arm_lpae_set_pte(ptep, 0, &data->iop.cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}

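/*
 * Illustrative example of the restriction below: with a 4K CPU
 * PAGE_SIZE and a pgsize_bitmap of (SZ_4K | SZ_64K | SZ_2M | SZ_1G),
 * the 4K granule is chosen and the bitmap is cut down to
 * SZ_4K | SZ_2M | SZ_1G, dropping the 64K entry.
 */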
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}

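/*
 * Worked sizing example (illustrative): a 4K granule with a 32-bit IAS
 * gives pg_shift = 12, bits_per_level = 9, va_bits = 20, levels = 3 and
 * pgd_bits = 20 - 2 * 9 = 2, i.e. a 32-byte pgd holding four PTEs.
 */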
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
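
	/*
	 * Illustrative example: a 4K granule with a 40-bit IAS would need
	 * four levels and a 16-byte pgd; its two level-0 entries are
	 * instead expanded into two concatenated level-1 tables (8K),
	 * dropping the walk to three levels.
	 */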

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
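
/*
 * Minimal usage sketch (illustrative only; cfg field names follow the
 * io_pgtable_cfg uses in this file and in the selftest below, and
 * my_tlb_ops/dev/cookie are hypothetical caller-supplied objects):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.tlb		= &my_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 */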

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif
