// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define IOMMU_RESET_REG 0x010
#define IOMMU_RESET_RELEASE_ALL 0xffffffff
#define IOMMU_ENABLE_REG 0x020
#define IOMMU_ENABLE_ENABLE BIT(0)

#define IOMMU_BYPASS_REG 0x030
#define IOMMU_AUTO_GATING_REG 0x040
#define IOMMU_AUTO_GATING_ENABLE BIT(0)

#define IOMMU_WBUF_CTRL_REG 0x044
#define IOMMU_OOO_CTRL_REG 0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG 0x04c
#define IOMMU_TTB_REG 0x050
#define IOMMU_TLB_ENABLE_REG 0x060
#define IOMMU_TLB_PREFETCH_REG 0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m) BIT(m)

#define IOMMU_TLB_FLUSH_REG 0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i) (BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG 0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG 0x094
#define IOMMU_TLB_IVLD_ENABLE_REG 0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG 0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG 0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE BIT(0)
#define IOMMU_DM_AUT_CTRL_REG(d) (0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m) (1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m) (1 << ((((d) & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG 0x0d0
#define IOMMU_INT_ENABLE_REG 0x100
#define IOMMU_INT_CLR_REG 0x104
#define IOMMU_INT_STA_REG 0x108
#define IOMMU_INT_ERR_ADDR_REG(i) (0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG 0x130
#define IOMMU_INT_ERR_ADDR_L2_REG 0x134
#define IOMMU_INT_ERR_DATA_REG(i) (0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG 0x0180
#define IOMMU_L2PG_INT_REG 0x0184

#define IOMMU_INT_INVALID_L2PG BIT(17)
#define IOMMU_INT_INVALID_L1PG BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m) BIT(m)
#define IOMMU_INT_MASTER_MASK (IOMMU_INT_MASTER_PERMISSION(0) | \
                               IOMMU_INT_MASTER_PERMISSION(1) | \
                               IOMMU_INT_MASTER_PERMISSION(2) | \
                               IOMMU_INT_MASTER_PERMISSION(3) | \
                               IOMMU_INT_MASTER_PERMISSION(4) | \
                               IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK (IOMMU_INT_INVALID_L1PG | \
                        IOMMU_INT_INVALID_L2PG | \
                        IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE sizeof(u32)

#define NUM_DT_ENTRIES 4096
#define DT_SIZE (NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES 256
#define PT_SIZE (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
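
/*
 * With 4096 DTEs, 256 PTEs per page table and 4 kB pages, the tables
 * cover 4096 * 256 * 4096 bytes, i.e. the full 32-bit input address
 * space advertised by the domain geometry below.
 */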

struct sun50i_iommu {
        struct iommu_device iommu;

        /* Lock to modify the IOMMU registers */
        spinlock_t iommu_lock;

        struct device *dev;
        void __iomem *base;
        struct reset_control *reset;
        struct clk *clk;

        struct iommu_domain *domain;
        struct iommu_group *group;
        struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
        struct iommu_domain domain;

        /* Number of devices attached to the domain */
        refcount_t refcnt;

        /* L1 Page Table */
        u32 *dt;
        dma_addr_t dt_dma;

        struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
        return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
        return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
        return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
        writel(value, iommu->base + offset);
}

/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), that consists of
 * 4096 4-byte Directory Table Entries (DTE), each pointing to a Page
 * Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register that contains its physical address.
 */

#define SUN50I_IOVA_DTE_MASK GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK GENMASK(11, 0)
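
/*
 * As a worked example of the split above, take the (arbitrary) IOVA
 * 0x01234567: bits 31:20 give DTE index 0x012, bits 19:12 give PTE
 * index 0x34, and bits 11:0 give the page offset 0x567. The physical
 * address is then found through dt[0x012] -> pt[0x34] -> page + 0x567.
 */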

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
        return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
        return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
        return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}

/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID 1
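
/*
 * For instance (using a made-up address), a page table located at
 * physical address 0x4a3bc000 is installed with the DTE 0x4a3bc001:
 * the address bits 31:10 are kept and the attributes field 1:0 is set
 * to SUN50I_DTE_PT_VALID.
 */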

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
        return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
        return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
        return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}

/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to give each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permissions in the
 * IOMMU_DM_AUT_CTRL_REG are only read-only, so it's not really
 * useful to enforce any particular permission.
 *
 * Each page entry will then have a reference to the domain it is
 * assigned to, so that we can actually enforce permissions on a
 * per-page basis.
 *
 * In order to make it work with the IOMMU framework, we will use
 * 4 different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain will
 * have each master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. We then
 * use the relevant domain index when generating the page table entry,
 * depending on the permissions we want enforced.
 */

enum sun50i_iommu_aci {
        SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
        SUN50I_IOMMU_ACI_NONE,
        SUN50I_IOMMU_ACI_RD,
        SUN50I_IOMMU_ACI_WR,
        SUN50I_IOMMU_ACI_RD_WR,
};

#define SUN50I_PTE_PAGE_ADDRESS_MASK GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID BIT(1)
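
/*
 * As an illustration (again with a made-up address), mapping the page
 * at 0x4a3bd000 with IOMMU_READ | IOMMU_WRITE selects
 * SUN50I_IOMMU_ACI_RD_WR (4), so sun50i_mk_pte() below returns
 * 0x4a3bd000 | (4 << 4) | BIT(1) == 0x4a3bd042.
 */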

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
        return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
        return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
        return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
        enum sun50i_iommu_aci aci;
        u32 flags = 0;

        if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
                aci = SUN50I_IOMMU_ACI_RD_WR;
        else if (prot & IOMMU_READ)
                aci = SUN50I_IOMMU_ACI_RD;
        else if (prot & IOMMU_WRITE)
                aci = SUN50I_IOMMU_ACI_WR;
        else
                aci = SUN50I_IOMMU_ACI_NONE;

        flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
        page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
        return page | flags | SUN50I_PTE_PAGE_VALID;
}

static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
                               void *vaddr, unsigned int count)
{
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        dma_addr_t dma = virt_to_phys(vaddr);
        size_t size = count * PT_ENTRY_SIZE;

        dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
        u32 reg;
        int ret;

        assert_spin_locked(&iommu->iommu_lock);

        iommu_write(iommu,
                    IOMMU_TLB_FLUSH_REG,
                    IOMMU_TLB_FLUSH_PTW_CACHE |
                    IOMMU_TLB_FLUSH_MACRO_TLB |
                    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(0));

        ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
                                        reg, !reg,
                                        1, 2000);
        if (ret)
                dev_warn(iommu->dev, "TLB Flush timed out!\n");

        return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        unsigned long flags;

        /*
         * At boot, we'll have a first call into .flush_iotlb_all right after
         * .probe_device, and since we link our (single) domain to our iommu in
         * the .attach_device callback, we don't have that pointer set.
         *
         * It shouldn't really be any trouble to ignore it though since we flush
         * all caches as part of the device powerup.
         */
        if (!iommu)
                return;

        spin_lock_irqsave(&iommu->iommu_lock, flags);
        sun50i_iommu_flush_all_tlb(iommu);
        spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
                                    struct iommu_iotlb_gather *gather)
{
        sun50i_iommu_flush_iotlb_all(domain);
}

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
        struct sun50i_iommu_domain *sun50i_domain;
        unsigned long flags;
        int ret;

        if (!iommu->domain)
                return 0;

        sun50i_domain = to_sun50i_domain(iommu->domain);

        ret = reset_control_deassert(iommu->reset);
        if (ret)
                return ret;

        ret = clk_prepare_enable(iommu->clk);
        if (ret)
                goto err_reset_assert;

        spin_lock_irqsave(&iommu->iommu_lock, flags);

        iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
        iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
        iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
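
        /*
         * Program the per-"domain" permission registers: the ACI_NONE
         * domain denies both reads and writes for all six masters, the
         * ACI_RD domain denies writes, and the ACI_WR domain denies
         * reads. The ACI_RD_WR domain is left at its reset default.
         */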
        iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

        iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

        iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

        ret = sun50i_iommu_flush_all_tlb(iommu);
        if (ret) {
                spin_unlock_irqrestore(&iommu->iommu_lock, flags);
                goto err_clk_disable;
        }

        iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
        iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

        spin_unlock_irqrestore(&iommu->iommu_lock, flags);

        return 0;

err_clk_disable:
        clk_disable_unprepare(iommu->clk);

err_reset_assert:
        reset_control_assert(iommu->reset);

        return ret;
}

static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu->iommu_lock, flags);

        iommu_write(iommu, IOMMU_ENABLE_REG, 0);
        iommu_write(iommu, IOMMU_TTB_REG, 0);

        spin_unlock_irqrestore(&iommu->iommu_lock, flags);

        clk_disable_unprepare(iommu->clk);
        reset_control_assert(iommu->reset);
}

static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
                                           gfp_t gfp)
{
        dma_addr_t pt_dma;
        u32 *page_table;

        page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(iommu->dev, pt_dma)) {
                dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
                kmem_cache_free(iommu->pt_pool, page_table);
                return ERR_PTR(-ENOMEM);
        }

        /* We rely on the physical address and DMA address being the same */
        WARN_ON(pt_dma != virt_to_phys(page_table));

        return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
                                         u32 *page_table)
{
        phys_addr_t pt_phys = virt_to_phys(page_table);

        dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
        kmem_cache_free(iommu->pt_pool, page_table);
}

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
                                      dma_addr_t iova, gfp_t gfp)
{
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        u32 *page_table;
        u32 *dte_addr;
        u32 old_dte;
        u32 dte;

        dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
        dte = *dte_addr;
        if (sun50i_dte_is_pt_valid(dte)) {
                phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
                return (u32 *)phys_to_virt(pt_phys);
        }

        page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
        if (IS_ERR(page_table))
                return page_table;

        dte = sun50i_mk_dte(virt_to_phys(page_table));
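        /*
         * Install the new page table atomically: if another mapping
         * raced us and already installed a DTE for this index, keep
         * the winner's table and free the one we just allocated.
         */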
        old_dte = cmpxchg(dte_addr, 0, dte);
        if (old_dte) {
                phys_addr_t installed_pt_phys =
                        sun50i_dte_get_pt_address(old_dte);
                u32 *installed_pt = phys_to_virt(installed_pt_phys);
                u32 *drop_pt = page_table;

                page_table = installed_pt;
                dte = old_dte;
                sun50i_iommu_free_page_table(iommu, drop_pt);
        }

        sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
        sun50i_table_flush(sun50i_domain, dte_addr, 1);

        return page_table;
}

static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        u32 pte_index;
        u32 *page_table, *pte_addr;
        int ret = 0;

        page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
        if (IS_ERR(page_table)) {
                ret = PTR_ERR(page_table);
                goto out;
        }

        pte_index = sun50i_iova_get_pte_index(iova);
        pte_addr = &page_table[pte_index];
        if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
                phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
                dev_err(iommu->dev,
                        "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
                        &iova, &page_phys, &paddr, prot);
                ret = -EBUSY;
                goto out;
        }

        *pte_addr = sun50i_mk_pte(paddr, prot);
        sun50i_table_flush(sun50i_domain, pte_addr, 1);

out:
        return ret;
}

static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                                 size_t size, struct iommu_iotlb_gather *gather)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
        u32 *pte_addr;
        u32 dte;

        dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
        if (!sun50i_dte_is_pt_valid(dte))
                return 0;

        pt_phys = sun50i_dte_get_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

        if (!sun50i_pte_is_page_valid(*pte_addr))
                return 0;

        memset(pte_addr, 0, sizeof(*pte_addr));
        sun50i_table_flush(sun50i_domain, pte_addr, 1);

        return SZ_4K;
}

static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
                                             dma_addr_t iova)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
        u32 *page_table;
        u32 dte, pte;

        dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
        if (!sun50i_dte_is_pt_valid(dte))
                return 0;

        pt_phys = sun50i_dte_get_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[sun50i_iova_get_pte_index(iova)];
        if (!sun50i_pte_is_page_valid(pte))
                return 0;

        return sun50i_pte_get_page_address(pte) +
               sun50i_iova_get_page_offset(iova);
}

static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
{
        struct sun50i_iommu_domain *sun50i_domain;

        if (type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
        if (!sun50i_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA &&
            iommu_get_dma_cookie(&sun50i_domain->domain))
                goto err_free_domain;

        sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                    get_order(DT_SIZE));
        if (!sun50i_domain->dt)
                goto err_put_cookie;

        refcount_set(&sun50i_domain->refcnt, 1);

        sun50i_domain->domain.geometry.aperture_start = 0;
        sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        sun50i_domain->domain.geometry.force_aperture = true;

        return &sun50i_domain->domain;

err_put_cookie:
        if (type == IOMMU_DOMAIN_DMA)
                iommu_put_dma_cookie(&sun50i_domain->domain);

err_free_domain:
        kfree(sun50i_domain);

        return NULL;
}

static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

        free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
        sun50i_domain->dt = NULL;

        iommu_put_dma_cookie(domain);

        kfree(sun50i_domain);
}

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
                                      struct sun50i_iommu_domain *sun50i_domain)
{
        iommu->domain = &sun50i_domain->domain;
        sun50i_domain->iommu = iommu;

        sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
                                               DT_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
                dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
                return -ENOMEM;
        }

        return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
                                       struct sun50i_iommu_domain *sun50i_domain)
{
        unsigned int i;

        for (i = 0; i < NUM_DT_ENTRIES; i++) {
                phys_addr_t pt_phys;
                u32 *page_table;
                u32 *dte_addr;
                u32 dte;

                dte_addr = &sun50i_domain->dt[i];
                dte = *dte_addr;
                if (!sun50i_dte_is_pt_valid(dte))
                        continue;

                memset(dte_addr, 0, sizeof(*dte_addr));
                sun50i_table_flush(sun50i_domain, dte_addr, 1);

                pt_phys = sun50i_dte_get_pt_address(dte);
                page_table = phys_to_virt(pt_phys);
                sun50i_iommu_free_page_table(iommu, page_table);
        }

        sun50i_iommu_disable(iommu);

        dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
                         DT_SIZE, DMA_TO_DEVICE);

        iommu->domain = NULL;
}

static void sun50i_iommu_detach_device(struct iommu_domain *domain,
                                       struct device *dev)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

        dev_dbg(dev, "Detaching from IOMMU domain\n");

        if (iommu->domain != domain)
                return;

        if (refcount_dec_and_test(&sun50i_domain->refcnt))
                sun50i_iommu_detach_domain(iommu, sun50i_domain);
}

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu;

        iommu = sun50i_iommu_from_dev(dev);
        if (!iommu)
                return -ENODEV;

        dev_dbg(dev, "Attaching to IOMMU domain\n");

        refcount_inc(&sun50i_domain->refcnt);

        if (iommu->domain == domain)
                return 0;

        if (iommu->domain)
                sun50i_iommu_detach_device(iommu->domain, dev);

        sun50i_iommu_attach_domain(iommu, sun50i_domain);

        return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
        struct sun50i_iommu *iommu;

        iommu = sun50i_iommu_from_dev(dev);
        if (!iommu)
                return ERR_PTR(-ENODEV);

        return &iommu->iommu;
}

static void sun50i_iommu_release_device(struct device *dev) {}

static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
        struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

        return iommu_group_ref_get(iommu->group);
}

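/*
 * Masters reference the IOMMU through a one-cell "iommus" specifier,
 * the cell being the master ID that .of_xlate below records in the
 * fwspec. A purely hypothetical consumer node (the node name and
 * master ID 3 are made up) would look like:
 *
 *	mixer0: mixer@1100000 {
 *		...
 *		iommus = <&iommu 3>;
 *	};
 */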
static int sun50i_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *args)
{
        struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
        unsigned id = args->args[0];

        dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

        return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops sun50i_iommu_ops = {
        .pgsize_bitmap = SZ_4K,
        .attach_dev = sun50i_iommu_attach_device,
        .detach_dev = sun50i_iommu_detach_device,
        .device_group = sun50i_iommu_device_group,
        .domain_alloc = sun50i_iommu_domain_alloc,
        .domain_free = sun50i_iommu_domain_free,
        .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
        .iotlb_sync = sun50i_iommu_iotlb_sync,
        .iova_to_phys = sun50i_iommu_iova_to_phys,
        .map = sun50i_iommu_map,
        .of_xlate = sun50i_iommu_of_xlate,
        .probe_device = sun50i_iommu_probe_device,
        .release_device = sun50i_iommu_release_device,
        .unmap = sun50i_iommu_unmap,
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
                                      unsigned master, phys_addr_t iova,
                                      unsigned prot)
{
        dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
                &iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

        if (iommu->domain)
                report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
        else
                dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
                                              unsigned addr_reg,
                                              unsigned blame_reg)
{
        phys_addr_t iova;
        unsigned master;
        u32 blame;

        assert_spin_locked(&iommu->iommu_lock);

        iova = iommu_read(iommu, addr_reg);
        blame = iommu_read(iommu, blame_reg);
        master = ilog2(blame & IOMMU_INT_MASTER_MASK);

        /*
         * If the address is not in the page table, we can't get what
         * operation triggered the fault. Assume it's a read
         * operation.
         */
        sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

        return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
        enum sun50i_iommu_aci aci;
        phys_addr_t iova;
        unsigned master;
        unsigned dir;
        u32 blame;

        assert_spin_locked(&iommu->iommu_lock);

        blame = iommu_read(iommu, IOMMU_INT_STA_REG);
        master = ilog2(blame & IOMMU_INT_MASTER_MASK);
        iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
        aci = sun50i_get_pte_aci(iommu_read(iommu,
                                            IOMMU_INT_ERR_DATA_REG(master)));

        switch (aci) {
        /*
         * If we are in the read-only domain, then it means we
         * tried to write.
         */
        case SUN50I_IOMMU_ACI_RD:
                dir = IOMMU_FAULT_WRITE;
                break;

        /*
         * If we are in the write-only domain, then it means
         * we tried to read.
         */
        case SUN50I_IOMMU_ACI_WR:

        /*
         * If we are in the domain without any permission, we
         * can't really tell. Let's default to a read
         * operation.
         */
        case SUN50I_IOMMU_ACI_NONE:

        /* WTF? */
        case SUN50I_IOMMU_ACI_RD_WR:
        default:
                dir = IOMMU_FAULT_READ;
                break;
        }

        sun50i_iommu_report_fault(iommu, master, iova, dir);

        return iova;
}

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
        u32 status, l1_status, l2_status, resets;
        struct sun50i_iommu *iommu = dev_id;

        spin_lock(&iommu->iommu_lock);

        status = iommu_read(iommu, IOMMU_INT_STA_REG);
        if (!(status & IOMMU_INT_MASK)) {
                spin_unlock(&iommu->iommu_lock);
                return IRQ_NONE;
        }

        l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
        l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);

        if (status & IOMMU_INT_INVALID_L2PG)
                sun50i_iommu_handle_pt_irq(iommu,
                                           IOMMU_INT_ERR_ADDR_L2_REG,
                                           IOMMU_L2PG_INT_REG);
        else if (status & IOMMU_INT_INVALID_L1PG)
                sun50i_iommu_handle_pt_irq(iommu,
                                           IOMMU_INT_ERR_ADDR_L1_REG,
                                           IOMMU_L1PG_INT_REG);
        else
                sun50i_iommu_handle_perm_irq(iommu);

        iommu_write(iommu, IOMMU_INT_CLR_REG, status);

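        /*
         * Briefly hold the masters that raised this interrupt in reset
         * (their bits cleared in IOMMU_RESET_REG), then release
         * everything again.
         */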
        resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
        iommu_write(iommu, IOMMU_RESET_REG, ~resets);
        iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);

        spin_unlock(&iommu->iommu_lock);

        return IRQ_HANDLED;
}

static int sun50i_iommu_probe(struct platform_device *pdev)
{
        struct sun50i_iommu *iommu;
        int ret, irq;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;
        spin_lock_init(&iommu->iommu_lock);
        platform_set_drvdata(pdev, iommu);
        iommu->dev = &pdev->dev;

        iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
                                           PT_SIZE, PT_SIZE,
                                           SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!iommu->pt_pool)
                return -ENOMEM;

        iommu->group = iommu_group_alloc();
        if (IS_ERR(iommu->group)) {
                ret = PTR_ERR(iommu->group);
                goto err_free_cache;
        }

        iommu->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(iommu->base)) {
                ret = PTR_ERR(iommu->base);
                goto err_free_group;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto err_free_group;
        }

        iommu->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(iommu->clk)) {
                dev_err(&pdev->dev, "Couldn't get our clock.\n");
                ret = PTR_ERR(iommu->clk);
                goto err_free_group;
        }

        iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(iommu->reset)) {
                dev_err(&pdev->dev, "Couldn't get our reset line.\n");
                ret = PTR_ERR(iommu->reset);
                goto err_free_group;
        }

        ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
                                     NULL, dev_name(&pdev->dev));
        if (ret)
                goto err_free_group;

        iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret)
                goto err_remove_sysfs;

        ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
                               dev_name(&pdev->dev), iommu);
        if (ret < 0)
                goto err_unregister;

        bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);

        return 0;

err_unregister:
        iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);

err_free_group:
        iommu_group_put(iommu->group);

err_free_cache:
        kmem_cache_destroy(iommu->pt_pool);

        return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
        { .compatible = "allwinner,sun50i-h6-iommu", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
        .driver = {
                .name = "sun50i-iommu",
                .of_match_table = sun50i_iommu_dt,
                .suppress_bind_attrs = true,
        }
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");