// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
// Copyright (C) 2019-2020, Cerno

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define IOMMU_RESET_REG                 0x010
#define IOMMU_RESET_RELEASE_ALL         0xffffffff
#define IOMMU_ENABLE_REG                0x020
#define IOMMU_ENABLE_ENABLE             BIT(0)

#define IOMMU_BYPASS_REG                0x030
#define IOMMU_AUTO_GATING_REG           0x040
#define IOMMU_AUTO_GATING_ENABLE        BIT(0)

#define IOMMU_WBUF_CTRL_REG             0x044
#define IOMMU_OOO_CTRL_REG              0x048
#define IOMMU_4KB_BDY_PRT_CTRL_REG      0x04c
#define IOMMU_TTB_REG                   0x050
#define IOMMU_TLB_ENABLE_REG            0x060
#define IOMMU_TLB_PREFETCH_REG          0x070
#define IOMMU_TLB_PREFETCH_MASTER_ENABLE(m)     BIT(m)

#define IOMMU_TLB_FLUSH_REG             0x080
#define IOMMU_TLB_FLUSH_PTW_CACHE       BIT(17)
#define IOMMU_TLB_FLUSH_MACRO_TLB       BIT(16)
#define IOMMU_TLB_FLUSH_MICRO_TLB(i)    (BIT(i) & GENMASK(5, 0))

#define IOMMU_TLB_IVLD_ADDR_REG         0x090
#define IOMMU_TLB_IVLD_ADDR_MASK_REG    0x094
#define IOMMU_TLB_IVLD_ENABLE_REG       0x098
#define IOMMU_TLB_IVLD_ENABLE_ENABLE    BIT(0)

#define IOMMU_PC_IVLD_ADDR_REG          0x0a0
#define IOMMU_PC_IVLD_ENABLE_REG        0x0a8
#define IOMMU_PC_IVLD_ENABLE_ENABLE     BIT(0)

#define IOMMU_DM_AUT_CTRL_REG(d)        (0x0b0 + ((d) / 2) * 4)
#define IOMMU_DM_AUT_CTRL_RD_UNAVAIL(d, m)      (1 << ((((d) & 1) * 16) + ((m) * 2)))
#define IOMMU_DM_AUT_CTRL_WR_UNAVAIL(d, m)      (1 << ((((d) & 1) * 16) + ((m) * 2) + 1))

#define IOMMU_DM_AUT_OVWT_REG           0x0d0
#define IOMMU_INT_ENABLE_REG            0x100
#define IOMMU_INT_CLR_REG               0x104
#define IOMMU_INT_STA_REG               0x108
#define IOMMU_INT_ERR_ADDR_REG(i)       (0x110 + (i) * 4)
#define IOMMU_INT_ERR_ADDR_L1_REG       0x130
#define IOMMU_INT_ERR_ADDR_L2_REG       0x134
#define IOMMU_INT_ERR_DATA_REG(i)       (0x150 + (i) * 4)
#define IOMMU_L1PG_INT_REG              0x0180
#define IOMMU_L2PG_INT_REG              0x0184

#define IOMMU_INT_INVALID_L2PG          BIT(17)
#define IOMMU_INT_INVALID_L1PG          BIT(16)
#define IOMMU_INT_MASTER_PERMISSION(m)  BIT(m)
#define IOMMU_INT_MASTER_MASK           (IOMMU_INT_MASTER_PERMISSION(0) | \
                                         IOMMU_INT_MASTER_PERMISSION(1) | \
                                         IOMMU_INT_MASTER_PERMISSION(2) | \
                                         IOMMU_INT_MASTER_PERMISSION(3) | \
                                         IOMMU_INT_MASTER_PERMISSION(4) | \
                                         IOMMU_INT_MASTER_PERMISSION(5))
#define IOMMU_INT_MASK                  (IOMMU_INT_INVALID_L1PG | \
                                         IOMMU_INT_INVALID_L2PG | \
                                         IOMMU_INT_MASTER_MASK)

#define PT_ENTRY_SIZE                   sizeof(u32)

#define NUM_DT_ENTRIES                  4096
#define DT_SIZE                         (NUM_DT_ENTRIES * PT_ENTRY_SIZE)

#define NUM_PT_ENTRIES                  256
#define PT_SIZE                         (NUM_PT_ENTRIES * PT_ENTRY_SIZE)
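
/*
 * A quick sanity check on the sizes above (the arithmetic is worked
 * out here purely for illustration): the DT is 4096 * 4 bytes (16 kB)
 * and each PT is 256 * 4 bytes (1 kB). With 4 kB pages, a fully
 * populated table covers 4096 * 256 * 4 kB = 4 GB, i.e. the whole
 * 32-bit IOVA space.
 */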

struct sun50i_iommu {
        struct iommu_device iommu;

        /* Lock to modify the IOMMU registers */
        spinlock_t iommu_lock;

        struct device *dev;
        void __iomem *base;
        struct reset_control *reset;
        struct clk *clk;

        struct iommu_domain *domain;
        struct iommu_group *group;
        struct kmem_cache *pt_pool;
};

struct sun50i_iommu_domain {
        struct iommu_domain domain;

        /* Number of devices attached to the domain */
        refcount_t refcnt;

        /* L1 Page Table */
        u32 *dt;
        dma_addr_t dt_dma;

        struct sun50i_iommu *iommu;
};

static struct sun50i_iommu_domain *to_sun50i_domain(struct iommu_domain *domain)
{
        return container_of(domain, struct sun50i_iommu_domain, domain);
}

static struct sun50i_iommu *sun50i_iommu_from_dev(struct device *dev)
{
        return dev_iommu_priv_get(dev);
}

static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset)
{
        return readl(iommu->base + offset);
}

static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value)
{
        writel(value, iommu->base + offset);
}
/*
 * The Allwinner H6 IOMMU uses a 2-level page table.
 *
 * The first level is the usual Directory Table (DT), which consists
 * of 4096 4-byte Directory Table Entries (DTE), each pointing to a
 * Page Table (PT).
 *
 * Each PT consists of 256 4-byte Page Table Entries (PTE), each
 * pointing to a 4 kB page of physical memory.
 *
 * The IOMMU supports a single DT, pointed to by the IOMMU_TTB_REG
 * register, which holds its physical address.
 */

#define SUN50I_IOVA_DTE_MASK            GENMASK(31, 20)
#define SUN50I_IOVA_PTE_MASK            GENMASK(19, 12)
#define SUN50I_IOVA_PAGE_MASK           GENMASK(11, 0)

static u32 sun50i_iova_get_dte_index(dma_addr_t iova)
{
        return FIELD_GET(SUN50I_IOVA_DTE_MASK, iova);
}

static u32 sun50i_iova_get_pte_index(dma_addr_t iova)
{
        return FIELD_GET(SUN50I_IOVA_PTE_MASK, iova);
}

static u32 sun50i_iova_get_page_offset(dma_addr_t iova)
{
        return FIELD_GET(SUN50I_IOVA_PAGE_MASK, iova);
}
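
/*
 * As an illustration (the iova value is chosen arbitrarily), an iova
 * of 0x12345678 decomposes with the masks above into:
 *
 *   DTE index   = FIELD_GET(SUN50I_IOVA_DTE_MASK,  0x12345678) = 0x123
 *   PTE index   = FIELD_GET(SUN50I_IOVA_PTE_MASK,  0x12345678) = 0x45
 *   page offset = FIELD_GET(SUN50I_IOVA_PAGE_MASK, 0x12345678) = 0x678
 *
 * so that iova resolves through dt[0x123], then entry 0x45 of the
 * page table it points to, plus a 0x678 byte offset into the page.
 */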

/*
 * Each Directory Table Entry has a Page Table address and a valid
 * bit:
 *
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:10 - Page Table address
 *   9:2  - Reserved
 *   1:0  - 1 if the entry is valid
 */

#define SUN50I_DTE_PT_ADDRESS_MASK      GENMASK(31, 10)
#define SUN50I_DTE_PT_ATTRS             GENMASK(1, 0)
#define SUN50I_DTE_PT_VALID             1

static phys_addr_t sun50i_dte_get_pt_address(u32 dte)
{
        return (phys_addr_t)dte & SUN50I_DTE_PT_ADDRESS_MASK;
}

static bool sun50i_dte_is_pt_valid(u32 dte)
{
        return (dte & SUN50I_DTE_PT_ATTRS) == SUN50I_DTE_PT_VALID;
}

static u32 sun50i_mk_dte(dma_addr_t pt_dma)
{
        return (pt_dma & SUN50I_DTE_PT_ADDRESS_MASK) | SUN50I_DTE_PT_VALID;
}
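
/*
 * For example, for a hypothetical page table at physical/DMA address
 * 0x43210400 (page tables come from a kmem_cache aligned to PT_SIZE,
 * so bits 9:0 are always zero), sun50i_mk_dte() yields:
 *
 *   (0x43210400 & GENMASK(31, 10)) | 1 = 0x43210401
 *
 * i.e. the PT address in bits 31:10 with the valid pattern in the
 * two low attribute bits.
 */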

/*
 * Each PTE has a Page address, an authority index and a valid bit:
 *
 * +----------------+-----+-----+-----+---+-----+
 * | Page address   | Rsv | ACI | Rsv | V | Rsv |
 * +----------------+-----+-----+-----+---+-----+
 *  31:12 - Page address
 *  11:8  - Reserved
 *   7:4  - Authority Control Index
 *   3:2  - Reserved
 *     1  - 1 if the entry is valid
 *     0  - Reserved
 *
 * The way permissions work is that the IOMMU has 16 "domains" that
 * can be configured to grant each master either read or write
 * permissions through the IOMMU_DM_AUT_CTRL_REG registers. Domain 0
 * seems to be the default domain, and its permission bits in
 * IOMMU_DM_AUT_CTRL_REG are read-only, so it cannot be used to
 * enforce any particular permission.
 *
 * Each page table entry then carries the index of the domain it is
 * assigned to, so that permissions can actually be enforced on a
 * per-page basis.
 *
 * In order to make this work with the IOMMU framework, we use 4
 * different domains, starting at 1: RD_WR, RD, WR and NONE,
 * depending on the permission we want to enforce. Each domain has
 * every master set up in the same way, since the IOMMU framework
 * doesn't seem to restrict page access on a per-device basis. We
 * then use the relevant domain index when generating the page table
 * entry, depending on the permissions we want enforced.
 */

enum sun50i_iommu_aci {
        SUN50I_IOMMU_ACI_DO_NOT_USE = 0,
        SUN50I_IOMMU_ACI_NONE,
        SUN50I_IOMMU_ACI_RD,
        SUN50I_IOMMU_ACI_WR,
        SUN50I_IOMMU_ACI_RD_WR,
};
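
/*
 * A worked example of the IOMMU_DM_AUT_CTRL_* macro arithmetic, for
 * the read-only domain (SUN50I_IOMMU_ACI_RD == 2): two domains share
 * each 32-bit register, so domain 2 lives in the low half of
 * IOMMU_DM_AUT_CTRL_REG(2) = 0x0b0 + (2 / 2) * 4 = 0x0b4, and
 * revoking write access for master 3 sets
 * IOMMU_DM_AUT_CTRL_WR_UNAVAIL(2, 3) = 1 << ((2 & 1) * 16 + 3 * 2 + 1)
 * = BIT(7) in that register.
 */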

#define SUN50I_PTE_PAGE_ADDRESS_MASK    GENMASK(31, 12)
#define SUN50I_PTE_ACI_MASK             GENMASK(7, 4)
#define SUN50I_PTE_PAGE_VALID           BIT(1)

static phys_addr_t sun50i_pte_get_page_address(u32 pte)
{
        return (phys_addr_t)pte & SUN50I_PTE_PAGE_ADDRESS_MASK;
}

static enum sun50i_iommu_aci sun50i_get_pte_aci(u32 pte)
{
        return FIELD_GET(SUN50I_PTE_ACI_MASK, pte);
}

static bool sun50i_pte_is_page_valid(u32 pte)
{
        return pte & SUN50I_PTE_PAGE_VALID;
}

static u32 sun50i_mk_pte(phys_addr_t page, int prot)
{
        enum sun50i_iommu_aci aci;
        u32 flags = 0;

        if ((prot & (IOMMU_READ | IOMMU_WRITE)) == (IOMMU_READ | IOMMU_WRITE))
                aci = SUN50I_IOMMU_ACI_RD_WR;
        else if (prot & IOMMU_READ)
                aci = SUN50I_IOMMU_ACI_RD;
        else if (prot & IOMMU_WRITE)
                aci = SUN50I_IOMMU_ACI_WR;
        else
                aci = SUN50I_IOMMU_ACI_NONE;

        flags |= FIELD_PREP(SUN50I_PTE_ACI_MASK, aci);
        page &= SUN50I_PTE_PAGE_ADDRESS_MASK;
        return page | flags | SUN50I_PTE_PAGE_VALID;
}
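
/*
 * To illustrate sun50i_mk_pte() with arbitrary values: mapping the
 * physical page at 0x40001000 with prot = IOMMU_READ | IOMMU_WRITE
 * selects SUN50I_IOMMU_ACI_RD_WR (4), giving
 *
 *   0x40001000 | FIELD_PREP(SUN50I_PTE_ACI_MASK, 4) | BIT(1)
 *   = 0x40001000 | 0x40 | 0x2 = 0x40001042
 */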

static void sun50i_table_flush(struct sun50i_iommu_domain *sun50i_domain,
                               void *vaddr, unsigned int count)
{
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        dma_addr_t dma = virt_to_phys(vaddr);
        size_t size = count * PT_ENTRY_SIZE;

        dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE);
}

static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
{
        u32 reg;
        int ret;

        assert_spin_locked(&iommu->iommu_lock);

        iommu_write(iommu,
                    IOMMU_TLB_FLUSH_REG,
                    IOMMU_TLB_FLUSH_PTW_CACHE |
                    IOMMU_TLB_FLUSH_MACRO_TLB |
                    IOMMU_TLB_FLUSH_MICRO_TLB(5) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(4) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(3) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(2) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(1) |
                    IOMMU_TLB_FLUSH_MICRO_TLB(0));

        ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
                                        reg, !reg,
                                        1, 2000);
        if (ret)
                dev_warn(iommu->dev, "TLB Flush timed out!\n");

        return ret;
}

static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        unsigned long flags;

        /*
         * At boot, we'll have a first call into .flush_iotlb_all right after
         * .probe_device, and since we link our (single) domain to our iommu in
         * the .attach_device callback, we don't have that pointer set.
         *
         * It shouldn't really be any trouble to ignore it though since we flush
         * all caches as part of the device powerup.
         */
        if (!iommu)
                return;

        spin_lock_irqsave(&iommu->iommu_lock, flags);
        sun50i_iommu_flush_all_tlb(iommu);
        spin_unlock_irqrestore(&iommu->iommu_lock, flags);
}

static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
                                    struct iommu_iotlb_gather *gather)
{
        sun50i_iommu_flush_iotlb_all(domain);
}

static int sun50i_iommu_enable(struct sun50i_iommu *iommu)
{
        struct sun50i_iommu_domain *sun50i_domain;
        unsigned long flags;
        int ret;

        if (!iommu->domain)
                return 0;

        sun50i_domain = to_sun50i_domain(iommu->domain);

        ret = reset_control_deassert(iommu->reset);
        if (ret)
                return ret;

        ret = clk_prepare_enable(iommu->clk);
        if (ret)
                goto err_reset_assert;

        spin_lock_irqsave(&iommu->iommu_lock, flags);

        iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma);
        iommu_write(iommu, IOMMU_TLB_PREFETCH_REG,
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(0) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(1) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(2) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(3) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(4) |
                    IOMMU_TLB_PREFETCH_MASTER_ENABLE(5));
        iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK);
        iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE),
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 0) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 1) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 2) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 3) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 4) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_NONE, 5));

        iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD),
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 0) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 1) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 2) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 3) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 4) |
                    IOMMU_DM_AUT_CTRL_WR_UNAVAIL(SUN50I_IOMMU_ACI_RD, 5));

        iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR),
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 0) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 1) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 2) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 3) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 4) |
                    IOMMU_DM_AUT_CTRL_RD_UNAVAIL(SUN50I_IOMMU_ACI_WR, 5));

        ret = sun50i_iommu_flush_all_tlb(iommu);
        if (ret) {
                spin_unlock_irqrestore(&iommu->iommu_lock, flags);
                goto err_clk_disable;
        }

        iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE);
        iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE);

        spin_unlock_irqrestore(&iommu->iommu_lock, flags);

        return 0;

err_clk_disable:
        clk_disable_unprepare(iommu->clk);

err_reset_assert:
        reset_control_assert(iommu->reset);

        return ret;
}

static void sun50i_iommu_disable(struct sun50i_iommu *iommu)
{
        unsigned long flags;

        spin_lock_irqsave(&iommu->iommu_lock, flags);

        iommu_write(iommu, IOMMU_ENABLE_REG, 0);
        iommu_write(iommu, IOMMU_TTB_REG, 0);

        spin_unlock_irqrestore(&iommu->iommu_lock, flags);

        clk_disable_unprepare(iommu->clk);
        reset_control_assert(iommu->reset);
}

static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu,
                                           gfp_t gfp)
{
        dma_addr_t pt_dma;
        u32 *page_table;

        page_table = kmem_cache_zalloc(iommu->pt_pool, gfp);
        if (!page_table)
                return ERR_PTR(-ENOMEM);

        pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(iommu->dev, pt_dma)) {
                dev_err(iommu->dev, "Couldn't map L2 Page Table\n");
                kmem_cache_free(iommu->pt_pool, page_table);
                return ERR_PTR(-ENOMEM);
        }

        /* We rely on the physical address and DMA address being the same */
        WARN_ON(pt_dma != virt_to_phys(page_table));

        return page_table;
}

static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu,
                                         u32 *page_table)
{
        phys_addr_t pt_phys = virt_to_phys(page_table);

        dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE);
        kmem_cache_free(iommu->pt_pool, page_table);
}

static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
                                      dma_addr_t iova, gfp_t gfp)
{
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        u32 *page_table;
        u32 *dte_addr;
        u32 old_dte;
        u32 dte;

        dte_addr = &sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
        dte = *dte_addr;
        if (sun50i_dte_is_pt_valid(dte)) {
                phys_addr_t pt_phys = sun50i_dte_get_pt_address(dte);
                return (u32 *)phys_to_virt(pt_phys);
        }

        page_table = sun50i_iommu_alloc_page_table(iommu, gfp);
        if (IS_ERR(page_table))
                return page_table;

        dte = sun50i_mk_dte(virt_to_phys(page_table));
        old_dte = cmpxchg(dte_addr, 0, dte);
        if (old_dte) {
                /*
                 * A concurrent mapper raced us and installed its own
                 * page table first: keep the installed table and
                 * free the one we just allocated.
                 */
                phys_addr_t installed_pt_phys =
                        sun50i_dte_get_pt_address(old_dte);
                u32 *installed_pt = phys_to_virt(installed_pt_phys);
                u32 *drop_pt = page_table;

                page_table = installed_pt;
                dte = old_dte;
                sun50i_iommu_free_page_table(iommu, drop_pt);
        }

        sun50i_table_flush(sun50i_domain, page_table, NUM_PT_ENTRIES);
        sun50i_table_flush(sun50i_domain, dte_addr, 1);

        return page_table;
}

static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
        u32 pte_index;
        u32 *page_table, *pte_addr;
        int ret = 0;

        page_table = sun50i_dte_get_page_table(sun50i_domain, iova, gfp);
        if (IS_ERR(page_table)) {
                ret = PTR_ERR(page_table);
                goto out;
        }

        pte_index = sun50i_iova_get_pte_index(iova);
        pte_addr = &page_table[pte_index];
        if (unlikely(sun50i_pte_is_page_valid(*pte_addr))) {
                phys_addr_t page_phys = sun50i_pte_get_page_address(*pte_addr);
                dev_err(iommu->dev,
                        "iova %pad already mapped to %pa cannot remap to %pa prot: %#x\n",
                        &iova, &page_phys, &paddr, prot);
                ret = -EBUSY;
                goto out;
        }

        *pte_addr = sun50i_mk_pte(paddr, prot);
        sun50i_table_flush(sun50i_domain, pte_addr, 1);

out:
        return ret;
}

static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
                                 size_t size, struct iommu_iotlb_gather *gather)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
        u32 *pte_addr;
        u32 dte;

        dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
        if (!sun50i_dte_is_pt_valid(dte))
                return 0;

        pt_phys = sun50i_dte_get_pt_address(dte);
        pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);

        if (!sun50i_pte_is_page_valid(*pte_addr))
                return 0;

        memset(pte_addr, 0, sizeof(*pte_addr));
        sun50i_table_flush(sun50i_domain, pte_addr, 1);

        return SZ_4K;
}

static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
                                             dma_addr_t iova)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
        u32 *page_table;
        u32 dte, pte;

        dte = sun50i_domain->dt[sun50i_iova_get_dte_index(iova)];
        if (!sun50i_dte_is_pt_valid(dte))
                return 0;

        pt_phys = sun50i_dte_get_pt_address(dte);
        page_table = (u32 *)phys_to_virt(pt_phys);
        pte = page_table[sun50i_iova_get_pte_index(iova)];
        if (!sun50i_pte_is_page_valid(pte))
                return 0;

        return sun50i_pte_get_page_address(pte) +
               sun50i_iova_get_page_offset(iova);
}

static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
{
        struct sun50i_iommu_domain *sun50i_domain;

        if (type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
        if (!sun50i_domain)
                return NULL;

        sun50i_domain->dt = (u32 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                    get_order(DT_SIZE));
        if (!sun50i_domain->dt)
                goto err_free_domain;

        refcount_set(&sun50i_domain->refcnt, 1);

        sun50i_domain->domain.geometry.aperture_start = 0;
        sun50i_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        sun50i_domain->domain.geometry.force_aperture = true;

        return &sun50i_domain->domain;

err_free_domain:
        kfree(sun50i_domain);

        return NULL;
}

static void sun50i_iommu_domain_free(struct iommu_domain *domain)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);

        free_pages((unsigned long)sun50i_domain->dt, get_order(DT_SIZE));
        sun50i_domain->dt = NULL;

        kfree(sun50i_domain);
}

static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu,
                                      struct sun50i_iommu_domain *sun50i_domain)
{
        iommu->domain = &sun50i_domain->domain;
        sun50i_domain->iommu = iommu;

        sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt,
                                               DT_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) {
                dev_err(iommu->dev, "Couldn't map L1 Page Table\n");
                return -ENOMEM;
        }

        return sun50i_iommu_enable(iommu);
}

static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
                                       struct sun50i_iommu_domain *sun50i_domain)
{
        unsigned int i;

        for (i = 0; i < NUM_DT_ENTRIES; i++) {
                phys_addr_t pt_phys;
                u32 *page_table;
                u32 *dte_addr;
                u32 dte;

                dte_addr = &sun50i_domain->dt[i];
                dte = *dte_addr;
                if (!sun50i_dte_is_pt_valid(dte))
                        continue;

                memset(dte_addr, 0, sizeof(*dte_addr));
                sun50i_table_flush(sun50i_domain, dte_addr, 1);

                pt_phys = sun50i_dte_get_pt_address(dte);
                page_table = phys_to_virt(pt_phys);
                sun50i_iommu_free_page_table(iommu, page_table);
        }

        sun50i_iommu_disable(iommu);

        dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt),
                         DT_SIZE, DMA_TO_DEVICE);

        iommu->domain = NULL;
}

static void sun50i_iommu_detach_device(struct iommu_domain *domain,
                                       struct device *dev)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);

        dev_dbg(dev, "Detaching from IOMMU domain\n");

        if (iommu->domain != domain)
                return;

        if (refcount_dec_and_test(&sun50i_domain->refcnt))
                sun50i_iommu_detach_domain(iommu, sun50i_domain);
}

static int sun50i_iommu_attach_device(struct iommu_domain *domain,
                                      struct device *dev)
{
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu;

        iommu = sun50i_iommu_from_dev(dev);
        if (!iommu)
                return -ENODEV;

        dev_dbg(dev, "Attaching to IOMMU domain\n");

        refcount_inc(&sun50i_domain->refcnt);

        if (iommu->domain == domain)
                return 0;

        if (iommu->domain)
                sun50i_iommu_detach_device(iommu->domain, dev);

        sun50i_iommu_attach_domain(iommu, sun50i_domain);

        return 0;
}

static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
{
        struct sun50i_iommu *iommu;

        iommu = sun50i_iommu_from_dev(dev);
        if (!iommu)
                return ERR_PTR(-ENODEV);

        return &iommu->iommu;
}

static void sun50i_iommu_release_device(struct device *dev) {}

static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
{
        struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);

        return iommu_group_ref_get(iommu->group);
}

static int sun50i_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *args)
{
        struct platform_device *iommu_pdev = of_find_device_by_node(args->np);
        unsigned id = args->args[0];

        dev_iommu_priv_set(dev, platform_get_drvdata(iommu_pdev));

        return iommu_fwspec_add_ids(dev, &id, 1);
}

static const struct iommu_ops sun50i_iommu_ops = {
        .pgsize_bitmap = SZ_4K,
        .attach_dev = sun50i_iommu_attach_device,
        .detach_dev = sun50i_iommu_detach_device,
        .device_group = sun50i_iommu_device_group,
        .domain_alloc = sun50i_iommu_domain_alloc,
        .domain_free = sun50i_iommu_domain_free,
        .flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
        .iotlb_sync = sun50i_iommu_iotlb_sync,
        .iova_to_phys = sun50i_iommu_iova_to_phys,
        .map = sun50i_iommu_map,
        .of_xlate = sun50i_iommu_of_xlate,
        .probe_device = sun50i_iommu_probe_device,
        .release_device = sun50i_iommu_release_device,
        .unmap = sun50i_iommu_unmap,
};

static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
                                      unsigned master, phys_addr_t iova,
                                      unsigned prot)
{
        dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n",
                &iova, master, (prot == IOMMU_FAULT_WRITE) ? "wr" : "rd");

        if (iommu->domain)
                report_iommu_fault(iommu->domain, iommu->dev, iova, prot);
        else
                dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n");
}

static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu,
                                              unsigned addr_reg,
                                              unsigned blame_reg)
{
        phys_addr_t iova;
        unsigned master;
        u32 blame;

        assert_spin_locked(&iommu->iommu_lock);

        iova = iommu_read(iommu, addr_reg);
        blame = iommu_read(iommu, blame_reg);
        master = ilog2(blame & IOMMU_INT_MASTER_MASK);

        /*
         * If the address is not in the page table, we can't get what
         * operation triggered the fault. Assume it's a read
         * operation.
         */
        sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ);

        return iova;
}

static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu)
{
        enum sun50i_iommu_aci aci;
        phys_addr_t iova;
        unsigned master;
        unsigned dir;
        u32 blame;

        assert_spin_locked(&iommu->iommu_lock);

        blame = iommu_read(iommu, IOMMU_INT_STA_REG);
        master = ilog2(blame & IOMMU_INT_MASTER_MASK);
        iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master));
        aci = sun50i_get_pte_aci(iommu_read(iommu,
                                            IOMMU_INT_ERR_DATA_REG(master)));

        switch (aci) {
                /*
                 * If we are in the read-only domain, then it means we
                 * tried to write.
                 */
        case SUN50I_IOMMU_ACI_RD:
                dir = IOMMU_FAULT_WRITE;
                break;

                /*
                 * If we are in the write-only domain, then it means
                 * we tried to read.
                 */
        case SUN50I_IOMMU_ACI_WR:

                /*
                 * If we are in the domain without any permission, we
                 * can't really tell. Let's default to a read
                 * operation.
                 */
        case SUN50I_IOMMU_ACI_NONE:

                /* WTF? */
        case SUN50I_IOMMU_ACI_RD_WR:
        default:
                dir = IOMMU_FAULT_READ;
                break;
        }

        /*
         * If the address is not in the page table, we can't get what
         * operation triggered the fault. Assume it's a read
         * operation.
         */
        sun50i_iommu_report_fault(iommu, master, iova, dir);

        return iova;
}

static irqreturn_t sun50i_iommu_irq(int irq, void *dev_id)
{
        u32 status, l1_status, l2_status, resets;
        struct sun50i_iommu *iommu = dev_id;

        spin_lock(&iommu->iommu_lock);

        status = iommu_read(iommu, IOMMU_INT_STA_REG);
        if (!(status & IOMMU_INT_MASK)) {
                spin_unlock(&iommu->iommu_lock);
                return IRQ_NONE;
        }

        l1_status = iommu_read(iommu, IOMMU_L1PG_INT_REG);
        l2_status = iommu_read(iommu, IOMMU_L2PG_INT_REG);

        if (status & IOMMU_INT_INVALID_L2PG)
                sun50i_iommu_handle_pt_irq(iommu,
                                           IOMMU_INT_ERR_ADDR_L2_REG,
                                           IOMMU_L2PG_INT_REG);
        else if (status & IOMMU_INT_INVALID_L1PG)
                sun50i_iommu_handle_pt_irq(iommu,
                                           IOMMU_INT_ERR_ADDR_L1_REG,
                                           IOMMU_L1PG_INT_REG);
        else
                sun50i_iommu_handle_perm_irq(iommu);

        iommu_write(iommu, IOMMU_INT_CLR_REG, status);

        resets = (status | l1_status | l2_status) & IOMMU_INT_MASTER_MASK;
        iommu_write(iommu, IOMMU_RESET_REG, ~resets);
        iommu_write(iommu, IOMMU_RESET_REG, IOMMU_RESET_RELEASE_ALL);

        spin_unlock(&iommu->iommu_lock);

        return IRQ_HANDLED;
}

static int sun50i_iommu_probe(struct platform_device *pdev)
{
        struct sun50i_iommu *iommu;
        int ret, irq;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;
        spin_lock_init(&iommu->iommu_lock);
        platform_set_drvdata(pdev, iommu);
        iommu->dev = &pdev->dev;

        iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev),
                                           PT_SIZE, PT_SIZE,
                                           SLAB_HWCACHE_ALIGN,
                                           NULL);
        if (!iommu->pt_pool)
                return -ENOMEM;

        iommu->group = iommu_group_alloc();
        if (IS_ERR(iommu->group)) {
                ret = PTR_ERR(iommu->group);
                goto err_free_cache;
        }

        iommu->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(iommu->base)) {
                ret = PTR_ERR(iommu->base);
                goto err_free_group;
        }

        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
                goto err_free_group;
        }

        iommu->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(iommu->clk)) {
                dev_err(&pdev->dev, "Couldn't get our clock.\n");
                ret = PTR_ERR(iommu->clk);
                goto err_free_group;
        }

        iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(iommu->reset)) {
                dev_err(&pdev->dev, "Couldn't get our reset line.\n");
                ret = PTR_ERR(iommu->reset);
                goto err_free_group;
        }

        ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
                                     NULL, dev_name(&pdev->dev));
        if (ret)
                goto err_free_group;

        ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
        if (ret)
                goto err_remove_sysfs;

        ret = devm_request_irq(&pdev->dev, irq, sun50i_iommu_irq, 0,
                               dev_name(&pdev->dev), iommu);
        if (ret < 0)
                goto err_unregister;

        bus_set_iommu(&platform_bus_type, &sun50i_iommu_ops);

        return 0;

err_unregister:
        iommu_device_unregister(&iommu->iommu);

err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);

err_free_group:
        iommu_group_put(iommu->group);

err_free_cache:
        kmem_cache_destroy(iommu->pt_pool);

        return ret;
}

static const struct of_device_id sun50i_iommu_dt[] = {
        { .compatible = "allwinner,sun50i-h6-iommu", },
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun50i_iommu_dt);

static struct platform_driver sun50i_iommu_driver = {
        .driver = {
                .name = "sun50i-iommu",
                .of_match_table = sun50i_iommu_dt,
                .suppress_bind_attrs = true,
        }
};
builtin_platform_driver_probe(sun50i_iommu_driver, sun50i_iommu_probe);

MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_AUTHOR("zhuxianbin <zhuxianbin@allwinnertech.com>");
MODULE_LICENSE("Dual BSD/GPL");