1 /*******************************************************************************
2  * Copyright (C) 2016-2018, Allwinner Technology CO., LTD.
3  * Author: zhuxianbin <zhuxianbin@allwinnertech.com>
4  *
5  * This file is provided under a dual BSD/GPL license.  When using or
6  * redistributing this file, you may do so under either license.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
11  * GNU General Public License for more details.
12  ******************************************************************************/
13 #include <linux/of_iommu.h>
14 #include <linux/module.h>
15 #include <linux/of_platform.h>
16 #include <linux/platform_device.h>
17 #include <linux/interrupt.h>
18 #include <linux/of_irq.h>
19 #include <linux/err.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/iommu.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/clk.h>
25 #include <linux/dma-iommu.h>
26 #include <linux/sizes.h>
27 #include <linux/device.h>
28 #include <asm/cacheflush.h>
29 #include <linux/pm_runtime.h>
30 
31 #include "sunxi-iommu.h"
32 
33 #define _max(x, y) (((u64)(x) > (u64)(y)) ? (x) : (y))
34 
35 static struct kmem_cache *iopte_cache;
36 static struct sunxi_iommu_dev *global_iommu_dev;
37 static struct sunxi_iommu_domain *global_sunxi_iommu_domain;
38 struct iommu_domain *global_iommu_domain;
39 static struct iommu_group *global_group;
40 static bool iommu_hw_init_flag;
41 static struct device *dma_dev;
42 
43 static inline u32 *iopde_offset(u32 *iopd, unsigned int iova)
44 {
45 	return iopd + IOPDE_INDEX(iova);
46 }
47 
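/*
 * Return the second-level page-table entry for @iova.  Table bases stored
 * below SUNXI_PHYS_OFFSET are shifted up by SUNXI_4G_PHYS_OFFSET before
 * being converted back to a kernel virtual address.
 */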
48 static inline u32 *iopte_offset(u32 *ent, unsigned int iova)
49 {
50 	unsigned long iopte_base = 0;
51 
52 	if (IOPTE_BASE(*ent) < SUNXI_PHYS_OFFSET)
53 		iopte_base = IOPTE_BASE(*ent) + SUNXI_4G_PHYS_OFFSET;
54 	else
55 		iopte_base = IOPTE_BASE(*ent);
56 
57 	return (u32 *)__va(iopte_base) + IOPTE_INDEX(iova);
58 }
59 
60 static inline u32 sunxi_iommu_read(struct sunxi_iommu_dev *iommu,
61 				   u32 offset)
62 {
63 	return readl(iommu->base + offset);
64 }
65 
66 static inline void sunxi_iommu_write(struct sunxi_iommu_dev *iommu,
67 				     u32 offset, u32 value)
68 {
69 	writel(value, iommu->base + offset);
70 }
71 
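/*
 * Pulse the reset bit of a single master: clear it, and if the hardware
 * reports the bit still cleared, set it again to finish the reset cycle.
 */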
72 void sunxi_reset_device_iommu(unsigned int master_id)
73 {
74 	unsigned int regval;
75 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
76 
77 	regval = sunxi_iommu_read(iommu, IOMMU_RESET_REG);
78 	sunxi_iommu_write(iommu, IOMMU_RESET_REG, regval & (~(1 << master_id)));
79 	regval = sunxi_iommu_read(iommu, IOMMU_RESET_REG);
80 	if (!(regval & ((1 << master_id)))) {
81 		sunxi_iommu_write(iommu, IOMMU_RESET_REG, regval | ((1 << master_id)));
82 	}
83 }
84 EXPORT_SYMBOL(sunxi_reset_device_iommu);
85 
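/*
 * Enable translation for one master (@flag == true) or put it into bypass,
 * by updating the cached bypass bitmap and writing it back under the lock.
 */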
86 void sunxi_enable_device_iommu(unsigned int master_id, bool flag)
87 {
88 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
89 	unsigned long mflag;
90 
91 	spin_lock_irqsave(&iommu->iommu_lock, mflag);
92 	if (flag)
93 		iommu->bypass &= ~(master_id_bitmap[master_id]);
94 	else
95 		iommu->bypass |= master_id_bitmap[master_id];
96 	sunxi_iommu_write(iommu, IOMMU_BYPASS_REG, iommu->bypass);
97 	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
98 }
99 EXPORT_SYMBOL(sunxi_enable_device_iommu);
100 
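/* Request a flush of all TLB and PTW caches and wait for completion. */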
101 static int sunxi_tlb_flush(struct sunxi_iommu_dev *iommu)
102 {
103 	int ret;
104 
105 	/* enable the maximum number (7) of masters, to cover all platforms */
106 	sunxi_iommu_write(iommu, IOMMU_TLB_FLUSH_ENABLE_REG, 0x0003007f);
107 	ret = sunxi_wait_when(
108 		(sunxi_iommu_read(iommu, IOMMU_TLB_FLUSH_ENABLE_REG)), 2);
109 	if (ret)
110 		dev_err(iommu->dev, "Enable flush all request timed out\n");
111 
112 	return ret;
113 }
114 
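/*
 * Program the page-directory base, prefetch and invalidation modes,
 * interrupt enables and the bypass bitmap, then flush the caches and
 * enable translation.  Called for the first domain and again on resume.
 */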
115 static int sunxi_iommu_hw_init(struct iommu_domain *input_domain)
116 {
117 	int ret = 0;
118 	int iommu_enable = 0;
119 	phys_addr_t dte_addr;
120 	unsigned long mflag;
121 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
122 	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
123 	struct sunxi_iommu_domain *sunxi_domain =
124 		container_of(input_domain, struct sunxi_iommu_domain, domain);
125 
126 	spin_lock_irqsave(&iommu->iommu_lock, mflag);
127 	dte_addr = __pa(sunxi_domain->pgtable);
128 	sunxi_iommu_write(iommu, IOMMU_TTB_REG, dte_addr);
129 
130 	/*
131 	 * set prefetch functions, including:
132 	 * master prefetching, and prefetching only valid pages into the TLB/PTW
133 	 */
134 	sunxi_iommu_write(iommu, IOMMU_TLB_PREFETCH_REG, plat_data->tlb_prefetch);
135 	/* new TLB invalidate interface: invalidate by (start, end) range, introduced in version V12 */
136 	if (plat_data->version >= IOMMU_VERSION_V12)
137 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_MODE_SEL_REG, plat_data->tlb_invalid_mode);
138 	/* new PTW invalidate interface: invalidate by (start, end) range, introduced in version V14 */
139 	if (plat_data->version >= IOMMU_VERSION_V14)
140 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_MODE_SEL_REG, plat_data->ptw_invalid_mode);
141 
142 	/* disable the prefetch interrupt */
143 	sunxi_iommu_write(iommu, IOMMU_INT_ENABLE_REG, 0x3003f);
144 	sunxi_iommu_write(iommu, IOMMU_BYPASS_REG, iommu->bypass);
145 
146 	ret = sunxi_tlb_flush(iommu);
147 	if (ret) {
148 		dev_err(iommu->dev, "Enable flush all request timed out\n");
149 		goto out;
150 	}
151 	sunxi_iommu_write(iommu, IOMMU_AUTO_GATING_REG, 0x1);
152 	sunxi_iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE);
153 	iommu_enable = sunxi_iommu_read(iommu, IOMMU_ENABLE_REG);
154 	if (iommu_enable != 0x1) {
155 		iommu_enable = sunxi_iommu_read(iommu, IOMMU_ENABLE_REG);
156 		if (iommu_enable != 0x1) {
157 			dev_err(iommu->dev, "iommu enable failed! No iommu in bitfile!\n");
158 			ret = -ENODEV;
159 			goto out;
160 		}
161 	}
162 	iommu_hw_init_flag = true;
163 
164 out:
165 	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
166 
167 	return ret;
168 }
169 
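/*
 * Invalidate TLB entries: V12 and newer take a (start, end) range, older
 * versions take an address plus mask and only cover 4K per request.
 */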
170 static int sunxi_tlb_invalid(dma_addr_t iova, dma_addr_t iova_mask)
171 {
172 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
173 	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
174 	dma_addr_t iova_end = iova_mask;
175 	int ret = 0;
176 	unsigned long mflag;
177 
178 	spin_lock_irqsave(&iommu->iommu_lock, mflag);
179 	/* new TLB invalidate interface: invalidate TLB pages by (start, end) range */
180 	if (plat_data->version >= IOMMU_VERSION_V12) {
181 		pr_debug("iommu: TLB invalid:0x%x-0x%x\n", (unsigned int)iova,
182 			(unsigned int)iova_end);
183 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_START_ADDR_REG, iova);
184 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_END_ADDR_REG, iova_end);
185 	} else {
186 		/* old TLB invalidate interface: only 4K can be invalidated at a time */
187 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, iova);
188 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, iova_mask);
189 	}
190 	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1);
191 
192 	ret = sunxi_wait_when(
193 		(sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG)&0x1), 2);
194 	if (ret) {
195 		dev_err(iommu->dev, "TLB cache invalid timed out\n");
196 	}
197 	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
198 
199 	return ret;
200 }
201 
202 static int sunxi_ptw_cache_invalid(dma_addr_t iova_start, dma_addr_t iova_end)
203 {
204 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
205 	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
206 	int ret = 0;
207 	unsigned long mflag;
208 
209 	spin_lock_irqsave(&iommu->iommu_lock, mflag);
210 	/* new PTW invalidate interface: invalidate PTW entries by (start, end) range */
211 	if (plat_data->version >= IOMMU_VERSION_V14) {
212 		pr_debug("iommu: PTW invalid:0x%x-0x%x\n", (unsigned int)iova_start,
213 			 (unsigned int)iova_end);
214 		WARN_ON(iova_end == 0);
215 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_START_ADDR_REG, iova_start);
216 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_END_ADDR_REG, iova_end);
217 	} else {
218 		/* old PTW invalidate interface: only 1M can be invalidated at a time */
219 		pr_debug("iommu: PTW invalid:0x%x\n", (unsigned int)iova_start);
220 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, iova_start);
221 	}
222 	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1);
223 
224 	ret = sunxi_wait_when(
225 		(sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG)&0x1), 2);
226 	if (ret) {
227 		dev_err(iommu->dev, "PTW cache invalid timed out\n");
228 		goto out;
229 	}
230 
231 out:
232 	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
233 
234 	return ret;
235 }
236 
237 static int sunxi_alloc_iopte(u32 *sent, int prot)
238 {
239 	u32 *pent;
240 	u32 flags = 0;
241 
242 	flags |= (prot & IOMMU_READ) ? DENT_READABLE : 0;
243 	flags |= (prot & IOMMU_WRITE) ? DENT_WRITABLE : 0;
244 
245 	pent = kmem_cache_zalloc(iopte_cache, GFP_ATOMIC);
246 	WARN_ON((unsigned long)pent & (PT_SIZE - 1));
247 	if (!pent) {
248 		pr_err("%s, %d, kmem_cache_zalloc failed!\n", __func__, __LINE__);
249 		return 0;
250 	}
251 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(sent), sizeof(*sent), DMA_TO_DEVICE);
252 	*sent = __pa(pent) | DENT_VALID;
253 	dma_sync_single_for_device(dma_dev, virt_to_phys(sent), sizeof(*sent), DMA_TO_DEVICE);
254 
255 	return 1;
256 }
257 
258 static void sunxi_free_iopte(u32 *pent)
259 {
260 	kmem_cache_free(iopte_cache, pent);
261 }
262 
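/*
 * Drop TLB and PTW cache entries around [iova, iova + size).  Older IOMMU
 * versions can only invalidate one page (TLB) or one 1M block (PTW) per
 * request, so the range boundaries are invalidated individually.
 */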
263 void sunxi_zap_tlb(unsigned long iova, size_t size)
264 {
265 	const struct sunxi_iommu_plat_data *plat_data = global_iommu_dev->plat_data;
266 
267 	if (plat_data->version <= IOMMU_VERSION_V11) {
268 		sunxi_tlb_invalid(iova, (u32)IOMMU_PT_MASK);
269 		sunxi_tlb_invalid(iova + SPAGE_SIZE, (u32)IOMMU_PT_MASK);
270 		sunxi_tlb_invalid(iova + size, (u32)IOMMU_PT_MASK);
271 		sunxi_tlb_invalid(iova + size + SPAGE_SIZE, (u32)IOMMU_PT_MASK);
272 		sunxi_ptw_cache_invalid(iova, 0);
273 		sunxi_ptw_cache_invalid(iova + SPD_SIZE, 0);
274 		sunxi_ptw_cache_invalid(iova + size, 0);
275 		sunxi_ptw_cache_invalid(iova + size + SPD_SIZE, 0);
276 	} else if (plat_data->version <= IOMMU_VERSION_V13) {
277 		sunxi_tlb_invalid(iova, iova + 2 * SPAGE_SIZE);
278 		sunxi_tlb_invalid(iova + size - SPAGE_SIZE, iova + size + 8 * SPAGE_SIZE);
279 		sunxi_ptw_cache_invalid(iova, 0);
280 		sunxi_ptw_cache_invalid(iova + size, 0);
281 
282 		sunxi_ptw_cache_invalid(iova + SPD_SIZE, 0);
283 		sunxi_ptw_cache_invalid(iova + size + SPD_SIZE, 0);
284 		sunxi_ptw_cache_invalid(iova + size + 2 * SPD_SIZE, 0);
285 	} else {
286 		sunxi_tlb_invalid(iova, iova + 2 * SPAGE_SIZE);
287 		sunxi_tlb_invalid(iova + size - SPAGE_SIZE, iova + size + 8 * SPAGE_SIZE);
288 		sunxi_ptw_cache_invalid(iova, iova + SPD_SIZE);
289 		sunxi_ptw_cache_invalid(iova + size - SPD_SIZE, iova + size);
290 	}
291 
292 	return;
293 }
294 
295 static inline u32 sunxi_mk_pte(u32 page, int prot)
296 {
297 	u32 flags = 0;
298 
299 	flags |= (prot & IOMMU_READ) ? SUNXI_PTE_PAGE_READABLE : 0;
300 	flags |= (prot & IOMMU_WRITE) ? SUNXI_PTE_PAGE_WRITABLE : 0;
301 	page &= IOMMU_PT_MASK;
302 
303 	return page | flags | SUNXI_PTE_PAGE_VALID;
304 }
305 
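/*
 * Map [iova, iova + size) to @paddr: walk the first level, allocating a
 * second-level table on demand, write the PTEs one table at a time and
 * sync each updated table to the device.
 */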
306 static int sunxi_iommu_map(struct iommu_domain *domain, unsigned long iova,
307 			   phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
308 {
309 	struct sunxi_iommu_domain *sunxi_domain;
310 	size_t iova_start, iova_end, paddr_start, s_iova_start;
311 	u32 *dent, *pent;
312 	int i;
313 	u32 iova_tail_count, iova_tail_size;
314 	u32 pent_val;
315 
316 	sunxi_domain = container_of(domain, struct sunxi_iommu_domain, domain);
317 	WARN_ON(sunxi_domain->pgtable == NULL);
318 	iova_start = iova & IOMMU_PT_MASK;
319 	paddr_start = paddr & IOMMU_PT_MASK;
320 	iova_end = SPAGE_ALIGN(iova + size);
321 	s_iova_start = iova_start;
322 
323 	mutex_lock(&sunxi_domain->dt_lock);
324 	for (; iova_start < iova_end;) {
325 		iova_tail_count = NUM_ENTRIES_PTE - IOPTE_INDEX(iova_start);
326 		iova_tail_size = iova_tail_count * SPAGE_SIZE;
327 		if (iova_start + iova_tail_size > iova_end) {
328 			iova_tail_size = iova_end - iova_start;
329 			iova_tail_count = iova_tail_size / SPAGE_SIZE;
330 		}
331 
332 		dent = iopde_offset(sunxi_domain->pgtable, iova_start);
333 		if (!IS_VALID(*dent)) {
334 			sunxi_alloc_iopte(dent, prot);
335 		}
336 		pent = iopte_offset(dent, iova_start);
337 		pent_val = sunxi_mk_pte(paddr_start, prot);
338 		for (i = 0; i < iova_tail_count; i++) {
339 			WARN_ON(*pent);
340 			*pent = pent_val +  SPAGE_SIZE * i;
341 			pent++;
342 		}
343 
344 		dma_sync_single_for_device(dma_dev, virt_to_phys(iopte_offset(dent, iova_start)),
345 				iova_tail_count << 2, DMA_TO_DEVICE);
346 		iova_start += iova_tail_size;
347 		paddr_start += iova_tail_size;
348 	}
349 	mutex_unlock(&sunxi_domain->dt_lock);
350 
351 	return 0;
352 }
353 
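/*
 * Unmap [iova, iova + size): clear the affected PTEs, free a second-level
 * table when a whole 1M block (SPD_SIZE) is released, and invalidate the
 * TLB/PTW caches for the range.
 */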
354 static size_t sunxi_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
355 				size_t size, struct iommu_iotlb_gather *gather)
356 {
357 	struct sunxi_iommu_domain *sunxi_domain;
358 	const struct sunxi_iommu_plat_data *plat_data;
359 	size_t iova_start, iova_end;
360 	u32 *dent, *pent;
361 	u32 iova_tail_count, iova_tail_size;
362 
363 	sunxi_domain = container_of(domain, struct sunxi_iommu_domain, domain);
364 	plat_data = global_iommu_dev->plat_data;
365 	WARN_ON(sunxi_domain->pgtable == NULL);
366 	iova_start = iova & IOMMU_PT_MASK;
367 	iova_end = SPAGE_ALIGN(iova + size);
368 
369 	if (gather->start > iova_start)
370 		gather->start = iova_start;
371 	if (gather->end < iova_end)
372 		gather->end = iova_end;
373 
374 	mutex_lock(&sunxi_domain->dt_lock);
375 	/* Invalidate TLB and PTW caches */
376 	if (plat_data->version >= IOMMU_VERSION_V12)
377 		sunxi_tlb_invalid(iova_start, iova_end);
378 	if (plat_data->version >= IOMMU_VERSION_V14)
379 		sunxi_ptw_cache_invalid(iova_start, iova_end);
380 
381 	for (; iova_start < iova_end; ) {
382 		iova_tail_count = NUM_ENTRIES_PTE - IOPTE_INDEX(iova_start);
383 		iova_tail_size = iova_tail_count * SPAGE_SIZE;
384 		if (iova_start + iova_tail_size > iova_end) {
385 			iova_tail_size = iova_end - iova_start;
386 			iova_tail_count = iova_tail_size / SPAGE_SIZE;
387 		}
388 
389 		dent = iopde_offset(sunxi_domain->pgtable, iova_start);
390 		if (!IS_VALID(*dent))
391 			return -EINVAL;
392 		pent = iopte_offset(dent, iova_start);
393 		memset(pent, 0, iova_tail_count * sizeof(u32));
394 		dma_sync_single_for_device(dma_dev, virt_to_phys(iopte_offset(dent, iova_start)),
395 				iova_tail_count << 2, DMA_TO_DEVICE);
396 
397 		if (iova_tail_size == SPD_SIZE) {
398 			*dent = 0;
399 			dma_sync_single_for_device(dma_dev, virt_to_phys(dent), sizeof(*dent), DMA_TO_DEVICE);
400 			sunxi_free_iopte(pent);
401 
402 		}
403 
404 		if (plat_data->version < IOMMU_VERSION_V14)
405 			sunxi_ptw_cache_invalid(iova_start, 0);
406 		iova_start += iova_tail_size;
407 	}
408 	mutex_unlock(&sunxi_domain->dt_lock);
409 
410 	return size;
411 }
412 
413 void sunxi_iommu_iotlb_sync_map(struct iommu_domain *domain,
414 				unsigned long iova, size_t size)
415 {
416 	struct sunxi_iommu_domain *sunxi_domain =
417 		container_of(domain, struct sunxi_iommu_domain, domain);
418 
419 	mutex_lock(&sunxi_domain->dt_lock);
420 	sunxi_zap_tlb(iova, size);
421 	mutex_unlock(&sunxi_domain->dt_lock);
422 
423 	return;
424 }
425 
426 void sunxi_iommu_iotlb_sync(struct iommu_domain *domain,
427 				  struct iommu_iotlb_gather *iotlb_gather)
428 {
429 	struct sunxi_iommu_domain *sunxi_domain =
430 		container_of(domain, struct sunxi_iommu_domain, domain);
431 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
432 	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
433 
434 	if (plat_data->version >= IOMMU_VERSION_V14)
435 		return ;
436 
437 	mutex_lock(&sunxi_domain->dt_lock);
438 	sunxi_zap_tlb(iotlb_gather->start, iotlb_gather->end - iotlb_gather->start);
439 	mutex_unlock(&sunxi_domain->dt_lock);
440 
441 	return;
442 }
443 
444 static phys_addr_t sunxi_iommu_iova_to_phys(struct iommu_domain *domain,
445 					    dma_addr_t iova)
446 {
447 	struct sunxi_iommu_domain *sunxi_domain =
448 		container_of(domain, struct sunxi_iommu_domain, domain);
449 	u32 *dent, *pent;
450 	phys_addr_t ret = 0;
451 
452 
453 	WARN_ON(sunxi_domain->pgtable == NULL);
454 	mutex_lock(&sunxi_domain->dt_lock);
455 	dent = iopde_offset(sunxi_domain->pgtable, iova);
456 	if (IS_VALID(*dent)) {
457 		pent = iopte_offset(dent, iova);
458 		ret = IOPTE_TO_PFN(pent) + IOVA_PAGE_OFT(iova);
459 	}
460 	if (ret < SUNXI_PHYS_OFFSET)
461 		ret += SUNXI_4G_PHYS_OFFSET;
462 	mutex_unlock(&sunxi_domain->dt_lock);
463 
464 	return ret;
465 }
466 
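/*
 * All masters share one domain: the first call allocates the page
 * directory, the sg_buffer area and (for DMA domains) the IOMMU DMA
 * cookie, and initializes the hardware; later calls return the same domain.
 */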
467 static struct iommu_domain *sunxi_iommu_domain_alloc(unsigned type)
468 {
469 	struct sunxi_iommu_domain *sunxi_domain;
470 
471 	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
472 		return NULL;
473 
474 	/* all masters share a single domain */
475 	if (global_sunxi_iommu_domain)
476 		return &global_sunxi_iommu_domain->domain;
477 
478 	sunxi_domain = kzalloc(sizeof(*sunxi_domain), GFP_KERNEL);
479 
480 	if (!sunxi_domain)
481 		return NULL;
482 
483 	sunxi_domain->pgtable = (unsigned int *)__get_free_pages(
484 				GFP_KERNEL, get_order(PD_SIZE));
485 	if (!sunxi_domain->pgtable) {
486 		pr_err("sunxi domain get pgtable failed\n");
487 		goto err_page;
488 	}
489 
490 	sunxi_domain->sg_buffer = (unsigned int *)__get_free_pages(
491 				GFP_KERNEL, get_order(MAX_SG_TABLE_SIZE));
492 	if (!sunxi_domain->sg_buffer) {
493 		pr_err("sunxi domain get sg_buffer failed\n");
494 		goto err_sg_buffer;
495 	}
496 
497 	if (type == IOMMU_DOMAIN_DMA &&
498 				iommu_get_dma_cookie(&sunxi_domain->domain)) {
499 		pr_err("sunxi domain get dma cookie failed\n");
500 		goto err_dma_cookie;
501 	}
502 
503 	memset(sunxi_domain->pgtable, 0, PD_SIZE);
504 	sunxi_domain->domain.geometry.aperture_start = 0;
505 	sunxi_domain->domain.geometry.aperture_end	 = (1ULL << 32)-1;
506 	sunxi_domain->domain.geometry.force_aperture = true;
507 	mutex_init(&sunxi_domain->dt_lock);
508 	global_sunxi_iommu_domain = sunxi_domain;
509 	global_iommu_domain = &sunxi_domain->domain;
510 
511 	if (!iommu_hw_init_flag) {
512 		if (sunxi_iommu_hw_init(&sunxi_domain->domain))
513 			pr_err("sunxi iommu hardware init failed\n");
514 	}
515 
516 	return &sunxi_domain->domain;
517 
518 err_dma_cookie:
519 err_sg_buffer:
520 	free_pages((unsigned long)sunxi_domain->pgtable, get_order(PD_SIZE));
521 	sunxi_domain->pgtable = NULL;
522 err_page:
523 	kfree(sunxi_domain);
524 
525 	return NULL;
526 }
527 
528 static void sunxi_iommu_domain_free(struct iommu_domain *domain)
529 {
530 	struct sunxi_iommu_domain *sunxi_domain =
531 		container_of(domain, struct sunxi_iommu_domain, domain);
532 	int i = 0;
533 	size_t iova;
534 	u32 *dent, *pent;
535 
536 	mutex_lock(&sunxi_domain->dt_lock);
537 	for (i = 0; i < NUM_ENTRIES_PDE; ++i) {
538 		dent = sunxi_domain->pgtable + i;
539 		iova = i << IOMMU_PD_SHIFT;
540 		if (IS_VALID(*dent)) {
541 			pent = iopte_offset(dent, iova);
542 			dma_sync_single_for_cpu(dma_dev, virt_to_phys(pent), PT_SIZE, DMA_TO_DEVICE);
543 			memset(pent, 0, PT_SIZE);
544 			dma_sync_single_for_device(dma_dev, virt_to_phys(pent), PT_SIZE, DMA_TO_DEVICE);
545 			dma_sync_single_for_cpu(dma_dev, virt_to_phys(dent), PT_SIZE, DMA_TO_DEVICE);
546 			*dent = 0;
547 			dma_sync_single_for_device(dma_dev, virt_to_phys(dent), sizeof(*dent), DMA_TO_DEVICE);
548 			sunxi_free_iopte(pent);
549 		}
550 	}
551 	sunxi_tlb_flush(global_iommu_dev);
552 	mutex_unlock(&sunxi_domain->dt_lock);
553 	free_pages((unsigned long)sunxi_domain->pgtable, get_order(PD_SIZE));
554 	sunxi_domain->pgtable = NULL;
555 	free_pages((unsigned long)sunxi_domain->sg_buffer,
556 						get_order(MAX_SG_TABLE_SIZE));
557 	sunxi_domain->sg_buffer = NULL;
558 	iommu_put_dma_cookie(domain);
559 	kfree(sunxi_domain);
560 }
561 
562 static int sunxi_iommu_attach_dev(struct iommu_domain *domain,
563 				  struct device *dev)
564 {
565 	return 0;
566 }
567 
568 static void sunxi_iommu_detach_dev(struct iommu_domain *domain,
569 				   struct device *dev)
570 {
571 		return;
572 }
573 
574 static struct iommu_device *sunxi_iommu_probe_device(struct device *dev)
575 {
576 	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
577 
578 	if (!owner) /* Not an IOMMU client device */
579 		return ERR_PTR(-ENODEV);
580 
581 	sunxi_enable_device_iommu(owner->tlbid, owner->flag);
582 
583 	return &owner->data->iommu;
584 }
585 
586 static void sunxi_iommu_release_device(struct device *dev)
587 {
588 	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
589 
590 	if (!owner)
591 		return;
592 
593 	sunxi_enable_device_iommu(owner->tlbid, false);
594 	dev->iommu_group = NULL;
595 	devm_kfree(dev, dev->dma_parms);
596 	dev->dma_parms = NULL;
597 	kfree(owner);
598 	owner = NULL;
599 	dev_iommu_priv_set(dev, NULL);
600 }
601 
602 /* set DMA parameters for master devices */
603 int sunxi_iommu_set_dma_parms(struct notifier_block *nb,
604 			      unsigned long action, void *data)
605 {
606 	struct device *dev = data;
607 
608 	if (action != IOMMU_GROUP_NOTIFY_BIND_DRIVER)
609 		return 0;
610 
611 	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
612 	if (!dev->dma_parms)
613 		return -ENOMEM;
614 	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
615 
616 	return 0;
617 }
618 
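/*
 * All client devices are placed in a single shared iommu_group; on first
 * use a notifier is registered so DMA segment parameters are set when a
 * driver binds to a device in the group.
 */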
619 struct iommu_group *sunxi_iommu_device_group(struct device *dev)
620 {
621 	struct iommu_group *group;
622 	struct notifier_block *nb;
623 
624 	if (!global_group) {
625 		nb = kzalloc(sizeof(*nb), GFP_KERNEL);
626 		if (!nb)
627 			return ERR_PTR(-ENOMEM);
628 
629 		global_group = iommu_group_alloc();
630 		if (IS_ERR(global_group)) {
631 			pr_err("sunxi iommu alloc group failed\n");
632 			goto err_group_alloc;
633 		}
634 
635 		nb->notifier_call = sunxi_iommu_set_dma_parms;
636 		if (iommu_group_register_notifier(global_group, nb)) {
637 			pr_err("sunxi iommu group register notifier failed!\n");
638 			goto err_notifier;
639 		}
640 
641 	}
642 	group = global_group;
643 
644 	return group;
645 
646 err_notifier:
647 err_group_alloc:
648 	kfree(nb);
649 
650 	return ERR_PTR(-EBUSY);
651 }
652 
653 static int sunxi_iommu_of_xlate(struct device *dev,
654 				struct of_phandle_args *args)
655 {
656 	struct sunxi_iommu_owner *owner = dev_iommu_priv_get(dev);
657 	struct platform_device *sysmmu = of_find_device_by_node(args->np);
658 	struct sunxi_iommu_dev *data;
659 
660 	if (!sysmmu)
661 		return -ENODEV;
662 
663 	data = platform_get_drvdata(sysmmu);
664 	if (data == NULL)
665 		return -ENODEV;
666 
667 	if (!owner) {
668 		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
669 		if (!owner)
670 			return -ENOMEM;
671 		owner->tlbid = args->args[0];
672 		owner->flag = args->args[1];
673 		owner->data = data;
674 		owner->dev = dev;
675 		dev_iommu_priv_set(dev, owner);
676 	}
677 
678 	return 0;
679 }
680 
681 void sunxi_set_debug_mode(void)
682 {
683 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
684 
685 	sunxi_iommu_write(iommu,
686 			IOMMU_VA_CONFIG_REG, 0x80000000);
687 }
688 
689 void sunxi_set_prefetch_mode(void)
690 {
691 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
692 
693 	sunxi_iommu_write(iommu,
694 			IOMMU_VA_CONFIG_REG, 0x00000000);
695 }
696 
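/*
 * Debug helper: issue a write of @val to @iova through the IOMMU VA
 * interface and wait for the request to complete.
 */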
697 int sunxi_iova_test_write(dma_addr_t iova, u32 val)
698 {
699 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
700 	int retval;
701 
702 	sunxi_iommu_write(iommu, IOMMU_VA_REG, iova);
703 	sunxi_iommu_write(iommu, IOMMU_VA_DATA_REG, val);
704 	sunxi_iommu_write(iommu,
705 			IOMMU_VA_CONFIG_REG, 0x80000100);
706 	sunxi_iommu_write(iommu,
707 			IOMMU_VA_CONFIG_REG, 0x80000101);
708 	retval = sunxi_wait_when((sunxi_iommu_read(iommu,
709 				IOMMU_VA_CONFIG_REG) & 0x1), 1);
710 	if (retval)
711 		dev_err(iommu->dev,
712 			"write VA address request timed out\n");
713 	return retval;
714 }
715 
716 unsigned long sunxi_iova_test_read(dma_addr_t iova)
717 {
718 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
719 	unsigned long retval;
720 
721 	sunxi_iommu_write(iommu, IOMMU_VA_REG, iova);
722 	sunxi_iommu_write(iommu,
723 			IOMMU_VA_CONFIG_REG, 0x80000000);
724 	sunxi_iommu_write(iommu,
725 			IOMMU_VA_CONFIG_REG, 0x80000001);
726 	retval = sunxi_wait_when((sunxi_iommu_read(iommu,
727 				IOMMU_VA_CONFIG_REG) & 0x1), 1);
728 	if (retval) {
729 		dev_err(iommu->dev,
730 			"read VA address request timed out\n");
731 		retval = 0;
732 		goto out;
733 	}
734 	retval = sunxi_iommu_read(iommu,
735 			IOMMU_VA_DATA_REG);
736 
737 out:
738 	return retval;
739 }
740 
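/*
 * Check whether the faulting @iova has valid first- and second-level
 * entries: returns 1 if the address is not mapped, 0 if it is.
 */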
741 static int sunxi_iova_invalid_helper(unsigned long iova)
742 {
743 	struct sunxi_iommu_domain *sunxi_domain = global_sunxi_iommu_domain;
744 	u32 *pte_addr, *dte_addr;
745 
746 	dte_addr = iopde_offset(sunxi_domain->pgtable, iova);
747 	if ((*dte_addr & 0x3) != 0x1) {
748 		pr_err("0x%lx is not mapped!\n", iova);
749 		return 1;
750 	}
751 	pte_addr = iopte_offset(dte_addr, iova);
752 	if ((*pte_addr & 0x2) == 0) {
753 		pr_err("0x%lx is not mapped!\n", iova);
754 		return 1;
755 	}
756 	pr_err("0x%lx is mapped!\n", iova);
757 
758 	return 0;
759 }
760 
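/*
 * Fault handler: decode the interrupt status, report the faulting master
 * and address, invalidate the stale TLB/PTW entries around the fault and
 * reset the offending masters before clearing the interrupt.
 */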
761 static irqreturn_t sunxi_iommu_irq(int irq, void *dev_id)
762 {
763 
764 	u32 inter_status_reg = 0;
765 	u32 addr_reg = 0;
766 	u32	int_masterid_bitmap = 0;
767 	u32	data_reg = 0;
768 	u32	l1_pgint_reg = 0;
769 	u32	l2_pgint_reg = 0;
770 	u32	master_id = 0;
771 	unsigned long mflag;
772 	struct sunxi_iommu_dev *iommu = dev_id;
773 	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
774 
775 	spin_lock_irqsave(&iommu->iommu_lock, mflag);
776 	inter_status_reg = sunxi_iommu_read(iommu, IOMMU_INT_STA_REG) & 0x3ffff;
777 	l1_pgint_reg = sunxi_iommu_read(iommu, IOMMU_L1PG_INT_REG);
778 	l2_pgint_reg = sunxi_iommu_read(iommu, IOMMU_L2PG_INT_REG);
779 	int_masterid_bitmap = inter_status_reg | l1_pgint_reg | l2_pgint_reg;
780 
781 	if (inter_status_reg & MICRO_TLB0_INVALID_INTER_MASK) {
782 		pr_err("%s Invalid Authority\n", plat_data->master[0]);
783 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG0);
784 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG0);
785 	} else if (inter_status_reg & MICRO_TLB1_INVALID_INTER_MASK) {
786 		pr_err("%s Invalid Authority\n", plat_data->master[1]);
787 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG1);
788 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG1);
789 	} else if (inter_status_reg & MICRO_TLB2_INVALID_INTER_MASK) {
790 		pr_err("%s Invalid Authority\n", plat_data->master[2]);
791 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG2);
792 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG2);
793 	} else if (inter_status_reg & MICRO_TLB3_INVALID_INTER_MASK) {
794 		pr_err("%s Invalid Authority\n", plat_data->master[3]);
795 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG3);
796 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG3);
797 	} else if (inter_status_reg & MICRO_TLB4_INVALID_INTER_MASK) {
798 		pr_err("%s Invalid Authority\n", plat_data->master[4]);
799 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG4);
800 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG4);
801 	} else if (inter_status_reg & MICRO_TLB5_INVALID_INTER_MASK) {
802 		pr_err("%s Invalid Authority\n", plat_data->master[5]);
803 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG5);
804 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG5);
805 	} else if (inter_status_reg & MICRO_TLB6_INVALID_INTER_MASK) {
806 		pr_err("%s Invalid Authority\n", plat_data->master[6]);
807 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG6);
808 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG6);
809 	} else if (inter_status_reg & L1_PAGETABLE_INVALID_INTER_MASK) {
810 		/* It's OK to prefetch an invalid page; no debug message needed. */
811 		if (!(int_masterid_bitmap & (1U << 31)))
812 			pr_err("L1 PageTable Invalid\n");
813 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG7);
814 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG7);
815 	} else if (inter_status_reg & L2_PAGETABLE_INVALID_INTER_MASK) {
816 		if (!(int_masterid_bitmap & (1U << 31)))
817 			pr_err("L2 PageTable Invalid\n");
818 		addr_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG8);
819 		data_reg = sunxi_iommu_read(iommu, IOMMU_INT_ERR_DATA_REG8);
820 	} else
821 		pr_err("sunxi iommu: unexpected interrupt status\n");
822 
823 	if (!(int_masterid_bitmap & (1U << 31))) {
824 		if (sunxi_iova_invalid_helper(addr_reg)) {
825 			int_masterid_bitmap &= 0xffff;
826 			master_id = __ffs(int_masterid_bitmap);
827 		}
828 		pr_err("Bug is in %s module, invalid address: 0x%x, data:0x%x, id:0x%x\n",
829 			plat_data->master[master_id], addr_reg, data_reg,
830 				int_masterid_bitmap);
831 	}
832 
833 	/* invalidate TLB */
834 	if (plat_data->version <= IOMMU_VERSION_V11) {
835 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, addr_reg);
836 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, (u32)IOMMU_PT_MASK);
837 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1);
838 		while (sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG) & 0x1)
839 			;
840 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_REG, addr_reg + 0x2000);
841 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ADDR_MASK_REG, (u32)IOMMU_PT_MASK);
842 	} else {
843 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_START_ADDR_REG, addr_reg);
844 		sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_END_ADDR_REG, addr_reg + 4 * SPAGE_SIZE);
845 	}
846 	sunxi_iommu_write(iommu, IOMMU_TLB_IVLD_ENABLE_REG, 0x1);
847 	while (sunxi_iommu_read(iommu, IOMMU_TLB_IVLD_ENABLE_REG) & 0x1)
848 		;
849 
850 	/* invalidate PTW */
851 	if (plat_data->version <= IOMMU_VERSION_V13) {
852 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, addr_reg);
853 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1);
854 		while (sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG) & 0x1)
855 			;
856 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ADDR_REG, addr_reg + 0x200000);
857 	} else {
858 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_START_ADDR_REG, addr_reg);
859 		sunxi_iommu_write(iommu, IOMMU_PC_IVLD_END_ADDR_REG, addr_reg + 2 * SPD_SIZE);
860 	}
861 	sunxi_iommu_write(iommu, IOMMU_PC_IVLD_ENABLE_REG, 0x1);
862 	while (sunxi_iommu_read(iommu, IOMMU_PC_IVLD_ENABLE_REG) & 0x1)
863 		;
864 
865 	sunxi_iommu_write(iommu, IOMMU_INT_CLR_REG, inter_status_reg);
866 	inter_status_reg |= (l1_pgint_reg | l2_pgint_reg);
867 	inter_status_reg &= 0xffff;
868 	sunxi_iommu_write(iommu, IOMMU_RESET_REG, ~inter_status_reg);
869 	sunxi_iommu_write(iommu, IOMMU_RESET_REG, 0xffffffff);
870 	spin_unlock_irqrestore(&iommu->iommu_lock, mflag);
871 
872 	return IRQ_HANDLED;
873 }
874 
875 static ssize_t sunxi_iommu_enable_show(struct device *dev,
876 		struct device_attribute *attr, char *buf)
877 {
878 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
879 	u32 data;
880 
881 	spin_lock(&iommu->iommu_lock);
882 	data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG);
883 	spin_unlock(&iommu->iommu_lock);
884 
885 	return snprintf(buf, PAGE_SIZE,
886 		"enable = %d\n", data & 0x1 ? 1 : 0);
887 }
888 
889 static ssize_t sunxi_iommu_enable_store(struct device *dev,
890 					struct device_attribute *attr,
891 					const char *buf, size_t count)
892 {
893 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
894 	unsigned long val;
895 	u32 data;
896 	int retval;
897 
898 	if (kstrtoul(buf, 0, &val))
899 		return -EINVAL;
900 
901 	if (val) {
902 		spin_lock(&iommu->iommu_lock);
903 		data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG);
904 		sunxi_iommu_write(iommu, IOMMU_PMU_ENABLE_REG, data | 0x1);
905 		data = sunxi_iommu_read(iommu, IOMMU_PMU_CLR_REG);
906 		sunxi_iommu_write(iommu, IOMMU_PMU_CLR_REG, data | 0x1);
907 		retval = sunxi_wait_when((sunxi_iommu_read(iommu,
908 				IOMMU_PMU_CLR_REG) & 0x1), 1);
909 		if (retval)
910 			dev_err(iommu->dev, "Clear PMU Count timed out\n");
911 		spin_unlock(&iommu->iommu_lock);
912 	} else {
913 		spin_lock(&iommu->iommu_lock);
914 		data = sunxi_iommu_read(iommu, IOMMU_PMU_CLR_REG);
915 		sunxi_iommu_write(iommu, IOMMU_PMU_CLR_REG, data | 0x1);
916 		retval = sunxi_wait_when((sunxi_iommu_read(iommu,
917 				IOMMU_PMU_CLR_REG) & 0x1), 1);
918 		if (retval)
919 			dev_err(iommu->dev, "Clear PMU Count timed out\n");
920 		data = sunxi_iommu_read(iommu, IOMMU_PMU_ENABLE_REG);
921 		sunxi_iommu_write(iommu, IOMMU_PMU_ENABLE_REG, data & ~0x1);
922 		spin_unlock(&iommu->iommu_lock);
923 	}
924 
925 	return count;
926 }
927 
928 static ssize_t sunxi_iommu_profilling_show(struct device *dev,
929 					struct device_attribute *attr,
930 					char *buf)
931 {
932 	struct sunxi_iommu_dev *iommu = global_iommu_dev;
933 	const struct sunxi_iommu_plat_data *plat_data = iommu->plat_data;
934 	u64 micro_tlb0_access_count;
935 	u64 micro_tlb0_hit_count;
936 	u64 micro_tlb1_access_count;
937 	u64 micro_tlb1_hit_count;
938 	u64 micro_tlb2_access_count;
939 	u64 micro_tlb2_hit_count;
940 	u64 micro_tlb3_access_count;
941 	u64 micro_tlb3_hit_count;
942 	u64 micro_tlb4_access_count;
943 	u64 micro_tlb4_hit_count;
944 	u64 micro_tlb5_access_count;
945 	u64 micro_tlb5_hit_count;
946 	u64 micro_tlb6_access_count;
947 	u64 micro_tlb6_hit_count;
948 	u64 macrotlb_access_count;
949 	u64 macrotlb_hit_count;
950 	u64 ptwcache_access_count;
951 	u64 ptwcache_hit_count;
952 	u64 micro_tlb0_latency;
953 	u64 micro_tlb1_latency;
954 	u64 micro_tlb2_latency;
955 	u64 micro_tlb3_latency;
956 	u64 micro_tlb4_latency;
957 	u64 micro_tlb5_latency;
958 	u64 micro_tlb6_latency;
959 	u32 micro_tlb0_max_latency;
960 	u32 micro_tlb1_max_latency;
961 	u32 micro_tlb2_max_latency;
962 	u32 micro_tlb3_max_latency;
963 	u32 micro_tlb4_max_latency;
964 	u32 micro_tlb5_max_latency;
965 	u32 micro_tlb6_max_latency;
966 
967 	spin_lock(&iommu->iommu_lock);
968 
969 	micro_tlb0_access_count =
970 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG0) &
971 		0x7ff) << 32) |
972 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG0);
973 	micro_tlb0_hit_count =
974 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG0) &
975 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG0);
976 
977 	micro_tlb1_access_count =
978 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG1) &
979 		0x7ff) << 32) |
980 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG1);
981 	micro_tlb1_hit_count =
982 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG1) &
983 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG1);
984 
985 	micro_tlb2_access_count =
986 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG2) &
987 		0x7ff) << 32) |
988 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG2);
989 	micro_tlb2_hit_count =
990 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG2) &
991 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG2);
992 
993 	micro_tlb3_access_count =
994 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG3) &
995 		0x7ff) << 32) |
996 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG3);
997 	micro_tlb3_hit_count =
998 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG3) &
999 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG3);
1000 
1001 	micro_tlb4_access_count =
1002 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG4) &
1003 		0x7ff) << 32) |
1004 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG4);
1005 	micro_tlb4_hit_count =
1006 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG4) &
1007 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG4);
1008 
1009 	micro_tlb5_access_count =
1010 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG5) &
1011 		0x7ff) << 32) |
1012 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG5);
1013 	micro_tlb5_hit_count =
1014 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG5) &
1015 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG5);
1016 
1017 	micro_tlb6_access_count =
1018 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG6) &
1019 		0x7ff) << 32) |
1020 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG6);
1021 	micro_tlb6_hit_count =
1022 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG6) &
1023 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG6);
1024 
1025 	macrotlb_access_count =
1026 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG7) &
1027 		0x7ff) << 32) |
1028 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG7);
1029 	macrotlb_hit_count =
1030 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG7) &
1031 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG7);
1032 
1033 	ptwcache_access_count =
1034 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_HIGH_REG8) &
1035 		0x7ff) << 32) |
1036 		sunxi_iommu_read(iommu, IOMMU_PMU_ACCESS_LOW_REG8);
1037 	ptwcache_hit_count =
1038 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_HIT_HIGH_REG8) &
1039 		0x7ff) << 32) | sunxi_iommu_read(iommu, IOMMU_PMU_HIT_LOW_REG8);
1040 
1041 	micro_tlb0_latency =
1042 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG0) &
1043 		0x3ffff) << 32) |
1044 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG0);
1045 	micro_tlb1_latency =
1046 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG1) &
1047 		0x3ffff) << 32) |
1048 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG1);
1049 	micro_tlb2_latency =
1050 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG2) &
1051 		0x3ffff) << 32) |
1052 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG2);
1053 	micro_tlb3_latency =
1054 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG3) &
1055 		0x3ffff) << 32) |
1056 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG3);
1057 	micro_tlb4_latency =
1058 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG4) &
1059 		0x3ffff) << 32) |
1060 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG4);
1061 	micro_tlb5_latency =
1062 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG5) &
1063 		0x3ffff) << 32) |
1064 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG5);
1065 
1066 	micro_tlb6_latency =
1067 		((u64)(sunxi_iommu_read(iommu, IOMMU_PMU_TL_HIGH_REG6) &
1068 		0x3ffff) << 32) |
1069 		sunxi_iommu_read(iommu, IOMMU_PMU_TL_LOW_REG6);
1070 
1071 	micro_tlb0_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG0);
1072 	micro_tlb1_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG1);
1073 	micro_tlb2_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG2);
1074 	micro_tlb3_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG3);
1075 	micro_tlb4_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG4);
1076 	micro_tlb5_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG5);
1077 	micro_tlb6_max_latency = sunxi_iommu_read(iommu, IOMMU_PMU_ML_REG6);
1078 
1079 	spin_unlock(&iommu->iommu_lock);
1080 
1081 	return snprintf(buf, PAGE_SIZE,
1082 		"%s_access_count = 0x%llx\n"
1083 		"%s_hit_count = 0x%llx\n"
1084 		"%s_access_count = 0x%llx\n"
1085 		"%s_hit_count = 0x%llx\n"
1086 		"%s_access_count = 0x%llx\n"
1087 		"%s_hit_count = 0x%llx\n"
1088 		"%s_access_count = 0x%llx\n"
1089 		"%s_hit_count = 0x%llx\n"
1090 		"%s_access_count = 0x%llx\n"
1091 		"%s_hit_count = 0x%llx\n"
1092 		"%s_access_count = 0x%llx\n"
1093 		"%s_hit_count = 0x%llx\n"
1094 		"%s_access_count = 0x%llx\n"
1095 		"%s_hit_count = 0x%llx\n"
1096 		"macrotlb_access_count = 0x%llx\n"
1097 		"macrotlb_hit_count = 0x%llx\n"
1098 		"ptwcache_access_count = 0x%llx\n"
1099 		"ptwcache_hit_count = 0x%llx\n"
1100 		"%s_total_latency = 0x%llx\n"
1101 		"%s_total_latency = 0x%llx\n"
1102 		"%s_total_latency = 0x%llx\n"
1103 		"%s_total_latency = 0x%llx\n"
1104 		"%s_total_latency = 0x%llx\n"
1105 		"%s_total_latency = 0x%llx\n"
1106 		"%s_total_latency = 0x%llx\n"
1107 		"%s_max_latency = 0x%x\n"
1108 		"%s_max_latency = 0x%x\n"
1109 		"%s_max_latency = 0x%x\n"
1110 		"%s_max_latency = 0x%x\n"
1111 		"%s_max_latency = 0x%x\n"
1112 		"%s_max_latency = 0x%x\n"
1113 		"%s_max_latency = 0x%x\n"
1114 		,
1115 		plat_data->master[0], micro_tlb0_access_count,
1116 		plat_data->master[0], micro_tlb0_hit_count,
1117 		plat_data->master[1], micro_tlb1_access_count,
1118 		plat_data->master[1], micro_tlb1_hit_count,
1119 		plat_data->master[2], micro_tlb2_access_count,
1120 		plat_data->master[2], micro_tlb2_hit_count,
1121 		plat_data->master[3], micro_tlb3_access_count,
1122 		plat_data->master[3], micro_tlb3_hit_count,
1123 		plat_data->master[4], micro_tlb4_access_count,
1124 		plat_data->master[4], micro_tlb4_hit_count,
1125 		plat_data->master[5], micro_tlb5_access_count,
1126 		plat_data->master[5], micro_tlb5_hit_count,
1127 		plat_data->master[6], micro_tlb6_access_count,
1128 		plat_data->master[6], micro_tlb6_hit_count,
1129 		macrotlb_access_count,
1130 		macrotlb_hit_count,
1131 		ptwcache_access_count,
1132 		ptwcache_hit_count,
1133 		plat_data->master[0], micro_tlb0_latency,
1134 		plat_data->master[1], micro_tlb1_latency,
1135 		plat_data->master[2], micro_tlb2_latency,
1136 		plat_data->master[3], micro_tlb3_latency,
1137 		plat_data->master[4], micro_tlb4_latency,
1138 		plat_data->master[5], micro_tlb5_latency,
1139 		plat_data->master[6], micro_tlb6_latency,
1140 		plat_data->master[0], micro_tlb0_max_latency,
1141 		plat_data->master[1], micro_tlb1_max_latency,
1142 		plat_data->master[2], micro_tlb2_max_latency,
1143 		plat_data->master[3], micro_tlb3_max_latency,
1144 		plat_data->master[4], micro_tlb4_max_latency,
1145 		plat_data->master[5], micro_tlb5_max_latency,
1146 		plat_data->master[6], micro_tlb6_max_latency
1147 			);
1148 }
1149 
1150 static struct device_attribute sunxi_iommu_enable_attr =
1151 	__ATTR(enable, 0644, sunxi_iommu_enable_show,
1152 	sunxi_iommu_enable_store);
1153 static struct device_attribute sunxi_iommu_profilling_attr =
1154 	__ATTR(profilling, 0444, sunxi_iommu_profilling_show, NULL);
1155 
1156 static void sunxi_iommu_sysfs_create(struct platform_device *_pdev)
1157 {
1158 	device_create_file(&_pdev->dev, &sunxi_iommu_enable_attr);
1159 	device_create_file(&_pdev->dev, &sunxi_iommu_profilling_attr);
1160 }
1161 
1162 static void sunxi_iommu_sysfs_remove(struct platform_device *_pdev)
1163 {
1164 	device_remove_file(&_pdev->dev, &sunxi_iommu_enable_attr);
1165 	device_remove_file(&_pdev->dev, &sunxi_iommu_profilling_attr);
1166 }
1167 
1168 static const struct iommu_ops sunxi_iommu_ops = {
1169 	.pgsize_bitmap = SZ_4K | SZ_16K | SZ_64K | SZ_256K | SZ_1M | SZ_4M | SZ_16M,
1170 	.map  = sunxi_iommu_map,
1171 	.unmap = sunxi_iommu_unmap,
1172 	.iotlb_sync_map = sunxi_iommu_iotlb_sync_map,
1173 	.iotlb_sync = sunxi_iommu_iotlb_sync,
1174 	.domain_alloc = sunxi_iommu_domain_alloc,
1175 	.domain_free = sunxi_iommu_domain_free,
1176 	.attach_dev = sunxi_iommu_attach_dev,
1177 	.detach_dev = sunxi_iommu_detach_dev,
1178 	.probe_device = sunxi_iommu_probe_device,
1179 	.release_device = sunxi_iommu_release_device,
1180 	.device_group	= sunxi_iommu_device_group,
1181 	.of_xlate = sunxi_iommu_of_xlate,
1182 	.iova_to_phys = sunxi_iommu_iova_to_phys,
1183 	.owner = THIS_MODULE,
1184 };
1185 
1186 static int sunxi_iommu_probe(struct platform_device *pdev)
1187 {
1188 	int ret, irq;
1189 	struct device *dev = &pdev->dev;
1190 	struct sunxi_iommu_dev *sunxi_iommu;
1191 	struct resource *res;
1192 
1193 	iopte_cache = kmem_cache_create("sunxi-iopte-cache", PT_SIZE,
1194 				PT_SIZE, SLAB_HWCACHE_ALIGN, NULL);
1195 	if (!iopte_cache) {
1196 		pr_err("%s: Failed to create sunxi-iopte-cache.\n", __func__);
1197 		return -ENOMEM;
1198 	}
1199 
1200 	sunxi_iommu = devm_kzalloc(dev, sizeof(*sunxi_iommu), GFP_KERNEL);
1201 	if (!sunxi_iommu)
1202 		return	-ENOMEM;
1203 
1204 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1205 	if (!res) {
1206 		dev_dbg(dev, "Unable to find resource region\n");
1207 		ret = -ENOENT;
1208 		goto err_res;
1209 	}
1210 
1211 	sunxi_iommu->base = devm_ioremap_resource(&pdev->dev, res);
1212 	if (IS_ERR(sunxi_iommu->base)) {
1213 		dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
1214 				(unsigned int)res->start);
1215 		ret = PTR_ERR(sunxi_iommu->base);
1216 		goto err_res;
1217 	}
1218 
1219 	sunxi_iommu->bypass = DEFAULT_BYPASS_VALUE;
1220 
1221 	irq = platform_get_irq(pdev, 0);
1222 	if (irq <= 0) {
1223 		dev_dbg(dev, "Unable to find IRQ resource\n");
1224 		ret = -ENOENT;
1225 		goto err_irq;
1226 	}
1227 	pr_info("sunxi iommu: irq = %d\n", irq);
1228 
1229 	ret = devm_request_irq(dev, irq, sunxi_iommu_irq, 0,
1230 			dev_name(dev), (void *)sunxi_iommu);
1231 	if (ret < 0) {
1232 		dev_dbg(dev, "Unable to register interrupt handler\n");
1233 		goto err_irq;
1234 	}
1235 
1236 	sunxi_iommu->irq = irq;
1237 
1238 	sunxi_iommu->clk = of_clk_get_by_name(dev->of_node, "iommu");
1239 	if (IS_ERR(sunxi_iommu->clk)) {
1240 		sunxi_iommu->clk = NULL;
1241 		dev_dbg(dev, "Unable to find clock\n");
1242 		ret = -ENOENT;
1243 		goto err_clk;
1244 	}
1245 	clk_prepare_enable(sunxi_iommu->clk);
1246 
1247 	platform_set_drvdata(pdev, sunxi_iommu);
1248 	sunxi_iommu->dev = dev;
1249 	spin_lock_init(&sunxi_iommu->iommu_lock);
1250 	global_iommu_dev = sunxi_iommu;
1251 	sunxi_iommu->plat_data = of_device_get_match_data(dev);
1252 
1253 	if (sunxi_iommu->plat_data->version !=
1254 			sunxi_iommu_read(sunxi_iommu, IOMMU_VERSION_REG)) {
1255 		dev_err(dev, "iommu version mismatch, please check and reconfigure\n");
1256 		goto err_clk;
1257 	}
1258 
1259 	if (dev->parent)
1260 		pm_runtime_enable(dev);
1261 
1262 	sunxi_iommu_sysfs_create(pdev);
1263 	ret = iommu_device_sysfs_add(&sunxi_iommu->iommu, dev, NULL,
1264 				     dev_name(dev));
1265 	if (ret) {
1266 		dev_err(dev, "Failed to register iommu in sysfs\n");
1267 		goto err_clk;
1268 	}
1269 
1270 	sunxi_iommu->iommu.ops = &sunxi_iommu_ops;
1271 	iommu_device_set_ops(&sunxi_iommu->iommu, &sunxi_iommu_ops);
1272 	iommu_device_set_fwnode(&sunxi_iommu->iommu, dev->fwnode);
1273 
1274 	ret = iommu_device_register(&sunxi_iommu->iommu);
1275 	if (ret) {
1276 		dev_err(dev, "Failed to register iommu\n");
1277 		goto err_clk;
1278 	}
1279 
1280 	bus_set_iommu(&platform_bus_type, &sunxi_iommu_ops);
1281 
1282 	if (!dma_dev)
1283 		dma_dev = &pdev->dev;
1284 
1285 	return 0;
1286 
1287 err_clk:
1288 	devm_free_irq(dev, irq, sunxi_iommu);
1289 err_irq:
1290 	devm_iounmap(dev, sunxi_iommu->base);
1291 err_res:
1292 	kmem_cache_destroy(iopte_cache);
1293 	dev_err(dev, "Failed to initialize\n");
1294 
1295 	return ret;
1296 }
1297 
1298 static int sunxi_iommu_remove(struct platform_device *pdev)
1299 {
1300 	struct sunxi_iommu_dev *sunxi_iommu = platform_get_drvdata(pdev);
1301 
1302 	kmem_cache_destroy(iopte_cache);
1303 	bus_set_iommu(&platform_bus_type, NULL);
1304 	devm_free_irq(sunxi_iommu->dev, sunxi_iommu->irq, sunxi_iommu);
1305 	devm_iounmap(sunxi_iommu->dev, sunxi_iommu->base);
1306 	sunxi_iommu_sysfs_remove(pdev);
1307 	iommu_device_sysfs_remove(&sunxi_iommu->iommu);
1308 	iommu_device_unregister(&sunxi_iommu->iommu);
1309 	global_iommu_dev = NULL;
1310 
1311 	return 0;
1312 }
1313 
1314 static int sunxi_iommu_suspend(struct device *dev)
1315 {
1316 	clk_disable_unprepare(global_iommu_dev->clk);
1317 
1318 	return 0;
1319 }
1320 
1321 static int sunxi_iommu_resume(struct device *dev)
1322 {
1323 	int err;
1324 
1325 	clk_prepare_enable(global_iommu_dev->clk);
1326 
1327 	if (unlikely(!global_sunxi_iommu_domain))
1328 		return 0;
1329 
1330 	err = sunxi_iommu_hw_init(&global_sunxi_iommu_domain->domain);
1331 
1332 	return err;
1333 }
1334 
1335 const struct dev_pm_ops sunxi_iommu_pm_ops = {
1336 	.suspend	= sunxi_iommu_suspend,
1337 	.resume		= sunxi_iommu_resume,
1338 };
1339 
1340 static const struct sunxi_iommu_plat_data iommu_v10_sun50iw6_data = {
1341 	.version = 0x10,
1342 	.tlb_prefetch = 0x7f,
1343 	.tlb_invalid_mode = 0x0,
1344 	.ptw_invalid_mode = 0x0,
1345 	.master = {"DE", "VE_R", "DI", "VE", "CSI", "VP9"},
1346 };
1347 
1348 static const struct sunxi_iommu_plat_data iommu_v11_sun8iw15_data = {
1349 	.version = 0x11,
1350 	.tlb_prefetch = 0x5f,
1351 	.tlb_invalid_mode = 0x0,
1352 	.ptw_invalid_mode = 0x0,
1353 	.master = {"DE", "E_EDMA", "E_FE", "VE", "CSI",
1354 			"G2D", "E_BE", "DEBUG_MODE"},
1355 };
1356 
1357 static const struct sunxi_iommu_plat_data iommu_v12_sun8iw19_data = {
1358 	.version = 0x12,
1359 	.tlb_prefetch = 0x0,
1360 	.tlb_invalid_mode = 0x1,
1361 	.ptw_invalid_mode = 0x0,
1362 	.master = {"DE", "EISE", "AI", "VE", "CSI",
1363 			"ISP", "G2D", "DEBUG_MODE"},
1364 };
1365 
1366 static const struct sunxi_iommu_plat_data iommu_v12_sun50iw9_data = {
1367 	.version = 0x12,
1368 	/* disable prefetch due to a performance issue */
1369 	.tlb_prefetch = 0x0,
1370 	.tlb_invalid_mode = 0x1,
1371 	.ptw_invalid_mode = 0x0,
1372 	.master = {"DE", "DI", "VE_R", "VE", "CSI0",
1373 			"CSI1", "G2D", "DEBUG_MODE"},
1374 };
1375 
1376 static const struct sunxi_iommu_plat_data iommu_v13_sun50iw10_data = {
1377 	.version = 0x13,
1378 	/* disable prefetch due to a performance issue */
1379 	.tlb_prefetch = 0x0,
1380 	.tlb_invalid_mode = 0x1,
1381 	.ptw_invalid_mode = 0x0,
1382 	.master = {"DE0", "DE1", "VE", "CSI", "ISP",
1383 			"G2D", "EINK", "DEBUG_MODE"},
1384 };
1385 
1386 static const struct sunxi_iommu_plat_data iommu_v14_sun50iw12_data = {
1387 	.version = 0x14,
1388 	.tlb_prefetch = 0x3007f,
1389 	.tlb_invalid_mode = 0x1,
1390 	.ptw_invalid_mode = 0x1,
1391 	.master = {"VE", "VE_R", "TVD_MBUS", "TVD_AXI", "TVCAP",
1392 			"AV1", "TVFE", "DEBUG_MODE"},
1393 };
1394 
1395 static const struct sunxi_iommu_plat_data iommu_v14_sun8iw20_data = {
1396 	.version = 0x14,
1397 	.tlb_prefetch = 0x30016, /* disable prefetch on G2D/VE for better performance */
1398 	.tlb_invalid_mode = 0x1,
1399 	.ptw_invalid_mode = 0x1,
1400 	.master = {"VE", "CSI", "DE", "G2D", "DI", "DEBUG_MODE"},
1401 };
1402 
1403 static const struct of_device_id sunxi_iommu_dt_ids[] = {
1404 	{ .compatible = "allwinner,iommu-v10-sun50iw6", .data = &iommu_v10_sun50iw6_data},
1405 	{ .compatible = "allwinner,iommu-v11-sun8iw15", .data = &iommu_v11_sun8iw15_data},
1406 	{ .compatible = "allwinner,iommu-v12-sun8iw19", .data = &iommu_v12_sun8iw19_data},
1407 	{ .compatible = "allwinner,iommu-v12-sun50iw9", .data = &iommu_v12_sun50iw9_data},
1408 	{ .compatible = "allwinner,iommu-v13-sun50iw10", .data = &iommu_v13_sun50iw10_data},
1409 	{ .compatible = "allwinner,iommu-v14-sun50iw12", .data = &iommu_v14_sun50iw12_data},
1410 	{ .compatible = "allwinner,iommu-v14-sun8iw20", .data = &iommu_v14_sun8iw20_data},
1411 	{ .compatible = "allwinner,iommu-v14-sun20iw1", .data = &iommu_v14_sun8iw20_data},
1412 	{ /* sentinel */ },
1413 };
1414 
1415 static struct platform_driver sunxi_iommu_driver = {
1416 	.probe		= sunxi_iommu_probe,
1417 	.remove		= sunxi_iommu_remove,
1418 	.driver		= {
1419 		.owner		= THIS_MODULE,
1420 		.name		= "sunxi-iommu",
1421 		.pm 		= &sunxi_iommu_pm_ops,
1422 		.of_match_table = sunxi_iommu_dt_ids,
1423 	}
1424 };
1425 
1426 static int __init sunxi_iommu_init(void)
1427 {
1428 	return platform_driver_register(&sunxi_iommu_driver);
1429 }
1430 
1431 static void __exit sunxi_iommu_exit(void)
1432 {
1433 	return platform_driver_unregister(&sunxi_iommu_driver);
1434 }
1435 
1436 subsys_initcall(sunxi_iommu_init);
1437 module_exit(sunxi_iommu_exit);
1438 
1439 MODULE_LICENSE("GPL v2");
1440 MODULE_VERSION("1.2.0");
1441