/*
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/barrier.h>
#include "sunxi-iommu.h"

#ifdef CONFIG_AW_IOMMU_TESTS

#ifdef CONFIG_64BIT

#define kstrtoux kstrtou64
#define kstrtox_from_user kstrtoll_from_user
#define kstrtosize_t kstrtoul

#else

#define kstrtoux kstrtou32
#define kstrtox_from_user kstrtoint_from_user
#define kstrtosize_t kstrtouint

#endif

#define ION_KERNEL_USER_ERR(str) pr_err("%s failed!\n", #str)
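/*
 * State bundle for an ION-backed buffer (client/handle, device address,
 * kernel mapping, length and scatter-gather table); kept for the ION
 * interface test below, which is currently a stub.
 */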
struct ion_facade {
	struct ion_client *client;
	struct ion_handle *handle;
	dma_addr_t dma_address;
	void *virtual_address;
	size_t address_length;
	struct sg_table *sg_table;
};

static struct dentry *iommu_debugfs_top;

static LIST_HEAD(iommu_debug_devices);
static struct dentry *debugfs_tests_dir;

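/*
 * Per-device test state; one instance is created for each device that has
 * an "iommus" property and linked onto iommu_debug_devices.
 */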
struct iommu_debug_device {
	struct device *dev;
	struct iommu_domain *domain;
	u64 iova;
	u64 phys;
	size_t len;
	struct list_head list;
};

static const char *_size_to_string(unsigned long size)
{
	switch (size) {
	case SZ_4K:
		return "4K";
	case SZ_8K:
		return "8K";
	case SZ_16K:
		return "16K";
	case SZ_64K:
		return "64K";
	case SZ_1M:
		return "1M";
	case SZ_2M:
		return "2M";
	case SZ_1M * 4:
		return "4M";
	case SZ_1M * 8:
		return "8M";
	case SZ_1M * 16:
		return "16M";
	case SZ_1M * 32:
		return "32M";
	}
	return "unknown size, please add to _size_to_string";
}

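/*
 * Profile dma_map_single_attrs()/dma_unmap_single_attrs() latency: ten
 * SZ_4K map/unmap rounds each with and without DMA_ATTR_SKIP_CPU_SYNC,
 * reporting per-iteration and average times in nanoseconds.
 */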
static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
						   void *ignored)
{
	int i, experiment;
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;
	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
	dma_addr_t dma_addr;
	void *virt;
	const char * const extra_labels[] = {
		"not coherent",
		"coherent",
	};
	unsigned long extra_attrs[] = {
		0,
		DMA_ATTR_SKIP_CPU_SYNC,
	};

	/* Allocate a full page so the SZ_4K mappings below stay in bounds. */
	virt = kmalloc(SZ_4K, GFP_KERNEL);
	if (!virt)
		goto out;

	for (experiment = 0; experiment < 2; ++experiment) {
		size_t map_avg = 0, unmap_avg = 0;

		for (i = 0; i < 10; ++i) {
			struct timespec64 tbefore, tafter, diff;
			u64 ns;

			ktime_get_ts64(&tbefore);
			dma_addr = dma_map_single_attrs(
				dev, virt, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			ktime_get_ts64(&tafter);
			diff.tv_sec = tafter.tv_sec - tbefore.tv_sec;
			diff.tv_nsec = tafter.tv_nsec - tbefore.tv_nsec;
			ns = diff.tv_sec * NSEC_PER_SEC + diff.tv_nsec;
			if (dma_mapping_error(dev, dma_addr)) {
				seq_puts(s, "dma_map_single failed\n");
				goto out_free;
			}
			map_elapsed_ns[i] = ns;
			ktime_get_ts64(&tbefore);
			dma_unmap_single_attrs(
				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
				extra_attrs[experiment]);
			ktime_get_ts64(&tafter);
			diff.tv_sec = tafter.tv_sec - tbefore.tv_sec;
			diff.tv_nsec = tafter.tv_nsec - tbefore.tv_nsec;
			ns = diff.tv_sec * NSEC_PER_SEC + diff.tv_nsec;
			unmap_elapsed_ns[i] = ns;
		}
		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_map_single_attrs");
		for (i = 0; i < 10; ++i) {
			map_avg += map_elapsed_ns[i];
			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		map_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", map_avg);

		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
			   "dma_unmap_single_attrs");
		for (i = 0; i < 10; ++i) {
			unmap_avg += unmap_elapsed_ns[i];
			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
				   i < 9 ? ", " : "");
		}
		unmap_avg /= 10;
		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
	}

out_free:
	kfree(virt);
out:
	return 0;
}


static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
						   struct file *file)
{
	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
	.open = iommu_debug_profiling_fast_dma_api_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Applies @fn to the device behind this debugfs file, on the global IOMMU domain */
static int __apply_to_new_mapping(struct seq_file *s,
				  int (*fn)(struct device *dev,
					    struct seq_file *s,
					    struct iommu_domain *domain,
					    void *priv),
				  void *priv)
{
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;

	return fn(dev, s, global_iommu_domain, priv);
}

#if 0
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	int i, ret = 0;
	unsigned long iova;
	const unsigned long max = SZ_1G * 4UL;
	void *virt;
	phys_addr_t phys;
	dma_addr_t dma_addr;

	/*
	 * we'll be doing 4K and 8K mappings. Need to own an entire 8K
	 * chunk that we can work with.
	 */
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
	phys = virt_to_phys(virt);

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
		if (dma_addr == 0) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/*
	 * free up 4K at the very beginning, then leave one 4K mapping,
	 * then free up 8K. This will result in the next 8K map to skip
	 * over the 4K hole and take the 8K one.
	 */
	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);

	/* remap 8K */
	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);

	/*
	 * now remap 4K. We should get the first 4K chunk that was skipped
	 * over during the previous 8K map. If we missed a TLB invalidate
	 * at that point this should explode.
	 */
	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);

	/* we're all full again. unmap everything. */
	for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
		dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(SZ_8K));
	return ret;
}
#else
static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
{
	return 0;
}
#endif

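/*
 * Tiny Fibonacci generator; get_next_fib() drives the deterministic
 * "pseudo-random" walk over the IOVA space used by __rand_va_sweep().
 */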
struct fib_state {
	unsigned long cur;
	unsigned long prev;
};

static __maybe_unused void fib_init(struct fib_state *f)
{
	f->cur = f->prev = 1;
}

static __maybe_unused unsigned long get_next_fib(struct fib_state *f)
{
	unsigned long next = f->cur + f->prev;

	f->prev = f->cur;
	f->cur = next;
	return next;
}

/*
 * Not actually random. Just testing the fibs (and max - the fibs).
 */
#if 0
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	u64 iova;
	const unsigned long max = SZ_1G * 4UL;
	int i, remapped, unmapped, ret = 0;
	void *virt;
	dma_addr_t dma_addr, dma_addr2;
	struct fib_state fib;

	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}

	/* fill the whole 4GB space */
	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == 0) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			goto out;
		}
	}

	/* now unmap "random" iovas */
	unmapped = 0;
	fib_init(&fib);
	for (iova = get_next_fib(&fib) * size;
	     iova < max - size;
	     iova = get_next_fib(&fib) * size) {
		dma_addr = iova;
		dma_addr2 = max - size - iova;
		if (dma_addr == dma_addr2) {
			WARN(1,
			     "%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
			     __func__);
			return -EINVAL;
		}
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
		unmapped += 2;
	}

	/* and map until everything fills back up */
	for (remapped = 0;; ++remapped) {
		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (dma_addr == 0)
			break;
	}

	if (unmapped != remapped) {
		dev_err(dev,
			"Unexpected random remap count! Unmapped %d but remapped %d\n",
			unmapped, remapped);
		ret = -EINVAL;
	}

	for (dma_addr = 0; dma_addr < max; dma_addr += size)
		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);

out:
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}
#else
static int __rand_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size)
{
	return 0;
}
#endif

/* Singly linked list of the DMA addresses handed out during a VA sweep. */
struct dma_addr_list {
	dma_addr_t addr;
	struct dma_addr_list *next;
};

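/*
 * Map the same buffer back to back until SZ_128M worth of IOVA space has
 * been handed out, remembering every DMA address on a list, then unmap and
 * free everything. Exercises allocator behaviour as the space fills up.
 */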
static int __full_va_sweep(struct device *dev, struct seq_file *s,
			   const size_t size, struct iommu_domain *domain)
{
	unsigned long iova;
	void *virt;
	phys_addr_t phys;
	const unsigned long max = SZ_128M;
	struct dma_addr_list *phead, *p, *ptmp;
	int ret = 0;

	phead = kzalloc(sizeof(*phead), GFP_KERNEL);
	if (!phead)
		return -ENOMEM;
	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
	if (!virt) {
		kfree(phead);
		if (size > SZ_8K) {
			dev_err(dev,
				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
				_size_to_string(size));
			return 0;
		}
		return -ENOMEM;
	}
	phys = virt_to_phys(virt);

	for (p = phead, iova = 0; iova < max; iova += size, p = p->next) {
		p->addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
		if (p->addr == 0 || dma_mapping_error(dev, p->addr)) {
			p->addr = 0;
			dev_err(dev, "Failed to map dma, out of iova space\n");
			ret = -ENOMEM;
			break;
		}
		p->next = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p->next) {
			ret = -ENOMEM;
			break;
		}
	}

	/* Unmap and free every node that got a mapping, then the tail node. */
	for (p = phead; p && p->addr != 0;) {
		dma_unmap_single(dev, p->addr, size, DMA_TO_DEVICE);
		ptmp = p;
		p = p->next;
		kfree(ptmp);
	}
	kfree(p);
	free_pages((unsigned long)virt, get_order(size));
	return ret;
}

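/* Emit a message both to the kernel log and to the seq_file of the test. */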
#define ds_printf(d, s, fmt, ...) ({			\
		dev_err(d, fmt, ##__VA_ARGS__);		\
		seq_printf(s, fmt, ##__VA_ARGS__);	\
	})

static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
					struct iommu_domain *domain, void *priv)
{
	int i, j;
	int ret = 0;
	size_t *sz, *sizes = priv;

	for (j = 0; j < 1; ++j) {
		for (sz = sizes; *sz; ++sz) {
			for (i = 0; i < 2; ++i) {
				ds_printf(dev, s, "Full VA sweep @%s %d",
					  _size_to_string(*sz), i);
				if (__full_va_sweep(dev, s, *sz, domain)) {
					ds_printf(dev, s, " -> FAILED\n");
					ret = -EINVAL;
					goto out;
				} else {
					ds_printf(dev, s, " -> SUCCEEDED\n");
				}
			}
		}
	}

	ds_printf(dev, s, "bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
		goto out;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	for (sz = sizes; *sz; ++sz) {
		for (i = 0; i < 2; ++i) {
			ds_printf(dev, s, "Rand VA sweep @%s %d",
				  _size_to_string(*sz), i);
			if (__rand_va_sweep(dev, s, *sz)) {
				ds_printf(dev, s, " -> FAILED\n");
				ret = -EINVAL;
				goto out;
			} else {
				ds_printf(dev, s, " -> SUCCEEDED\n");
			}
		}
	}

	ds_printf(dev, s, "TLB stress sweep");
	if (__tlb_stress_sweep(dev, s)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
		goto out;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}

	ds_printf(dev, s, "second bonus map:");
	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
		goto out;
	} else {
		ds_printf(dev, s, " -> SUCCEEDED\n");
	}
out:
	return ret;
}

/* iova alloc strategy stress test */
static int iommu_iova_alloc_strategy_stress_show(struct seq_file *s,
						 void *ignored)
{
	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};

	return __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
}

static int iommu_iova_alloc_strategy_stress_open(struct inode *inode,
						 struct file *file)
{
	return single_open(file, iommu_iova_alloc_strategy_stress_show,
			   inode->i_private);
}

static const struct file_operations iommu_iova_alloc_strategy_stress_fops = {
	.open = iommu_iova_alloc_strategy_stress_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

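/*
 * Allocate a 742KB coherent buffer, then write and read back one byte in
 * every 1KB chunk to check that the kernel mapping of the buffer works.
 */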
static int __functional_dma_api_alloc_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = SZ_1K * 742;
	int ret = 0;
	u8 *data;
	dma_addr_t iova;

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 742; ++i) {
			int ind = SZ_1K * i;
			u8 *p = data + ind;
			u8 val = i % 255;

			memset(data, 0xa5, size);
			*p = val;
			(*p)++;
			if ((*p) != val + 1) {
				ds_printf(dev, s,
					  " -> FAILED on iter %d since %d != %d\n",
					  i, *p, val + 1);
				ret = -EINVAL;
				break;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
		dma_free_coherent(dev, size, data, iova);
	}

	return ret;
}

/* iommu kernel virtual addr read/write */
static int iommu_kvirtual_addr_rdwr_show(struct seq_file *s,
					 void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;

	return __functional_dma_api_alloc_test(dev, s,
					       global_iommu_domain, NULL);
}

static int iommu_kvirtual_addr_rdwr_open(struct inode *inode,
					 struct file *file)
{
	return single_open(file, iommu_kvirtual_addr_rdwr_show,
			   inode->i_private);
}

static const struct file_operations iommu_kvirtual_addr_rdwr_fops = {
	.open = iommu_kvirtual_addr_rdwr_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __functional_dma_api_ion_test(struct device *dev,
					 struct seq_file *s,
					 struct iommu_domain *domain,
					 void *ignored)
{
	return 0;
}

/* iommu ion interface test */
static int iommu_ion_interface_test_show(struct seq_file *s,
					 void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;

	return __functional_dma_api_ion_test(dev, s,
					     global_iommu_domain, NULL);
}

static int iommu_ion_interface_test_open(struct inode *inode,
					 struct file *file)
{
	return single_open(file, iommu_ion_interface_test_show,
			   inode->i_private);
}

static const struct file_operations iommu_ion_interface_test_fops = {
	.open = iommu_ion_interface_test_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

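/*
 * Put the IOMMU into debug mode and drive reads and writes through the
 * device-visible IOVA of a coherent buffer, checking that they land in
 * (and come back from) the CPU mapping of the same buffer.
 */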
static int __functional_dma_api_iova_test(struct device *dev,
					  struct seq_file *s,
					  struct iommu_domain *domain,
					  void *ignored)
{
	size_t size = SZ_4K * 1024;
	int ret = 0;
	u32 *data;
	dma_addr_t iova;

	sunxi_set_debug_mode();

	/* Make sure we can allocate and use a buffer */
	ds_printf(dev, s, "Allocating coherent iova buffer");
	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
	if (!data) {
		ds_printf(dev, s, " -> FAILED\n");
		ret = -EINVAL;
	} else {
		int i;

		ds_printf(dev, s, " -> SUCCEEDED\n");
		ds_printf(dev, s, "Using coherent buffer");
		for (i = 0; i < 1024; ++i) {
			int ind = (SZ_4K * i) / sizeof(u32);
			u32 *p = data + ind;
			u32 *p1 = (u32 *)iova + ind;
			u32 read_data;

			memset(data, 0xa5, size);
			*p = 0x5a5a5a5a;
			/*
			 * make sure that *p is written before
			 * the write operation of the debug mode of iommu
			 */
			wmb();
			sunxi_iova_test_write((dma_addr_t)p1, 0xdead);
			/*
			 * do the write operation of debug mode of iommu
			 * in order
			 */
			rmb();
			if ((*p) != 0xdead) {
				ds_printf(dev, s,
					  "-> FAILED on iova0 iter %x %x\n",
					  i, *p);
				ret = -EINVAL;
				goto out;
			}

			*p = 0xffffaaaa;
			/*
			 * make sure that *p is written before
			 * the read operation of the debug mode of iommu
			 */
			wmb();
			read_data = sunxi_iova_test_read((dma_addr_t)p1);
			if (read_data != 0xffffaaaa) {
				ds_printf(dev, s,
					  "-> FAILED on iova1 iter %x %x\n",
					  i, read_data);
				ret = -EINVAL;
				goto out;
			}
		}
		if (!ret)
			ds_printf(dev, s, " -> SUCCEEDED\n");
	}
out:
	/* Only free the buffer if the allocation above actually succeeded. */
	if (data)
		dma_free_coherent(dev, size, data, iova);
	sunxi_set_prefetch_mode();
	return ret;
}

/* iommu test using the debug interface */
static int iommu_vir_devio_addr_rdwr_show(struct seq_file *s,
					  void *ignored)
{
	int ret;

	ret = __apply_to_new_mapping(s, __functional_dma_api_iova_test, NULL);
	if (ret) {
		pr_err("the first iova test failed\n");
		return ret;
	}
	ret = __apply_to_new_mapping(s, __functional_dma_api_iova_test, NULL);
	if (ret) {
		pr_err("the second iova test failed\n");
		return ret;
	}
	ret = __apply_to_new_mapping(s, __functional_dma_api_iova_test, NULL);
	if (ret) {
		pr_err("the third iova test failed\n");
		return ret;
	}
	return 0;
}

static int iommu_vir_devio_addr_rdwr_open(struct inode *inode,
					  struct file *file)
{
	return single_open(file, iommu_vir_devio_addr_rdwr_show,
			   inode->i_private);
}

static const struct file_operations iommu_vir_devio_addr_rdwr_fops = {
	.open = iommu_vir_devio_addr_rdwr_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

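/*
 * Map a small kmalloc'ed buffer 1000 times in a row, each time checking
 * that iommu_iova_to_phys() agrees with virt_to_phys() and that the buffer
 * contents survive the map/unmap cycle.
 */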
static int __functional_dma_api_basic_test(struct device *dev,
					   struct seq_file *s,
					   struct iommu_domain *domain,
					   void *ignored)
{
	size_t size = 1518;
	int i, j, ret = 0;
	u8 *data;
	dma_addr_t iova;
	phys_addr_t pa, pa2;

	ds_printf(dev, s, "Basic DMA API test");
	/* Make sure we can allocate and use a buffer */
	for (i = 0; i < 1000; ++i) {
		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ds_printf(dev, s, " -> FAILED\n");
			ret = -EINVAL;
			goto out;
		}
		memset(data, 0xa5, size);
		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, iova)) {
			dev_err(dev, "Failed map on iter %d\n", i);
			ret = -EINVAL;
			kfree(data);
			goto out;
		}
		pa = iommu_iova_to_phys(domain, iova);
		pa2 = virt_to_phys(data);
		if (pa != pa2) {
			dev_err(dev,
				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
				&pa, &pa2);
			ret = -EINVAL;
			dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
			kfree(data);
			goto out;
		}
		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
		for (j = 0; j < size; ++j) {
			if (data[j] != 0xa5) {
				dev_err(dev, "data[%d] != 0xa5 (0x%02x)\n",
					j, data[j]);
				ret = -EINVAL;
				kfree(data);
				goto out;
			}
		}
		kfree(data);
	}

out:
	if (ret)
		ds_printf(dev, s, " -> FAILED\n");
	else
		ds_printf(dev, s, " -> SUCCEEDED\n");

	return ret;
}

/* iommu basic test */
static int iommu_debug_basic_test_show(struct seq_file *s,
				       void *ignored)
{
	struct iommu_debug_device *ddev = s->private;
	struct device *dev = ddev->dev;

	return __functional_dma_api_basic_test(dev, s,
					       global_iommu_domain, NULL);
}

static int iommu_debug_basic_test_open(struct inode *inode,
				       struct file *file)
{
	return single_open(file, iommu_debug_basic_test_show,
			   inode->i_private);
}

static const struct file_operations iommu_debug_basic_test_fops = {
	.open = iommu_debug_basic_test_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * The following will only work for drivers that implement the generic
 * device tree bindings described in
 * Documentation/devicetree/bindings/iommu/iommu.txt
 */
static int snarf_iommu_devices(struct device *dev, const char *name)
{
	struct iommu_debug_device *ddev;
	struct dentry *dir;

	if (IS_ERR_OR_NULL(dev))
		return -EINVAL;

	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
	if (!ddev)
		return -ENOMEM;
	ddev->dev = dev;
	ddev->domain = global_iommu_domain;
	dir = debugfs_create_dir(name, debugfs_tests_dir);
	if (!dir) {
		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
		       name);
		goto err;
	}

	if (!debugfs_create_file("profiling_fast_dma_api", 0400, dir, ddev,
				 &iommu_debug_profiling_fast_dma_api_fops)) {
		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
		       name);
		goto err_rmdir;
	}

	if (!debugfs_create_file("iommu_basic_test", 0400, dir, ddev,
				 &iommu_debug_basic_test_fops)) {
		pr_err("Couldn't create iommu/devices/%s/iommu_basic_test debugfs file\n",
		       name);
		goto err_rmdir;
	}

	if (!debugfs_create_file("ion_interface_test", 0400, dir, ddev,
				 &iommu_ion_interface_test_fops)) {
		pr_err("Couldn't create iommu/devices/%s/ion_interface_test debugfs file\n",
		       name);
		goto err_rmdir;
	}

	if (!debugfs_create_file("iova_alloc_strategy_stress_test",
				 0200, dir, ddev,
				 &iommu_iova_alloc_strategy_stress_fops)) {
		pr_err("Couldn't create iommu/devices/%s/iova_alloc_strategy_stress_test debugfs file\n",
		       name);
		goto err_rmdir;
	}

	if (!debugfs_create_file("kvirtual_addr_rdwr_test", 0200, dir, ddev,
				 &iommu_kvirtual_addr_rdwr_fops)) {
		pr_err("Couldn't create iommu/devices/%s/kvirtual_addr_rdwr_test debugfs file\n",
		       name);
		goto err_rmdir;
	}

	if (!debugfs_create_file("vir_devio_addr_rdwr_test", 0200, dir, ddev,
				 &iommu_vir_devio_addr_rdwr_fops)) {
		pr_err("Couldn't create iommu/devices/%s/vir_devio_addr_rdwr_test debugfs file\n",
		       name);
		goto err_rmdir;
	}

	list_add(&ddev->list, &iommu_debug_devices);
	return 0;

err_rmdir:
	debugfs_remove_recursive(dir);
err:
	kfree(ddev);
	/* Return 0 so that bus_for_each_dev() keeps iterating over devices. */
	return 0;
}

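/*
 * Create the per-device test directory for every platform device whose
 * DT node has an "iommus" property; other devices are skipped.
 */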
static int pass_iommu_devices(struct device *dev, void *ignored)
{
	if (!of_find_property(dev->of_node, "iommus", NULL))
		return 0;

	return snarf_iommu_devices(dev, dev_name(dev));
}

static int iommu_debug_populate_devices(void)
{
	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
				pass_iommu_devices);
}

static int iommu_debug_init_tests(void)
{
	iommu_debugfs_top = debugfs_create_dir("iommu", NULL);
	if (!iommu_debugfs_top) {
		pr_err("Couldn't create iommu debugfs directory\n");
		return -ENODEV;
	}
	debugfs_tests_dir = debugfs_create_dir("tests",
					       iommu_debugfs_top);
	if (!debugfs_tests_dir) {
		pr_err("Couldn't create iommu/tests debugfs directory\n");
		return -ENODEV;
	}

	return iommu_debug_populate_devices();
}

static void iommu_debug_destroy_tests(void)
{
	debugfs_remove_recursive(debugfs_tests_dir);
}
#else
static inline int iommu_debug_init_tests(void) { return 0; }
static inline void iommu_debug_destroy_tests(void) { }
#endif

static int __init iommu_debug_init(void)
{
	if (iommu_debug_init_tests())
		return -ENODEV;

	return 0;
}

static void __exit iommu_debug_exit(void)
{
	iommu_debug_destroy_tests();
}

module_init(iommu_debug_init);
module_exit(iommu_debug_exit);

MODULE_LICENSE("GPL v2");