// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
8
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/fs.h>
12 #include <linux/io.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/miscdevice.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/random.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/pci.h>
22 #include <linux/pci_ids.h>
23
24 #include <linux/pci_regs.h>
25
26 #include <uapi/linux/pcitest.h>
27
28 #define DRV_MODULE_NAME "pci-endpoint-test"
29
30 #define IRQ_TYPE_UNDEFINED -1
31 #define IRQ_TYPE_LEGACY 0
32 #define IRQ_TYPE_MSI 1
33 #define IRQ_TYPE_MSIX 2
34
35 #define PCI_ENDPOINT_TEST_MAGIC 0x0
36
37 #define PCI_ENDPOINT_TEST_COMMAND 0x4
38 #define COMMAND_RAISE_LEGACY_IRQ BIT(0)
39 #define COMMAND_RAISE_MSI_IRQ BIT(1)
40 #define COMMAND_RAISE_MSIX_IRQ BIT(2)
41 #define COMMAND_READ BIT(3)
42 #define COMMAND_WRITE BIT(4)
43 #define COMMAND_COPY BIT(5)
44
45 #define PCI_ENDPOINT_TEST_STATUS 0x8
46 #define STATUS_READ_SUCCESS BIT(0)
47 #define STATUS_READ_FAIL BIT(1)
48 #define STATUS_WRITE_SUCCESS BIT(2)
49 #define STATUS_WRITE_FAIL BIT(3)
50 #define STATUS_COPY_SUCCESS BIT(4)
51 #define STATUS_COPY_FAIL BIT(5)
52 #define STATUS_IRQ_RAISED BIT(6)
53 #define STATUS_SRC_ADDR_INVALID BIT(7)
54 #define STATUS_DST_ADDR_INVALID BIT(8)
55
56 #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
57 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
58
59 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
60 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
61
62 #define PCI_ENDPOINT_TEST_SIZE 0x1c
63 #define PCI_ENDPOINT_TEST_CHECKSUM 0x20
64
65 #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
66 #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
67
68 #define PCI_ENDPOINT_TEST_FLAGS 0x2c
69 #define FLAG_USE_DMA BIT(0)
70
71 #define PCI_DEVICE_ID_TI_J721E 0xb00d
72 #define PCI_DEVICE_ID_TI_AM654 0xb00c
73 #define PCI_DEVICE_ID_LS1088A 0x80c0
74
75 #define is_am654_pci_dev(pdev) \
76 ((pdev)->device == PCI_DEVICE_ID_TI_AM654)
77
78 #define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
79 #define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
80 #define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
81 #define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
82
/* IDA handing out the ".%d" suffix of each probed device's name. */
static DEFINE_IDA(pci_endpoint_test_ida);

/* Map a miscdevice pointer back to its enclosing pci_endpoint_test. */
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

/* Module parameter: force legacy (INTx) interrupts instead of MSI. */
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

/* Module parameter: default IRQ type used when a device is probed. */
static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
95
/* Standard PCI BAR indices (0-5). */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};
104
/* Per-device state for one bound endpoint-test PCI function. */
struct pci_endpoint_test {
	struct pci_dev *pdev;			/* underlying PCI device */
	void __iomem *base;			/* mapping of the test register BAR */
	void __iomem *bar[PCI_STD_NUM_BARS];	/* all mapped BARs (NULL if absent) */
	struct completion irq_raised;		/* signalled by the IRQ handler */
	int last_irq;				/* IRQ number that last fired */
	int num_irqs;				/* vectors currently allocated */
	int irq_type;				/* IRQ_TYPE_* in use, or IRQ_TYPE_UNDEFINED */
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;		/* /dev/pci-endpoint-test.N interface */
	enum pci_barno test_reg_bar;		/* BAR that holds the test registers */
	size_t alignment;			/* required DMA buffer alignment (0 = none) */
	const char *name;			/* name used when requesting IRQs */
};
120
/* Per-SoC settings attached to entries of the PCI device id table. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;	/* which BAR exposes the test registers */
	size_t alignment;		/* DMA buffer alignment the endpoint needs */
	int irq_type;			/* IRQ type to use at probe time */
};
126
/* Read a 32-bit test register at @offset in the test register BAR. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	void __iomem *reg = test->base + offset;

	return readl(reg);
}
132
/* Write @value to the 32-bit test register at @offset in the test BAR. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	void __iomem *reg = test->base + offset;

	writel(value, reg);
}
138
pci_endpoint_test_bar_readl(struct pci_endpoint_test * test,int bar,int offset)139 static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
140 int bar, int offset)
141 {
142 return readl(test->bar[bar] + offset);
143 }
144
/* Write a 32-bit @value at @offset inside the mapping of BAR @bar. */
static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	void __iomem *addr = test->bar[bar] + offset;

	writel(value, addr);
}
150
pci_endpoint_test_irqhandler(int irq,void * dev_id)151 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
152 {
153 struct pci_endpoint_test *test = dev_id;
154 u32 reg;
155
156 reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
157 if (reg & STATUS_IRQ_RAISED) {
158 test->last_irq = irq;
159 complete(&test->irq_raised);
160 reg &= ~STATUS_IRQ_RAISED;
161 }
162 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
163 reg);
164
165 return IRQ_HANDLED;
166 }
167
pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test * test)168 static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
169 {
170 struct pci_dev *pdev = test->pdev;
171
172 pci_free_irq_vectors(pdev);
173 test->irq_type = IRQ_TYPE_UNDEFINED;
174 }
175
pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test * test,int type)176 static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
177 int type)
178 {
179 int irq = -1;
180 struct pci_dev *pdev = test->pdev;
181 struct device *dev = &pdev->dev;
182 bool res = true;
183
184 switch (type) {
185 case IRQ_TYPE_LEGACY:
186 irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
187 if (irq < 0)
188 dev_err(dev, "Failed to get Legacy interrupt\n");
189 break;
190 case IRQ_TYPE_MSI:
191 irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
192 if (irq < 0)
193 dev_err(dev, "Failed to get MSI interrupts\n");
194 break;
195 case IRQ_TYPE_MSIX:
196 irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
197 if (irq < 0)
198 dev_err(dev, "Failed to get MSI-X interrupts\n");
199 break;
200 default:
201 dev_err(dev, "Invalid IRQ type selected\n");
202 }
203
204 if (irq < 0) {
205 irq = 0;
206 res = false;
207 }
208
209 test->irq_type = type;
210 test->num_irqs = irq;
211
212 return res;
213 }
214
pci_endpoint_test_release_irq(struct pci_endpoint_test * test)215 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
216 {
217 int i;
218 struct pci_dev *pdev = test->pdev;
219 struct device *dev = &pdev->dev;
220
221 for (i = 0; i < test->num_irqs; i++)
222 devm_free_irq(dev, pci_irq_vector(pdev, i), test);
223
224 test->num_irqs = 0;
225 }
226
pci_endpoint_test_request_irq(struct pci_endpoint_test * test)227 static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
228 {
229 int i;
230 int err;
231 struct pci_dev *pdev = test->pdev;
232 struct device *dev = &pdev->dev;
233
234 for (i = 0; i < test->num_irqs; i++) {
235 err = devm_request_irq(dev, pci_irq_vector(pdev, i),
236 pci_endpoint_test_irqhandler,
237 IRQF_SHARED, test->name, test);
238 if (err)
239 goto fail;
240 }
241
242 return true;
243
244 fail:
245 switch (irq_type) {
246 case IRQ_TYPE_LEGACY:
247 dev_err(dev, "Failed to request IRQ %d for Legacy\n",
248 pci_irq_vector(pdev, i));
249 break;
250 case IRQ_TYPE_MSI:
251 dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
252 pci_irq_vector(pdev, i),
253 i + 1);
254 break;
255 case IRQ_TYPE_MSIX:
256 dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
257 pci_irq_vector(pdev, i),
258 i + 1);
259 break;
260 }
261
262 return false;
263 }
264
pci_endpoint_test_bar(struct pci_endpoint_test * test,enum pci_barno barno)265 static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
266 enum pci_barno barno)
267 {
268 int j;
269 u32 val;
270 int size;
271 struct pci_dev *pdev = test->pdev;
272
273 if (!test->bar[barno])
274 return false;
275
276 size = pci_resource_len(pdev, barno);
277
278 if (barno == test->test_reg_bar)
279 size = 0x4;
280
281 for (j = 0; j < size; j += 4)
282 pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
283
284 for (j = 0; j < size; j += 4) {
285 val = pci_endpoint_test_bar_readl(test, barno, j);
286 if (val != 0xA0A0A0A0)
287 return false;
288 }
289
290 return true;
291 }
292
pci_endpoint_test_legacy_irq(struct pci_endpoint_test * test)293 static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
294 {
295 u32 val;
296
297 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
298 IRQ_TYPE_LEGACY);
299 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
300 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
301 COMMAND_RAISE_LEGACY_IRQ);
302 val = wait_for_completion_timeout(&test->irq_raised,
303 msecs_to_jiffies(1000));
304 if (!val)
305 return false;
306
307 return true;
308 }
309
/*
 * Ask the endpoint to raise MSI/MSI-X vector @msi_num (1-based) and
 * verify that the interrupt arrived on the matching host vector.
 */
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 left;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
					COMMAND_RAISE_MSI_IRQ);
	left = wait_for_completion_timeout(&test->irq_raised,
					   msecs_to_jiffies(1000));
	if (!left)
		return false;

	/* The completion must have come from the requested vector. */
	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}
333
/*
 * Validate a userspace transfer request: the size must be non-zero and
 * small enough that "size + alignment" cannot overflow when the driver
 * over-allocates for alignment. Returns 0 or -EINVAL.
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}
349
pci_endpoint_test_copy(struct pci_endpoint_test * test,unsigned long arg)350 static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
351 unsigned long arg)
352 {
353 struct pci_endpoint_test_xfer_param param;
354 bool ret = false;
355 void *src_addr;
356 void *dst_addr;
357 u32 flags = 0;
358 bool use_dma;
359 size_t size;
360 dma_addr_t src_phys_addr;
361 dma_addr_t dst_phys_addr;
362 struct pci_dev *pdev = test->pdev;
363 struct device *dev = &pdev->dev;
364 void *orig_src_addr;
365 dma_addr_t orig_src_phys_addr;
366 void *orig_dst_addr;
367 dma_addr_t orig_dst_phys_addr;
368 size_t offset;
369 size_t alignment = test->alignment;
370 int irq_type = test->irq_type;
371 u32 src_crc32;
372 u32 dst_crc32;
373 int err;
374
375 err = copy_from_user(¶m, (void __user *)arg, sizeof(param));
376 if (err) {
377 dev_err(dev, "Failed to get transfer param\n");
378 return false;
379 }
380
381 err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment);
382 if (err)
383 return false;
384
385 size = param.size;
386
387 use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
388 if (use_dma)
389 flags |= FLAG_USE_DMA;
390
391 if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
392 dev_err(dev, "Invalid IRQ type option\n");
393 goto err;
394 }
395
396 orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
397 if (!orig_src_addr) {
398 dev_err(dev, "Failed to allocate source buffer\n");
399 ret = false;
400 goto err;
401 }
402
403 get_random_bytes(orig_src_addr, size + alignment);
404 orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
405 size + alignment, DMA_TO_DEVICE);
406 if (dma_mapping_error(dev, orig_src_phys_addr)) {
407 dev_err(dev, "failed to map source buffer address\n");
408 ret = false;
409 goto err_src_phys_addr;
410 }
411
412 if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
413 src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
414 offset = src_phys_addr - orig_src_phys_addr;
415 src_addr = orig_src_addr + offset;
416 } else {
417 src_phys_addr = orig_src_phys_addr;
418 src_addr = orig_src_addr;
419 }
420
421 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
422 lower_32_bits(src_phys_addr));
423
424 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
425 upper_32_bits(src_phys_addr));
426
427 src_crc32 = crc32_le(~0, src_addr, size);
428
429 orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
430 if (!orig_dst_addr) {
431 dev_err(dev, "Failed to allocate destination address\n");
432 ret = false;
433 goto err_dst_addr;
434 }
435
436 orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
437 size + alignment, DMA_FROM_DEVICE);
438 if (dma_mapping_error(dev, orig_dst_phys_addr)) {
439 dev_err(dev, "failed to map destination buffer address\n");
440 ret = false;
441 goto err_dst_phys_addr;
442 }
443
444 if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
445 dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
446 offset = dst_phys_addr - orig_dst_phys_addr;
447 dst_addr = orig_dst_addr + offset;
448 } else {
449 dst_phys_addr = orig_dst_phys_addr;
450 dst_addr = orig_dst_addr;
451 }
452
453 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
454 lower_32_bits(dst_phys_addr));
455 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
456 upper_32_bits(dst_phys_addr));
457
458 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
459 size);
460
461 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
462 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
463 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
464 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
465 COMMAND_COPY);
466
467 wait_for_completion(&test->irq_raised);
468
469 dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
470 DMA_FROM_DEVICE);
471
472 dst_crc32 = crc32_le(~0, dst_addr, size);
473 if (dst_crc32 == src_crc32)
474 ret = true;
475
476 err_dst_phys_addr:
477 kfree(orig_dst_addr);
478
479 err_dst_addr:
480 dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
481 DMA_TO_DEVICE);
482
483 err_src_phys_addr:
484 kfree(orig_src_addr);
485
486 err:
487 return ret;
488 }
489
pci_endpoint_test_write(struct pci_endpoint_test * test,unsigned long arg)490 static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
491 unsigned long arg)
492 {
493 struct pci_endpoint_test_xfer_param param;
494 bool ret = false;
495 u32 flags = 0;
496 bool use_dma;
497 u32 reg;
498 void *addr;
499 dma_addr_t phys_addr;
500 struct pci_dev *pdev = test->pdev;
501 struct device *dev = &pdev->dev;
502 void *orig_addr;
503 dma_addr_t orig_phys_addr;
504 size_t offset;
505 size_t alignment = test->alignment;
506 int irq_type = test->irq_type;
507 size_t size;
508 u32 crc32;
509 int err;
510
511 err = copy_from_user(¶m, (void __user *)arg, sizeof(param));
512 if (err != 0) {
513 dev_err(dev, "Failed to get transfer param\n");
514 return false;
515 }
516
517 err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment);
518 if (err)
519 return false;
520
521 size = param.size;
522
523 use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
524 if (use_dma)
525 flags |= FLAG_USE_DMA;
526
527 if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
528 dev_err(dev, "Invalid IRQ type option\n");
529 goto err;
530 }
531
532 orig_addr = kzalloc(size + alignment, GFP_KERNEL);
533 if (!orig_addr) {
534 dev_err(dev, "Failed to allocate address\n");
535 ret = false;
536 goto err;
537 }
538
539 get_random_bytes(orig_addr, size + alignment);
540
541 orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
542 DMA_TO_DEVICE);
543 if (dma_mapping_error(dev, orig_phys_addr)) {
544 dev_err(dev, "failed to map source buffer address\n");
545 ret = false;
546 goto err_phys_addr;
547 }
548
549 if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
550 phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
551 offset = phys_addr - orig_phys_addr;
552 addr = orig_addr + offset;
553 } else {
554 phys_addr = orig_phys_addr;
555 addr = orig_addr;
556 }
557
558 crc32 = crc32_le(~0, addr, size);
559 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
560 crc32);
561
562 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
563 lower_32_bits(phys_addr));
564 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
565 upper_32_bits(phys_addr));
566
567 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
568
569 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
570 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
571 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
572 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
573 COMMAND_READ);
574
575 wait_for_completion(&test->irq_raised);
576
577 reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
578 if (reg & STATUS_READ_SUCCESS)
579 ret = true;
580
581 dma_unmap_single(dev, orig_phys_addr, size + alignment,
582 DMA_TO_DEVICE);
583
584 err_phys_addr:
585 kfree(orig_addr);
586
587 err:
588 return ret;
589 }
590
pci_endpoint_test_read(struct pci_endpoint_test * test,unsigned long arg)591 static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
592 unsigned long arg)
593 {
594 struct pci_endpoint_test_xfer_param param;
595 bool ret = false;
596 u32 flags = 0;
597 bool use_dma;
598 size_t size;
599 void *addr;
600 dma_addr_t phys_addr;
601 struct pci_dev *pdev = test->pdev;
602 struct device *dev = &pdev->dev;
603 void *orig_addr;
604 dma_addr_t orig_phys_addr;
605 size_t offset;
606 size_t alignment = test->alignment;
607 int irq_type = test->irq_type;
608 u32 crc32;
609 int err;
610
611 err = copy_from_user(¶m, (void __user *)arg, sizeof(param));
612 if (err) {
613 dev_err(dev, "Failed to get transfer param\n");
614 return false;
615 }
616
617 err = pci_endpoint_test_validate_xfer_params(dev, ¶m, alignment);
618 if (err)
619 return false;
620
621 size = param.size;
622
623 use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
624 if (use_dma)
625 flags |= FLAG_USE_DMA;
626
627 if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
628 dev_err(dev, "Invalid IRQ type option\n");
629 goto err;
630 }
631
632 orig_addr = kzalloc(size + alignment, GFP_KERNEL);
633 if (!orig_addr) {
634 dev_err(dev, "Failed to allocate destination address\n");
635 ret = false;
636 goto err;
637 }
638
639 orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
640 DMA_FROM_DEVICE);
641 if (dma_mapping_error(dev, orig_phys_addr)) {
642 dev_err(dev, "failed to map source buffer address\n");
643 ret = false;
644 goto err_phys_addr;
645 }
646
647 if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
648 phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
649 offset = phys_addr - orig_phys_addr;
650 addr = orig_addr + offset;
651 } else {
652 phys_addr = orig_phys_addr;
653 addr = orig_addr;
654 }
655
656 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
657 lower_32_bits(phys_addr));
658 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
659 upper_32_bits(phys_addr));
660
661 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
662
663 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
664 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
665 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
666 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
667 COMMAND_WRITE);
668
669 wait_for_completion(&test->irq_raised);
670
671 dma_unmap_single(dev, orig_phys_addr, size + alignment,
672 DMA_FROM_DEVICE);
673
674 crc32 = crc32_le(~0, addr, size);
675 if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
676 ret = true;
677
678 err_phys_addr:
679 kfree(orig_addr);
680 err:
681 return ret;
682 }
683
pci_endpoint_test_clear_irq(struct pci_endpoint_test * test)684 static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
685 {
686 pci_endpoint_test_release_irq(test);
687 pci_endpoint_test_free_irq_vectors(test);
688 return true;
689 }
690
pci_endpoint_test_set_irq(struct pci_endpoint_test * test,int req_irq_type)691 static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
692 int req_irq_type)
693 {
694 struct pci_dev *pdev = test->pdev;
695 struct device *dev = &pdev->dev;
696
697 if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
698 dev_err(dev, "Invalid IRQ type option\n");
699 return false;
700 }
701
702 if (test->irq_type == req_irq_type)
703 return true;
704
705 pci_endpoint_test_release_irq(test);
706 pci_endpoint_test_free_irq_vectors(test);
707
708 if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
709 goto err;
710
711 if (!pci_endpoint_test_request_irq(test))
712 goto err;
713
714 return true;
715
716 err:
717 pci_endpoint_test_free_irq_vectors(test);
718 return false;
719 }
720
pci_endpoint_test_ioctl(struct file * file,unsigned int cmd,unsigned long arg)721 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
722 unsigned long arg)
723 {
724 int ret = -EINVAL;
725 enum pci_barno bar;
726 struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
727 struct pci_dev *pdev = test->pdev;
728
729 mutex_lock(&test->mutex);
730 switch (cmd) {
731 case PCITEST_BAR:
732 bar = arg;
733 if (bar < 0 || bar > 5)
734 goto ret;
735 if (is_am654_pci_dev(pdev) && bar == BAR_0)
736 goto ret;
737 ret = pci_endpoint_test_bar(test, bar);
738 break;
739 case PCITEST_LEGACY_IRQ:
740 ret = pci_endpoint_test_legacy_irq(test);
741 break;
742 case PCITEST_MSI:
743 case PCITEST_MSIX:
744 ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
745 break;
746 case PCITEST_WRITE:
747 ret = pci_endpoint_test_write(test, arg);
748 break;
749 case PCITEST_READ:
750 ret = pci_endpoint_test_read(test, arg);
751 break;
752 case PCITEST_COPY:
753 ret = pci_endpoint_test_copy(test, arg);
754 break;
755 case PCITEST_SET_IRQTYPE:
756 ret = pci_endpoint_test_set_irq(test, arg);
757 break;
758 case PCITEST_GET_IRQTYPE:
759 ret = irq_type;
760 break;
761 case PCITEST_CLEAR_IRQ:
762 ret = pci_endpoint_test_clear_irq(test);
763 break;
764 }
765
766 ret:
767 mutex_unlock(&test->mutex);
768 return ret;
769 }
770
/* Only ioctl() is supported; all test control goes through it. */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
775
pci_endpoint_test_probe(struct pci_dev * pdev,const struct pci_device_id * ent)776 static int pci_endpoint_test_probe(struct pci_dev *pdev,
777 const struct pci_device_id *ent)
778 {
779 int err;
780 int id;
781 char name[24];
782 enum pci_barno bar;
783 void __iomem *base;
784 struct device *dev = &pdev->dev;
785 struct pci_endpoint_test *test;
786 struct pci_endpoint_test_data *data;
787 enum pci_barno test_reg_bar = BAR_0;
788 struct miscdevice *misc_device;
789
790 if (pci_is_bridge(pdev))
791 return -ENODEV;
792
793 test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
794 if (!test)
795 return -ENOMEM;
796
797 test->test_reg_bar = 0;
798 test->alignment = 0;
799 test->pdev = pdev;
800 test->irq_type = IRQ_TYPE_UNDEFINED;
801
802 if (no_msi)
803 irq_type = IRQ_TYPE_LEGACY;
804
805 data = (struct pci_endpoint_test_data *)ent->driver_data;
806 if (data) {
807 test_reg_bar = data->test_reg_bar;
808 test->test_reg_bar = test_reg_bar;
809 test->alignment = data->alignment;
810 irq_type = data->irq_type;
811 }
812
813 init_completion(&test->irq_raised);
814 mutex_init(&test->mutex);
815
816 if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
817 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
818 dev_err(dev, "Cannot set DMA mask\n");
819 return -EINVAL;
820 }
821
822 err = pci_enable_device(pdev);
823 if (err) {
824 dev_err(dev, "Cannot enable PCI device\n");
825 return err;
826 }
827
828 err = pci_request_regions(pdev, DRV_MODULE_NAME);
829 if (err) {
830 dev_err(dev, "Cannot obtain PCI resources\n");
831 goto err_disable_pdev;
832 }
833
834 pci_set_master(pdev);
835
836 if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
837 err = -EINVAL;
838 goto err_disable_irq;
839 }
840
841 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
842 if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
843 base = pci_ioremap_bar(pdev, bar);
844 if (!base) {
845 dev_err(dev, "Failed to read BAR%d\n", bar);
846 WARN_ON(bar == test_reg_bar);
847 }
848 test->bar[bar] = base;
849 }
850 }
851
852 test->base = test->bar[test_reg_bar];
853 if (!test->base) {
854 err = -ENOMEM;
855 dev_err(dev, "Cannot perform PCI test without BAR%d\n",
856 test_reg_bar);
857 goto err_iounmap;
858 }
859
860 pci_set_drvdata(pdev, test);
861
862 id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
863 if (id < 0) {
864 err = id;
865 dev_err(dev, "Unable to get id\n");
866 goto err_iounmap;
867 }
868
869 snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
870 test->name = kstrdup(name, GFP_KERNEL);
871 if (!test->name) {
872 err = -ENOMEM;
873 goto err_ida_remove;
874 }
875
876 if (!pci_endpoint_test_request_irq(test)) {
877 err = -EINVAL;
878 goto err_kfree_test_name;
879 }
880
881 misc_device = &test->miscdev;
882 misc_device->minor = MISC_DYNAMIC_MINOR;
883 misc_device->name = kstrdup(name, GFP_KERNEL);
884 if (!misc_device->name) {
885 err = -ENOMEM;
886 goto err_release_irq;
887 }
888 misc_device->fops = &pci_endpoint_test_fops,
889
890 err = misc_register(misc_device);
891 if (err) {
892 dev_err(dev, "Failed to register device\n");
893 goto err_kfree_name;
894 }
895
896 return 0;
897
898 err_kfree_name:
899 kfree(misc_device->name);
900
901 err_release_irq:
902 pci_endpoint_test_release_irq(test);
903
904 err_kfree_test_name:
905 kfree(test->name);
906
907 err_ida_remove:
908 ida_simple_remove(&pci_endpoint_test_ida, id);
909
910 err_iounmap:
911 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
912 if (test->bar[bar])
913 pci_iounmap(pdev, test->bar[bar]);
914 }
915
916 err_disable_irq:
917 pci_endpoint_test_free_irq_vectors(test);
918 pci_release_regions(pdev);
919
920 err_disable_pdev:
921 pci_disable_device(pdev);
922
923 return err;
924 }
925
pci_endpoint_test_remove(struct pci_dev * pdev)926 static void pci_endpoint_test_remove(struct pci_dev *pdev)
927 {
928 int id;
929 enum pci_barno bar;
930 struct pci_endpoint_test *test = pci_get_drvdata(pdev);
931 struct miscdevice *misc_device = &test->miscdev;
932
933 if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
934 return;
935 if (id < 0)
936 return;
937
938 misc_deregister(&test->miscdev);
939 kfree(misc_device->name);
940 kfree(test->name);
941 ida_simple_remove(&pci_endpoint_test_ida, id);
942 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
943 if (test->bar[bar])
944 pci_iounmap(pdev, test->bar[bar]);
945 }
946
947 pci_endpoint_test_release_irq(test);
948 pci_endpoint_test_free_irq_vectors(test);
949
950 pci_release_regions(pdev);
951 pci_disable_device(pdev);
952 }
953
/* Generic settings: test registers in BAR0, 4K DMA alignment. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI AM654: test registers live in BAR2, requires 64K alignment. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/* TI J721E: default BAR0, 256-byte alignment. */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

/* Devices known to run the endpoint-test function. */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
998
/* Driver registration; module init/exit are generated by the macro. */
static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1010