• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Host side test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/io.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24 
25 #include <linux/pci_regs.h>
26 
27 #include <uapi/linux/pcitest.h>
28 
29 #define DRV_MODULE_NAME				"pci-endpoint-test"
30 
31 #define IRQ_TYPE_UNDEFINED			-1
32 #define IRQ_TYPE_INTX				0
33 #define IRQ_TYPE_MSI				1
34 #define IRQ_TYPE_MSIX				2
35 
36 #define PCI_ENDPOINT_TEST_MAGIC			0x0
37 
38 #define PCI_ENDPOINT_TEST_COMMAND		0x4
39 #define COMMAND_RAISE_INTX_IRQ			BIT(0)
40 #define COMMAND_RAISE_MSI_IRQ			BIT(1)
41 #define COMMAND_RAISE_MSIX_IRQ			BIT(2)
42 #define COMMAND_READ				BIT(3)
43 #define COMMAND_WRITE				BIT(4)
44 #define COMMAND_COPY				BIT(5)
45 
46 #define PCI_ENDPOINT_TEST_STATUS		0x8
47 #define STATUS_READ_SUCCESS			BIT(0)
48 #define STATUS_READ_FAIL			BIT(1)
49 #define STATUS_WRITE_SUCCESS			BIT(2)
50 #define STATUS_WRITE_FAIL			BIT(3)
51 #define STATUS_COPY_SUCCESS			BIT(4)
52 #define STATUS_COPY_FAIL			BIT(5)
53 #define STATUS_IRQ_RAISED			BIT(6)
54 #define STATUS_SRC_ADDR_INVALID			BIT(7)
55 #define STATUS_DST_ADDR_INVALID			BIT(8)
56 
57 #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
58 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
59 
60 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
61 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
62 
63 #define PCI_ENDPOINT_TEST_SIZE			0x1c
64 #define PCI_ENDPOINT_TEST_CHECKSUM		0x20
65 
66 #define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
67 #define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28
68 
69 #define PCI_ENDPOINT_TEST_FLAGS			0x2c
70 #define FLAG_USE_DMA				BIT(0)
71 
72 #define PCI_DEVICE_ID_TI_AM654			0xb00c
73 #define PCI_DEVICE_ID_TI_J7200			0xb00f
74 #define PCI_DEVICE_ID_TI_AM64			0xb010
75 #define PCI_DEVICE_ID_TI_J721S2		0xb013
76 #define PCI_DEVICE_ID_LS1088A			0x80c0
77 #define PCI_DEVICE_ID_IMX8			0x0808
78 
79 #define is_am654_pci_dev(pdev)		\
80 		((pdev)->device == PCI_DEVICE_ID_TI_AM654)
81 
82 #define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
83 #define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
84 #define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
85 #define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
86 #define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031
87 
88 #define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588
89 
90 static DEFINE_IDA(pci_endpoint_test_ida);
91 
92 #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
93 					    miscdev)
94 
95 static bool no_msi;
96 module_param(no_msi, bool, 0444);
97 MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
98 
99 static int irq_type = IRQ_TYPE_MSI;
100 module_param(irq_type, int, 0444);
101 MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
102 
/* Standard PCI BAR indices 0-5, used as ioctl argument for PCITEST_BAR. */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

/* Per-device state for one endpoint-test PCI function. */
struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;	/* mapping of the test register BAR */
	void __iomem	*bar[PCI_STD_NUM_BARS];	/* mappings of all MEM BARs (NULL if unmapped) */
	struct completion irq_raised;	/* signalled by the IRQ handler */
	int		last_irq;	/* Linux IRQ number that fired last */
	int		num_irqs;	/* vectors currently allocated */
	int		irq_type;	/* IRQ_TYPE_* in use, or IRQ_TYPE_UNDEFINED */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;	/* BAR that holds the test registers */
	size_t alignment;	/* DMA address alignment required by the EP, 0 = none */
	const char *name;	/* unique name, also used when requesting IRQs */
};

/* Per-SoC configuration attached to entries of the PCI ID table. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};
133 
/* Read a 32-bit test register at @offset in the test register BAR. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	void __iomem *addr = test->base + offset;

	return readl(addr);
}
139 
/* Write @value to the 32-bit test register at @offset. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	void __iomem *addr = test->base + offset;

	writel(value, addr);
}
145 
pci_endpoint_test_irqhandler(int irq,void * dev_id)146 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
147 {
148 	struct pci_endpoint_test *test = dev_id;
149 	u32 reg;
150 
151 	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
152 	if (reg & STATUS_IRQ_RAISED) {
153 		test->last_irq = irq;
154 		complete(&test->irq_raised);
155 	}
156 
157 	return IRQ_HANDLED;
158 }
159 
pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test * test)160 static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
161 {
162 	struct pci_dev *pdev = test->pdev;
163 
164 	pci_free_irq_vectors(pdev);
165 	test->irq_type = IRQ_TYPE_UNDEFINED;
166 }
167 
pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test * test,int type)168 static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
169 						int type)
170 {
171 	int irq = -1;
172 	struct pci_dev *pdev = test->pdev;
173 	struct device *dev = &pdev->dev;
174 	bool res = true;
175 
176 	switch (type) {
177 	case IRQ_TYPE_INTX:
178 		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
179 		if (irq < 0)
180 			dev_err(dev, "Failed to get Legacy interrupt\n");
181 		break;
182 	case IRQ_TYPE_MSI:
183 		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
184 		if (irq < 0)
185 			dev_err(dev, "Failed to get MSI interrupts\n");
186 		break;
187 	case IRQ_TYPE_MSIX:
188 		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
189 		if (irq < 0)
190 			dev_err(dev, "Failed to get MSI-X interrupts\n");
191 		break;
192 	default:
193 		dev_err(dev, "Invalid IRQ type selected\n");
194 	}
195 
196 	if (irq < 0) {
197 		irq = 0;
198 		res = false;
199 	}
200 
201 	test->irq_type = type;
202 	test->num_irqs = irq;
203 
204 	return res;
205 }
206 
pci_endpoint_test_release_irq(struct pci_endpoint_test * test)207 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
208 {
209 	int i;
210 	struct pci_dev *pdev = test->pdev;
211 	struct device *dev = &pdev->dev;
212 
213 	for (i = 0; i < test->num_irqs; i++)
214 		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
215 
216 	test->num_irqs = 0;
217 }
218 
/*
 * Request one shared handler per allocated vector.  On failure, log
 * which vector could not be requested and release only the vectors
 * that were successfully requested so far (0 .. i - 1).
 * Returns true when all vectors have a handler installed.
 */
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (test->irq_type) {
	case IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	/* Only vectors 0..i-1 were requested; clamp so release frees just those. */
	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return false;
}
259 
/* One distinct fill byte pattern per BAR so cross-BAR aliasing is detectable. */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
268 
pci_endpoint_test_bar_memcmp(struct pci_endpoint_test * test,enum pci_barno barno,int offset,void * write_buf,void * read_buf,int size)269 static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
270 					enum pci_barno barno, int offset,
271 					void *write_buf, void *read_buf,
272 					int size)
273 {
274 	memset(write_buf, bar_test_pattern[barno], size);
275 	memcpy_toio(test->bar[barno] + offset, write_buf, size);
276 
277 	memcpy_fromio(read_buf, test->bar[barno] + offset, size);
278 
279 	return memcmp(write_buf, read_buf, size);
280 }
281 
pci_endpoint_test_bar(struct pci_endpoint_test * test,enum pci_barno barno)282 static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
283 				  enum pci_barno barno)
284 {
285 	int j, bar_size, buf_size, iters, remain;
286 	void *write_buf __free(kfree) = NULL;
287 	void *read_buf __free(kfree) = NULL;
288 	struct pci_dev *pdev = test->pdev;
289 
290 	bar_size = pci_resource_len(pdev, barno);
291 	if (!bar_size)
292 		return -ENODATA;
293 
294 	if (!test->bar[barno])
295 		return false;
296 
297 	if (barno == test->test_reg_bar)
298 		bar_size = 0x4;
299 
300 	/*
301 	 * Allocate a buffer of max size 1MB, and reuse that buffer while
302 	 * iterating over the whole BAR size (which might be much larger).
303 	 */
304 	buf_size = min(SZ_1M, bar_size);
305 
306 	write_buf = kmalloc(buf_size, GFP_KERNEL);
307 	if (!write_buf)
308 		return false;
309 
310 	read_buf = kmalloc(buf_size, GFP_KERNEL);
311 	if (!read_buf)
312 		return false;
313 
314 	iters = bar_size / buf_size;
315 	for (j = 0; j < iters; j++)
316 		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * j,
317 						 write_buf, read_buf, buf_size))
318 			return false;
319 
320 	remain = bar_size % buf_size;
321 	if (remain)
322 		if (pci_endpoint_test_bar_memcmp(test, barno, buf_size * iters,
323 						 write_buf, read_buf, remain))
324 			return false;
325 
326 	return true;
327 }
328 
pci_endpoint_test_intx_irq(struct pci_endpoint_test * test)329 static bool pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
330 {
331 	u32 val;
332 
333 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
334 				 IRQ_TYPE_INTX);
335 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
336 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
337 				 COMMAND_RAISE_INTX_IRQ);
338 	val = wait_for_completion_timeout(&test->irq_raised,
339 					  msecs_to_jiffies(1000));
340 	if (!val)
341 		return false;
342 
343 	return true;
344 }
345 
/*
 * Ask the endpoint to raise MSI/MSI-X vector @msi_num and verify that
 * the interrupt that actually fired maps back to the same vector.
 */
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 type = msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI;
	u32 cmd = msix ? COMMAND_RAISE_MSIX_IRQ : COMMAND_RAISE_MSI_IRQ;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, cmd);

	if (!wait_for_completion_timeout(&test->irq_raised,
					 msecs_to_jiffies(1000)))
		return false;

	/* msi_num is 1-based on the wire; pci_irq_vector() is 0-based. */
	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}
365 
/*
 * Sanity-check user-supplied transfer parameters: reject a zero size
 * and a size that would overflow when the alignment slack is added.
 * Returns 0 when the parameters are usable, -EINVAL otherwise.
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	size_t size = param->size;

	if (!size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}
381 
/*
 * PCITEST_COPY handler: have the endpoint copy a host buffer to another
 * host buffer and verify the result by comparing CRC32 of both sides.
 *
 * Both buffers are over-allocated by test->alignment so the DMA address
 * programmed into the endpoint can be rounded up without leaving the
 * allocation.  Error paths unwind in reverse acquisition order via the
 * goto labels at the bottom.  Returns true on a successful copy.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so the mapped address can be aligned up below. */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/* Round the DMA address up; mirror the same offset into the CPU address. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* The endpoint raises an IRQ when the copy has completed. */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

/* NOTE(review): label names lag their cleanup by one step — e.g.
 * err_dst_phys_addr frees the dst buffer — but the unwind order itself
 * is correct. */
err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
521 
/*
 * PCITEST_WRITE handler: fill a host buffer with random data, write its
 * CRC32 into the CHECKSUM register, and issue COMMAND_READ — "write"
 * from the host's point of view means the endpoint *reads* the buffer.
 * The endpoint verifies the checksum and sets STATUS_READ_SUCCESS.
 * Returns true when the endpoint reports a successful read.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so the mapped address can be aligned up below. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Round the DMA address up; mirror the same offset into the CPU address. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	/* Publish the expected checksum for the endpoint to verify against. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
622 
/*
 * PCITEST_READ handler: hand the endpoint a host buffer and issue
 * COMMAND_WRITE — "read" from the host's point of view means the
 * endpoint *writes* into the buffer.  The host then checks the data by
 * comparing its CRC32 against the value the endpoint left in the
 * CHECKSUM register.  Returns true when the checksums match.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate so the mapped address can be aligned up below. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Round the DMA address up; mirror the same offset into the CPU address. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the buffer so the data is coherent. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}
715 
pci_endpoint_test_clear_irq(struct pci_endpoint_test * test)716 static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
717 {
718 	pci_endpoint_test_release_irq(test);
719 	pci_endpoint_test_free_irq_vectors(test);
720 	return true;
721 }
722 
/*
 * PCITEST_SET_IRQTYPE handler: switch to @req_irq_type (INTX/MSI/MSI-X).
 * A no-op when that type is already active; otherwise the current
 * handlers and vectors are torn down and re-allocated.  On success the
 * module-level irq_type is updated so PCITEST_GET_IRQTYPE reports the
 * active type.  On failure the device is left with no vectors.
 */
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	irq_type = test->irq_type;
	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}
753 
/*
 * Dispatch all PCITEST_* ioctls.  The per-device mutex serialises the
 * tests; the completion and last_irq are reset up front so each
 * IRQ-based test starts from a clean state.  Unknown commands return
 * the initial -EINVAL.
 */
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		/* BAR0 of AM654 is excluded from testing. */
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}
807 
/* Character-device ops for the /dev/pci-endpoint-test.N misc device. */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
812 
/*
 * Probe: enable the device, map every MEM BAR, allocate IRQ vectors of
 * the configured type, and register a /dev/pci-endpoint-test.N misc
 * device.  Error paths unwind in reverse order via the labels at the
 * bottom.  Returns 0 on success or a negative errno.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	/* no_msi forces legacy interrupts regardless of the irq_type param. */
	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	/* Per-SoC overrides from the matching pci_device_id entry, if any. */
	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	/* Map every memory BAR; a failed map is fatal only for the register BAR. */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	/* Separate copy of the name: remove() parses and frees it independently. */
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}
959 
/*
 * Remove: undo everything probe() set up, in reverse order.  The IDA id
 * is recovered by parsing it back out of the misc device name that
 * probe() formatted with the same template.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
987 
/* Per-SoC configuration referenced via driver_data in the ID table below. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

/* AM654 keeps its test registers in BAR2 and needs 64K-aligned DMA buffers. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/* test_reg_bar defaults to BAR_0 (zero-initialised). */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

/* test_reg_bar defaults to BAR_0 (zero-initialised). */
static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};
1009 
1010 /*
1011  * If the controller's Vendor/Device ID are programmable, you may be able to
1012  * use one of the existing entries for testing instead of adding a new one.
1013  */
/*
 * Supported controllers.  Entries without driver_data leave alignment 0
 * and keep the module-level irq_type selected at probe time.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1057 
/* PCI driver glue; SR-IOV enablement uses the generic simple helper. */
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1070