// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_J721E			0xb00d
#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2			0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
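/*
 * Load-time configuration sketch (illustrative assumption, not taken from
 * the original file): assuming the driver is built as the usual
 * pci_endpoint_test.ko module, the parameters above can be set when the
 * module is loaded, for example to force legacy interrupts:
 *
 *	modprobe pci_endpoint_test irq_type=0
 */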

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

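/*
 * Copy test: program source/destination bus addresses, transfer size,
 * flags and interrupt selection into the endpoint's test registers, issue
 * COMMAND_COPY, wait for the completion interrupt and then compare the
 * CRC32 of the source and destination buffers.
 */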
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

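/*
 * Host-to-endpoint ("write") test: fill a local buffer with random data,
 * publish its CRC32 via PCI_ENDPOINT_TEST_CHECKSUM and ask the endpoint
 * to fetch the buffer (COMMAND_READ is named from the endpoint's point of
 * view); STATUS_READ_SUCCESS reports whether the endpoint saw matching
 * data.
 */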
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

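/*
 * Endpoint-to-host ("read") test: ask the endpoint to write data into a
 * host buffer (COMMAND_WRITE is named from the endpoint's point of view)
 * and then compare the buffer's CRC32 against the checksum the endpoint
 * reports in PCI_ENDPOINT_TEST_CHECKSUM.
 */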
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
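/*
 * Userspace usage sketch (an illustration assuming the standard uapi
 * interface, not part of the original file): each probed device is exposed
 * as /dev/pci-endpoint-test.<id> and is driven with the PCITEST_* ioctls
 * declared in <uapi/linux/pcitest.h>, roughly:
 *
 *	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *	struct pci_endpoint_test_xfer_param param = { .size = 1024 };
 *	int ok = ioctl(fd, PCITEST_WRITE, &param);	// 1 on success
 *
 * The in-tree client tools/pci/pcitest.c exercises the same interface.
 */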

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");