/**
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME			"pci-endpoint-test"

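/* Registers of the test device, accessed through the test BAR */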
#define PCI_ENDPOINT_TEST_MAGIC		0x0

#define PCI_ENDPOINT_TEST_COMMAND	0x4
#define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define MSI_NUMBER_SHIFT		2
/* 6 bits for MSI number */
#define COMMAND_READ                    BIT(8)
#define COMMAND_WRITE                   BIT(9)
#define COMMAND_COPY                    BIT(10)

#define PCI_ENDPOINT_TEST_STATUS	0x8
#define STATUS_READ_SUCCESS             BIT(0)
#define STATUS_READ_FAIL                BIT(1)
#define STATUS_WRITE_SUCCESS            BIT(2)
#define STATUS_WRITE_FAIL               BIT(3)
#define STATUS_COPY_SUCCESS             BIT(4)
#define STATUS_COPY_FAIL                BIT(5)
#define STATUS_IRQ_RAISED               BIT(6)
#define STATUS_SRC_ADDR_INVALID         BIT(7)
#define STATUS_DST_ADDR_INVALID         BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0xc
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE		0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM	0x20

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[6];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	bool no_msi;
};

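/* Accessors for the test registers (via test->base) and for arbitrary BARs */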
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

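/*
 * Shared handler for the test interrupts: remember which IRQ fired, wake up
 * any waiter and acknowledge the event by clearing STATUS_IRQ_RAISED.
 */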
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

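/*
 * BAR test: write a test pattern to every dword of the BAR and read it back.
 * For the BAR holding the test registers, only the first dword is exercised.
 */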
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

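/*
 * Legacy interrupt test: ask the endpoint to raise a legacy IRQ and wait up
 * to one second for the interrupt handler to signal completion.
 */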
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

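/*
 * MSI test: ask the endpoint to raise MSI vector msi_num (1-based) and check
 * that the interrupt which actually fired corresponds to that vector.
 */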
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u8 msi_num)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msi_num << MSI_NUMBER_SHIFT |
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (test->last_irq - pdev->irq == msi_num - 1)
		return true;

	return false;
}

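/*
 * Copy test: fill a source DMA buffer with random data, have the endpoint
 * copy it into a destination buffer and compare the CRC32 of both buffers.
 */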
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 src_crc32;
	u32 dst_crc32;

	if (size > SIZE_MAX - alignment)
		goto err;

	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_src_phys_addr, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	get_random_bytes(src_addr, size);
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_dst_phys_addr, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "failed to allocate destination address\n");
		ret = false;
		goto err_orig_src_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_dst_addr,
			  orig_dst_phys_addr);

err_orig_src_addr:
	dma_free_coherent(dev, size + alignment, orig_src_addr,
			  orig_src_phys_addr);

err:
	return ret;
}

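/*
 * Write test (host to endpoint): fill a DMA buffer with random data, program
 * its address, size and CRC32 into the test registers, then issue
 * COMMAND_READ so the endpoint reads the buffer and verifies the checksum.
 */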
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	if (size > SIZE_MAX - alignment)
		goto err;

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "failed to allocate address\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	get_random_bytes(addr, size);

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
	return ret;
}

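/*
 * Read test (endpoint to host): hand the endpoint an empty DMA buffer and
 * issue COMMAND_WRITE so it fills the buffer, then compare the local CRC32
 * with the checksum reported in the CHECKSUM register.
 */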
static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	if (size > SIZE_MAX - alignment)
		goto err;

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
err:
	return ret;
}

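/* Dispatch the PCITEST_* ioctls from userspace, serialized by test->mutex */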
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
		ret = pci_endpoint_test_msi_irq(test, arg);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

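/*
 * Probe: enable the device, allocate MSI vectors (unless no_msi), request
 * the test interrupts, map all BARs and register a misc device named
 * "pci-endpoint-test.<id>" through which the ioctls are exposed.
 */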
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int i;
	int err;
	int irq = 0;
	int id;
	char name[20];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		no_msi = data->no_msi;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!no_msi) {
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "failed to get MSI interrupts\n");
		test->num_irqs = irq;
	}

	err = devm_request_irq(dev, pdev->irq, pci_endpoint_test_irqhandler,
			       IRQF_SHARED, DRV_MODULE_NAME, test);
	if (err) {
		dev_err(dev, "failed to request IRQ %d\n", pdev->irq);
		goto err_disable_msi;
	}

	for (i = 1; i < irq; i++) {
		err = devm_request_irq(dev, pdev->irq + i,
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, DRV_MODULE_NAME, test);
		if (err)
			dev_err(dev, "failed to request IRQ %d for MSI %d\n",
				pdev->irq + i, i + 1);
	}

	for (bar = BAR_0; bar <= BAR_5; bar++) {
		base = pci_ioremap_bar(pdev, bar);
		if (!base) {
			dev_err(dev, "failed to read BAR%d\n", bar);
			WARN_ON(bar == test_reg_bar);
		}
		test->bar[bar] = base;
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = name;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "failed to register device\n");
		goto err_ida_remove;
	}

	return 0;

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	for (i = 0; i < irq; i++)
		devm_free_irq(dev, pdev->irq + i, test);

err_disable_msi:
	pci_disable_msi(pdev);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

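/*
 * Undo probe: deregister the misc device, release the id, unmap the BARs and
 * free the test IRQs before disabling MSI and the device.
 */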
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	int i;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}
	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(&pdev->dev, pdev->irq + i, test);
	pci_disable_msi(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");