• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * PCIe host controller driver for Texas Instruments Keystone SoCs
4  *
5  * Copyright (C) 2013-2014 Texas Instruments., Ltd.
6  *		http://www.ti.com
7  *
8  * Author: Murali Karicheri <m-karicheri2@ti.com>
9  * Implementation based on pci-exynos.c and pcie-designware.c
10  */
11 
12 #include <linux/irqchip/chained_irq.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/init.h>
18 #include <linux/msi.h>
19 #include <linux/of_irq.h>
20 #include <linux/of.h>
21 #include <linux/of_pci.h>
22 #include <linux/platform_device.h>
23 #include <linux/phy/phy.h>
24 #include <linux/resource.h>
25 #include <linux/signal.h>
26 
27 #include "pcie-designware.h"
28 #include "pci-keystone.h"
29 
30 #define DRIVER_NAME	"keystone-pcie"
31 
32 /* DEV_STAT_CTRL */
33 #define PCIE_CAP_BASE		0x70
34 
35 /* PCIE controller device IDs */
36 #define PCIE_RC_K2HK		0xb008
37 #define PCIE_RC_K2E		0xb009
38 #define PCIE_RC_K2L		0xb00a
39 #define PCIE_RC_K2G		0xb00b
40 
41 #define to_keystone_pcie(x)	dev_get_drvdata((x)->dev)
42 
quirk_limit_mrrs(struct pci_dev * dev)43 static void quirk_limit_mrrs(struct pci_dev *dev)
44 {
45 	struct pci_bus *bus = dev->bus;
46 	struct pci_dev *bridge = bus->self;
47 	static const struct pci_device_id rc_pci_devids[] = {
48 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
49 		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
50 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
51 		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
52 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
53 		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
54 		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
55 		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
56 		{ 0, },
57 	};
58 
59 	if (pci_is_root_bus(bus))
60 		return;
61 
62 	/* look for the host bridge */
63 	while (!pci_is_root_bus(bus)) {
64 		bridge = bus->self;
65 		bus = bus->parent;
66 	}
67 
68 	if (bridge) {
69 		/*
70 		 * Keystone PCI controller has a h/w limitation of
71 		 * 256 bytes maximum read request size.  It can't handle
72 		 * anything higher than this.  So force this limit on
73 		 * all downstream devices.
74 		 */
75 		if (pci_match_id(rc_pci_devids, bridge)) {
76 			if (pcie_get_readrq(dev) > 256) {
77 				dev_info(&dev->dev, "limiting MRRS to 256\n");
78 				pcie_set_readrq(dev, 256);
79 			}
80 		}
81 	}
82 }
83 DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
84 
ks_pcie_establish_link(struct keystone_pcie * ks_pcie)85 static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
86 {
87 	struct dw_pcie *pci = ks_pcie->pci;
88 	struct pcie_port *pp = &pci->pp;
89 	struct device *dev = pci->dev;
90 	unsigned int retries;
91 
92 	dw_pcie_setup_rc(pp);
93 
94 	if (dw_pcie_link_up(pci)) {
95 		dev_info(dev, "Link already up\n");
96 		return 0;
97 	}
98 
99 	/* check if the link is up or not */
100 	for (retries = 0; retries < 5; retries++) {
101 		ks_dw_pcie_initiate_link_train(ks_pcie);
102 		if (!dw_pcie_wait_for_link(pci))
103 			return 0;
104 	}
105 
106 	dev_err(dev, "phy link never came up\n");
107 	return -ETIMEDOUT;
108 }
109 
ks_pcie_msi_irq_handler(struct irq_desc * desc)110 static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
111 {
112 	unsigned int irq = irq_desc_get_irq(desc);
113 	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
114 	u32 offset = irq - ks_pcie->msi_host_irqs[0];
115 	struct dw_pcie *pci = ks_pcie->pci;
116 	struct device *dev = pci->dev;
117 	struct irq_chip *chip = irq_desc_get_chip(desc);
118 
119 	dev_dbg(dev, "%s, irq %d\n", __func__, irq);
120 
121 	/*
122 	 * The chained irq handler installation would have replaced normal
123 	 * interrupt driver handler so we need to take care of mask/unmask and
124 	 * ack operation.
125 	 */
126 	chained_irq_enter(chip, desc);
127 	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
128 	chained_irq_exit(chip, desc);
129 }
130 
/**
 * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
 * @desc: Pointer to irq descriptor
 *
 * Traverse through pending legacy interrupts and invoke handler for each. Also
 * takes care of interrupt controller level mask/ack operation.
 *
 * (Note: the function takes only @desc; the IRQ number is derived from it.)
 */
static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
	struct dw_pcie *pci = ks_pcie->pci;
	struct device *dev = pci->dev;
	/* INTx index = offset of this IRQ from the first legacy host IRQ */
	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);

	dev_dbg(dev, ": Handling legacy irq %d\n", irq);

	/*
	 * The chained irq handler installation would have replaced normal
	 * interrupt driver handler so we need to take care of mask/unmask and
	 * ack operation.
	 */
	chained_irq_enter(chip, desc);
	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
	chained_irq_exit(chip, desc);
}
159 
/*
 * ks_pcie_get_irq_controller_info() - Parse a host interrupt-controller node
 * @ks_pcie: Keystone PCIe controller state
 * @controller: child node name: "legacy-interrupt-controller" or
 *		"msi-interrupt-controller"
 * @num_irqs: out parameter; number of host IRQs successfully mapped
 *
 * Finds the named interrupt-controller child of the PCIe DT node, maps up
 * to PCI_NUM_INTX (legacy) or MAX_MSI_HOST_IRQS (MSI) interrupts from it
 * into the matching array in @ks_pcie, and stores the node pointer
 * (legacy_intc_np / msi_intc_np).
 *
 * Return: 0 if at least one IRQ was mapped, -EINVAL otherwise.
 */
static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
					   char *controller, int *num_irqs)
{
	int temp, max_host_irqs, legacy = 1, *host_irqs;
	struct device *dev = ks_pcie->pci->dev;
	struct device_node *np_pcie = dev->of_node, **np_temp;

	/* Any name other than the MSI controller's is treated as legacy. */
	if (!strcmp(controller, "msi-interrupt-controller"))
		legacy = 0;

	/* Select the destination node pointer, limit, and IRQ array. */
	if (legacy) {
		np_temp = &ks_pcie->legacy_intc_np;
		max_host_irqs = PCI_NUM_INTX;
		host_irqs = &ks_pcie->legacy_host_irqs[0];
	} else {
		np_temp = &ks_pcie->msi_intc_np;
		max_host_irqs = MAX_MSI_HOST_IRQS;
		host_irqs =  &ks_pcie->msi_host_irqs[0];
	}

	/* interrupt controller is in a child node */
	*np_temp = of_get_child_by_name(np_pcie, controller);
	if (!(*np_temp)) {
		dev_err(dev, "Node for %s is absent\n", controller);
		return -EINVAL;
	}

	temp = of_irq_count(*np_temp);
	if (!temp) {
		dev_err(dev, "No IRQ entries in %s\n", controller);
		of_node_put(*np_temp);
		return -EINVAL;
	}

	/* Extra entries beyond the limit are silently ignored below. */
	if (temp > max_host_irqs)
		dev_warn(dev, "Too many %s interrupts defined %u\n",
			(legacy ? "legacy" : "MSI"), temp);

	/*
	 * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to
	 * 7 (MSI)
	 */
	for (temp = 0; temp < max_host_irqs; temp++) {
		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
		if (!host_irqs[temp])
			break;
	}

	/*
	 * NOTE(review): the reference is dropped here although *np_temp stays
	 * stored in ks_pcie and is used later (e.g. msi_intc_np is passed to
	 * ks_dw_pcie_host_init()) — confirm the node's lifetime is guaranteed
	 * elsewhere.
	 */
	of_node_put(*np_temp);

	/* Success only if at least one IRQ was mapped. */
	if (temp) {
		*num_irqs = temp;
		return 0;
	}

	return -EINVAL;
}
217 
ks_pcie_setup_interrupts(struct keystone_pcie * ks_pcie)218 static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
219 {
220 	int i;
221 
222 	/* Legacy IRQ */
223 	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
224 		irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i],
225 						 ks_pcie_legacy_irq_handler,
226 						 ks_pcie);
227 	}
228 	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
229 
230 	/* MSI IRQ */
231 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
232 		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
233 			irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i],
234 							 ks_pcie_msi_irq_handler,
235 							 ks_pcie);
236 		}
237 	}
238 
239 	if (ks_pcie->error_irq > 0)
240 		ks_dw_pcie_enable_error_irq(ks_pcie);
241 }
242 
#ifdef CONFIG_ARM
/*
 * When a PCI device does not exist during config cycles, keystone host gets a
 * bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of faults.
 */
static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
				struct pt_regs *regs)
{
	/* Fetch the instruction that triggered the abort. */
	unsigned long instr = *(unsigned long *) instruction_pointer(regs);

	/*
	 * NOTE(review): the mask/value pair appears to select ARM load-class
	 * instructions — confirm against the ARM ARM encoding tables.  For a
	 * match, fake an all-ones read result and skip the 4-byte instruction.
	 */
	if ((instr & 0x0e100090) == 0x00100090) {
		int reg = (instr >> 12) & 15;	/* destination register field */

		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
	}

	/* Always report the abort as handled. */
	return 0;
}
#endif
264 
/*
 * ks_pcie_host_init() - Keystone-specific host init (dw_pcie_host_ops hook)
 * @pp: DesignWare PCIe port
 *
 * Establishes the link, programs the Keystone application registers, sets
 * up interrupts, and patches a few config-space registers in the DBI
 * space.  On ARM, also hooks the async external abort fault code so that
 * config accesses to absent devices do not oops.
 *
 * Return: always 0 (link-training failure is logged but not fatal here).
 */
static int __init ks_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
	u32 val;

	ks_pcie_establish_link(ks_pcie);
	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
	ks_pcie_setup_interrupts(ks_pcie);
	/* Advertise 32-bit I/O addressing in the bridge I/O base/limit */
	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
			pci->dbi_base + PCI_IO_BASE);

	/* update the Device ID with the value read from hardware in probe */
	writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);

	/* update the DEV_STAT_CTRL to publish right mrrs */
	val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
	val &= ~PCI_EXP_DEVCTL_READRQ;
	/* set the mrrs to 256 bytes (READRQ field encoding 001b at bit 12) */
	val |= BIT(12);
	writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);

#ifdef CONFIG_ARM
	/*
	 * PCIe access errors that result into OCP errors are caught by ARM as
	 * "External aborts"
	 */
	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
			"Asynchronous external abort");
#endif

	return 0;
}
298 
/* DesignWare host ops, wired to the Keystone (DW v3.65) implementations */
static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
	.rd_other_conf = ks_dw_pcie_rd_other_conf,
	.wr_other_conf = ks_dw_pcie_wr_other_conf,
	.host_init = ks_pcie_host_init,
	.msi_set_irq = ks_dw_pcie_msi_set_irq,
	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
	.get_msi_addr = ks_dw_pcie_get_msi_addr,
	.msi_host_init = ks_dw_pcie_msi_host_init,
	.msi_irq_ack = ks_dw_pcie_msi_irq_ack,
	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
};
310 
pcie_err_irq_handler(int irq,void * priv)311 static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
312 {
313 	struct keystone_pcie *ks_pcie = priv;
314 
315 	return ks_dw_pcie_handle_error_irq(ks_pcie);
316 }
317 
ks_add_pcie_port(struct keystone_pcie * ks_pcie,struct platform_device * pdev)318 static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
319 			 struct platform_device *pdev)
320 {
321 	struct dw_pcie *pci = ks_pcie->pci;
322 	struct pcie_port *pp = &pci->pp;
323 	struct device *dev = &pdev->dev;
324 	int ret;
325 
326 	ret = ks_pcie_get_irq_controller_info(ks_pcie,
327 					"legacy-interrupt-controller",
328 					&ks_pcie->num_legacy_host_irqs);
329 	if (ret)
330 		return ret;
331 
332 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
333 		ret = ks_pcie_get_irq_controller_info(ks_pcie,
334 						"msi-interrupt-controller",
335 						&ks_pcie->num_msi_host_irqs);
336 		if (ret)
337 			return ret;
338 	}
339 
340 	/*
341 	 * Index 0 is the platform interrupt for error interrupt
342 	 * from RC.  This is optional.
343 	 */
344 	ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0);
345 	if (ks_pcie->error_irq <= 0)
346 		dev_info(dev, "no error IRQ defined\n");
347 	else {
348 		ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
349 				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
350 		if (ret < 0) {
351 			dev_err(dev, "failed to request error IRQ %d\n",
352 				ks_pcie->error_irq);
353 			return ret;
354 		}
355 	}
356 
357 	pp->ops = &keystone_pcie_host_ops;
358 	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
359 	if (ret) {
360 		dev_err(dev, "failed to initialize host\n");
361 		return ret;
362 	}
363 
364 	return 0;
365 }
366 
/* Match DT nodes of type "pci" with compatible "ti,keystone-pcie" */
static const struct of_device_id ks_pcie_of_match[] = {
	{
		.type = "pci",
		.compatible = "ti,keystone-pcie",
	},
	{ },
};
374 
/* Core DesignWare ops: link-state query delegated to the Keystone helper */
static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = ks_dw_pcie_link_up,
};
378 
/*
 * ks_pcie_remove() - Unbind the controller; disables the functional clock.
 *
 * NOTE(review): the error IRQ requested in ks_add_pcie_port() and the PHY
 * initialized in probe are not released here — confirm this is intended
 * (driver is builtin-only, so remove may be unreachable in practice).
 */
static int __exit ks_pcie_remove(struct platform_device *pdev)
{
	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);

	clk_disable_unprepare(ks_pcie->clk);

	return 0;
}
387 
ks_pcie_probe(struct platform_device * pdev)388 static int __init ks_pcie_probe(struct platform_device *pdev)
389 {
390 	struct device *dev = &pdev->dev;
391 	struct dw_pcie *pci;
392 	struct keystone_pcie *ks_pcie;
393 	struct resource *res;
394 	void __iomem *reg_p;
395 	struct phy *phy;
396 	int ret;
397 
398 	ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
399 	if (!ks_pcie)
400 		return -ENOMEM;
401 
402 	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
403 	if (!pci)
404 		return -ENOMEM;
405 
406 	pci->dev = dev;
407 	pci->ops = &dw_pcie_ops;
408 
409 	ks_pcie->pci = pci;
410 
411 	/* initialize SerDes Phy if present */
412 	phy = devm_phy_get(dev, "pcie-phy");
413 	if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
414 		return PTR_ERR(phy);
415 
416 	if (!IS_ERR_OR_NULL(phy)) {
417 		ret = phy_init(phy);
418 		if (ret < 0)
419 			return ret;
420 	}
421 
422 	/* index 2 is to read PCI DEVICE_ID */
423 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
424 	reg_p = devm_ioremap_resource(dev, res);
425 	if (IS_ERR(reg_p))
426 		return PTR_ERR(reg_p);
427 	ks_pcie->device_id = readl(reg_p) >> 16;
428 	devm_iounmap(dev, reg_p);
429 	devm_release_mem_region(dev, res->start, resource_size(res));
430 
431 	ks_pcie->np = dev->of_node;
432 	platform_set_drvdata(pdev, ks_pcie);
433 	ks_pcie->clk = devm_clk_get(dev, "pcie");
434 	if (IS_ERR(ks_pcie->clk)) {
435 		dev_err(dev, "Failed to get pcie rc clock\n");
436 		return PTR_ERR(ks_pcie->clk);
437 	}
438 	ret = clk_prepare_enable(ks_pcie->clk);
439 	if (ret)
440 		return ret;
441 
442 	platform_set_drvdata(pdev, ks_pcie);
443 
444 	ret = ks_add_pcie_port(ks_pcie, pdev);
445 	if (ret < 0)
446 		goto fail_clk;
447 
448 	return 0;
449 fail_clk:
450 	clk_disable_unprepare(ks_pcie->clk);
451 
452 	return ret;
453 }
454 
/*
 * Builtin-only platform driver (no module unload path); probe/remove are
 * __init/__exit, hence the __refdata annotation to silence section
 * mismatch warnings.
 */
static struct platform_driver ks_pcie_driver __refdata = {
	.probe  = ks_pcie_probe,
	.remove = __exit_p(ks_pcie_remove),
	.driver = {
		.name	= "keystone-pcie",
		.of_match_table = of_match_ptr(ks_pcie_of_match),
	},
};
builtin_platform_driver(ks_pcie_driver);
464