// SPDX-License-Identifier: GPL-2.0
/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "../../pci.h"
#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define	ERR_SYS						BIT(0)
#define	ERR_FATAL					BIT(1)
#define	ERR_NONFATAL					BIT(2)
#define	ERR_COR						BIT(3)
#define	ERR_AXI						BIT(4)
#define	ERR_ECRC					BIT(5)
#define	PME_TURN_OFF					BIT(8)
#define	PME_TO_ACK					BIT(9)
#define	PM_PME						BIT(10)
#define	LINK_REQ_RST					BIT(11)
#define	LINK_UP_EVT					BIT(12)
#define	CFG_BME_EVT					BIT(13)
#define	CFG_MSE_EVT					BIT(14)
#define	INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
			ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
			LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define	PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define	PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define	INTA						BIT(0)
#define	INTB						BIT(1)
#define	INTC						BIT(2)
#define	INTD						BIT(3)
#define	MSI						BIT(4)
#define	LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define	PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define	DEVICE_TYPE_EP					0x0
#define	DEVICE_TYPE_LEG_EP				0x1
#define	DEVICE_TYPE_RC					0x4

#define	PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define	LTSSM_EN					0x1

#define	PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define	LINK_UP						BIT(16)
#define	DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define	EXP_CAP_ID_OFFSET				0x70

#define	PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define	PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define	PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define	MSI_REQ_GRANT					BIT(0)
#define	MSI_VECTOR_SHIFT				7

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

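/*
 * cpu_addr_fixup callback: the DRA7xx wrapper uses only the low 28 bits
 * (DRA7XX_CPU_TO_BUS_ADDR) of a CPU address on the PCIe side, so strip
 * the upper bits before the address is programmed into the ATU.
 */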
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

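/*
 * Start the link: if the DT caps the link at Gen1, clamp both the link
 * capability (LNKCAP) and the target link speed (LNKCTL2) to 2.5 GT/s
 * before setting LTSSM_EN to begin link training.
 */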
static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

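/*
 * Write the MSI/INTx status register to clear anything pending, then
 * enable both MSI and legacy interrupt reporting in the wrapper.
 */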
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get an INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

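/*
 * Handler for the MSI/INTx wrapper interrupt: dispatch MSIs to the
 * DesignWare MSI code and legacy INTA-INTD to the INTx IRQ domain,
 * then acknowledge the handled bits in IRQSTATUS_MSI.
 */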
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	unsigned long reg;
	u32 virq, bit;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
			virq = irq_find_mapping(dra7xx->irq_domain, bit);
			if (virq)
				generic_handle_irq(virq);
		}
		break;
	}

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}

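/*
 * Handler for the main wrapper interrupt: log error, power-management
 * and configuration events, and notify the endpoint core when a
 * link-up event arrives in EP mode.
 */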
static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

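/*
 * Pulse the legacy interrupt through the TI wrapper assert/deassert
 * registers, holding it asserted for 1 ms.
 */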
static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

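/*
 * Trigger an MSI from the endpoint: program the zero-based vector
 * number and set MSI_REQ_GRANT in PCIECTRL_TI_CONF_MSI_XMT.
 */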
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
				 enum pci_epc_irq_type type, u16 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base2))
		return PTR_ERR(pci->dbi_base2);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	pp->ops = &dra7xx_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

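/* Power off and exit every PHY, in reverse order of enabling. */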
static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dev: device of the DRA7xx PCIe controller to which the workaround applies
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned result in an
 * incorrect mapping to the TLP Address and Byte Enable fields. Byte and
 * half-word accesses to byte offsets 0x1, 0x2 and 0x3 are therefore not
 * possible.
 *
 * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

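/*
 * Probe: map the ti_conf wrapper registers, acquire and power on the
 * PHYs, then set up the controller as either root complex or endpoint
 * according to the matched compatible string.
 */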
static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct resource *res;
	struct dw_pcie *pci;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
			ret = -ENODEV;
			goto err_gpio;
		}

		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
	pm_runtime_put(dev);

err_get_sync:
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
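/*
 * System sleep: in RC mode the Memory Space Enable bit of the root
 * port's command register is cleared on suspend and set again on
 * resume; nothing is done here in EP mode.
 */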
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

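/* The PCIe PHYs are powered down and brought back up in the noirq phase. */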
static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

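/*
 * Shutdown: stop the LTSSM, drop the runtime PM reference and power
 * down the PHYs.
 */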
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);