/*
 * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs
 *
 * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/resource.h>
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/gpio/consumer.h>

#include "pcie-designware.h"

/* PCIe controller wrapper DRA7XX configuration registers */

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN		0x0024
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN		0x0028
#define ERR_SYS						BIT(0)
#define ERR_FATAL					BIT(1)
#define ERR_NONFATAL					BIT(2)
#define ERR_COR						BIT(3)
#define ERR_AXI						BIT(4)
#define ERR_ECRC					BIT(5)
#define PME_TURN_OFF					BIT(8)
#define PME_TO_ACK					BIT(9)
#define PM_PME						BIT(10)
#define LINK_REQ_RST					BIT(11)
#define LINK_UP_EVT					BIT(12)
#define CFG_BME_EVT					BIT(13)
#define CFG_MSE_EVT					BIT(14)
#define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \
		    ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \
		    LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT)

#define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI		0x0034
#define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI		0x0038
#define INTA						BIT(0)
#define INTB						BIT(1)
#define INTC						BIT(2)
#define INTD						BIT(3)
#define MSI						BIT(4)
#define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD)

#define PCIECTRL_TI_CONF_DEVICE_TYPE			0x0100
#define DEVICE_TYPE_EP					0x0
#define DEVICE_TYPE_LEG_EP				0x1
#define DEVICE_TYPE_RC					0x4

#define PCIECTRL_DRA7XX_CONF_DEVICE_CMD			0x0104
#define LTSSM_EN					0x1

#define PCIECTRL_DRA7XX_CONF_PHY_CS			0x010C
#define LINK_UP						BIT(16)
#define DRA7XX_CPU_TO_BUS_ADDR				0x0FFFFFFF

#define EXP_CAP_ID_OFFSET				0x70

#define PCIECTRL_TI_CONF_INTX_ASSERT			0x0124
#define PCIECTRL_TI_CONF_INTX_DEASSERT			0x0128

#define PCIECTRL_TI_CONF_MSI_XMT			0x012c
#define MSI_REQ_GRANT					BIT(0)
#define MSI_VECTOR_SHIFT				7

struct dra7xx_pcie {
	struct dw_pcie		*pci;
	void __iomem		*base;		/* DT ti_conf */
	int			phy_count;	/* DT phy-names count */
	struct phy		**phy;
	int			link_gen;
	struct irq_domain	*irq_domain;
	enum dw_pcie_device_mode mode;
};

struct dra7xx_pcie_of_data {
	enum dw_pcie_device_mode mode;
};

#define to_dra7xx_pcie(x)	dev_get_drvdata((x)->dev)

static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset)
{
	return readl(pcie->base + offset);
}

static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
				      u32 value)
{
	writel(value, pcie->base + offset);
}

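/*
 * Translate a CPU address to the matching PCI bus address by masking off the
 * bits that the DRA7xx wrapper does not forward on the bus
 * (DRA7XX_CPU_TO_BUS_ADDR keeps the lower 28 bits).
 */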
static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
{
	return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}

static int dra7xx_pcie_link_up(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS);

	return !!(reg & LINK_UP);
}

static void dra7xx_pcie_stop_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);
}

static int dra7xx_pcie_establish_link(struct dw_pcie *pci)
{
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device *dev = pci->dev;
	u32 reg;
	u32 exp_cap_off = EXP_CAP_ID_OFFSET;

	if (dw_pcie_link_up(pci)) {
		dev_err(dev, "link is already up\n");
		return 0;
	}

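	/*
	 * When limited to Gen1, cap both the advertised link capability
	 * (LNKCAP) and the target link speed (LNKCTL2) to 2.5 GT/s.
	 */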
	if (dra7xx->link_gen == 1) {
		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP,
			     4, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCAP, 4, reg);
		}

		dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2,
			     2, &reg);
		if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) {
			reg &= ~((u32)PCI_EXP_LNKCAP_SLS);
			reg |= PCI_EXP_LNKCAP_SLS_2_5GB;
			dw_pcie_write(pci->dbi_base + exp_cap_off +
				      PCI_EXP_LNKCTL2, 2, reg);
		}
	}

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg |= LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	return 0;
}

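/*
 * Clear any status that is already pending in the IRQSTATUS registers
 * before the corresponding events are unmasked via IRQENABLE_SET.
 */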
static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI,
			   LEG_EP_INTERRUPTS | MSI);

	dra7xx_pcie_writel(dra7xx,
			   PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI,
			   MSI | LEG_EP_INTERRUPTS);
}

static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN,
			   INTERRUPTS);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN,
			   INTERRUPTS);
}

static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
	dra7xx_pcie_enable_msi_interrupts(dra7xx);
}

static int dra7xx_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	dw_pcie_setup_rc(pp);

	dra7xx_pcie_establish_link(pci);
	dw_pcie_wait_for_link(pci);
	dw_pcie_msi_init(pp);
	dra7xx_pcie_enable_interrupts(dra7xx);

	return 0;
}

static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = {
	.host_init = dra7xx_pcie_host_init,
};

static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = dra7xx_pcie_intx_map,
	.xlate = pci_irqd_intx_xlate,
};

static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node = of_get_next_child(node, NULL);

	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						   &intx_domain_ops, pp);
	if (!dra7xx->irq_domain) {
		dev_err(dev, "Failed to get a INTx IRQ domain\n");
		return -ENODEV;
	}

	return 0;
}

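/*
 * Dispatch the MSI/INTx status reported by the wrapper: MSIs are handed to
 * the DesignWare core, legacy INTx events are demultiplexed through the
 * local INTx IRQ domain. The status is acknowledged by writing it back to
 * IRQSTATUS_MSI.
 */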
static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);

	switch (reg) {
	case MSI:
		dw_handle_msi_irq(pp);
		break;
	case INTA:
	case INTB:
	case INTC:
	case INTD:
		generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
						    ffs(reg) - 1));
		break;
	}

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg);

	return IRQ_HANDLED;
}

static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
{
	struct dra7xx_pcie *dra7xx = arg;
	struct dw_pcie *pci = dra7xx->pci;
	struct device *dev = pci->dev;
	struct dw_pcie_ep *ep = &pci->ep;
	u32 reg;

	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN);

	if (reg & ERR_SYS)
		dev_dbg(dev, "System Error\n");

	if (reg & ERR_FATAL)
		dev_dbg(dev, "Fatal Error\n");

	if (reg & ERR_NONFATAL)
		dev_dbg(dev, "Non Fatal Error\n");

	if (reg & ERR_COR)
		dev_dbg(dev, "Correctable Error\n");

	if (reg & ERR_AXI)
		dev_dbg(dev, "AXI tag lookup fatal Error\n");

	if (reg & ERR_ECRC)
		dev_dbg(dev, "ECRC Error\n");

	if (reg & PME_TURN_OFF)
		dev_dbg(dev,
			"Power Management Event Turn-Off message received\n");

	if (reg & PME_TO_ACK)
		dev_dbg(dev,
			"Power Management Turn-Off Ack message received\n");

	if (reg & PM_PME)
		dev_dbg(dev, "PM Power Management Event message received\n");

	if (reg & LINK_REQ_RST)
		dev_dbg(dev, "Link Request Reset\n");

	if (reg & LINK_UP_EVT) {
		if (dra7xx->mode == DW_PCIE_EP_TYPE)
			dw_pcie_ep_linkup(ep);
		dev_dbg(dev, "Link-up state change\n");
	}

	if (reg & CFG_BME_EVT)
		dev_dbg(dev, "CFG 'Bus Master Enable' change\n");

	if (reg & CFG_MSE_EVT)
		dev_dbg(dev, "CFG 'Memory Space Enable' change\n");

	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg);

	return IRQ_HANDLED;
}

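/*
 * Disable a BAR by clearing it in both the dbi2 (shadow) and dbi register
 * spaces.
 */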
static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
	u32 reg;

	reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	dw_pcie_writel_dbi2(pci, reg, 0x0);
	dw_pcie_writel_dbi(pci, reg, 0x0);
}

static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
	enum pci_barno bar;

	for (bar = BAR_0; bar <= BAR_5; bar++)
		dw_pcie_ep_reset_bar(pci, bar);

	dra7xx_pcie_enable_wrapper_interrupts(dra7xx);
}

static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx)
{
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1);
	mdelay(1);
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1);
}

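/*
 * The zero-based MSI vector number goes into the bits above
 * MSI_VECTOR_SHIFT; setting MSI_REQ_GRANT requests the wrapper to send the
 * MSI.
 */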
static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
				      u8 interrupt_num)
{
	u32 reg;

	reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT;
	reg |= MSI_REQ_GRANT;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}

static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
				 enum pci_epc_irq_type type, u8 interrupt_num)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);

	switch (type) {
	case PCI_EPC_IRQ_LEGACY:
		dra7xx_pcie_raise_legacy_irq(dra7xx);
		break;
	case PCI_EPC_IRQ_MSI:
		dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num);
		break;
	default:
		dev_err(pci->dev, "UNKNOWN IRQ type\n");
	}

	return 0;
}

static struct dw_pcie_ep_ops pcie_ep_ops = {
	.ep_init = dra7xx_pcie_ep_init,
	.raise_irq = dra7xx_pcie_raise_irq,
};

static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx,
				     struct platform_device *pdev)
{
	int ret;
	struct dw_pcie_ep *ep;
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci = dra7xx->pci;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics");
	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2");
	pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base2)
		return -ENOMEM;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
	if (!res)
		return -EINVAL;

	ep->phys_base = res->start;
	ep->addr_size = resource_size(res);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "failed to initialize endpoint\n");
		return ret;
	}

	return 0;
}

static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
				       struct platform_device *pdev)
{
	int ret;
	struct dw_pcie *pci = dra7xx->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = pci->dev;
	struct resource *res;

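	/*
	 * The second interrupt listed in the DT node carries the MSI/INTx
	 * events; the first (main wrapper) interrupt is requested in probe().
	 */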
	pp->irq = platform_get_irq(pdev, 1);
	if (pp->irq < 0) {
		dev_err(dev, "missing IRQ resource\n");
		return pp->irq;
	}

	ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
			       IRQF_SHARED | IRQF_NO_THREAD,
			       "dra7-pcie-msi", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		return ret;
	}

	ret = dra7xx_pcie_init_irq_domain(pp);
	if (ret < 0)
		return ret;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics");
	pci->dbi_base = devm_ioremap(dev, res->start, resource_size(res));
	if (!pci->dbi_base)
		return -ENOMEM;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	.cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup,
	.start_link = dra7xx_pcie_establish_link,
	.stop_link = dra7xx_pcie_stop_link,
	.link_up = dra7xx_pcie_link_up,
};

static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;

	while (phy_count--) {
		phy_power_off(dra7xx->phy[phy_count]);
		phy_exit(dra7xx->phy[phy_count]);
	}
}

static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx)
{
	int phy_count = dra7xx->phy_count;
	int ret;
	int i;

	for (i = 0; i < phy_count; i++) {
		ret = phy_init(dra7xx->phy[i]);
		if (ret < 0)
			goto err_phy;

		ret = phy_power_on(dra7xx->phy[i]);
		if (ret < 0) {
			phy_exit(dra7xx->phy[i]);
			goto err_phy;
		}
	}

	return 0;

err_phy:
	while (--i >= 0) {
		phy_power_off(dra7xx->phy[i]);
		phy_exit(dra7xx->phy[i]);
	}

	return ret;
}

static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = {
	.mode = DW_PCIE_RC_TYPE,
};

static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = {
	.mode = DW_PCIE_EP_TYPE,
};

static const struct of_device_id of_dra7xx_pcie_match[] = {
	{
		.compatible = "ti,dra7-pcie",
		.data = &dra7xx_pcie_rc_of_data,
	},
	{
		.compatible = "ti,dra7-pcie-ep",
		.data = &dra7xx_pcie_ep_of_data,
	},
	{},
};

/*
 * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
 * @dra7xx: the dra7xx device where the workaround should be applied
 *
 * Accesses to the PCIe slave port that are not 32-bit aligned result in an
 * incorrect mapping to the TLP Address and Byte Enable fields. Therefore,
 * byte and half-word accesses are not possible at byte offsets 0x1, 0x2 and
 * 0x3.
 *
 * To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
 */
static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
	int ret;
	struct device_node *np = dev->of_node;
	struct of_phandle_args args;
	struct regmap *regmap;

	regmap = syscon_regmap_lookup_by_phandle(np,
						 "ti,syscon-unaligned-access");
	if (IS_ERR(regmap)) {
		dev_dbg(dev, "can't get ti,syscon-unaligned-access\n");
		return -EINVAL;
	}

	ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access",
					       2, 0, &args);
	if (ret) {
		dev_err(dev, "failed to parse ti,syscon-unaligned-access\n");
		return ret;
	}

	ret = regmap_update_bits(regmap, args.args[0], args.args[1],
				 args.args[1]);
	if (ret)
		dev_err(dev, "failed to enable unaligned access\n");

	of_node_put(args.np);

	return ret;
}

static int __init dra7xx_pcie_probe(struct platform_device *pdev)
{
	u32 reg;
	int ret;
	int irq;
	int i;
	int phy_count;
	struct phy **phy;
	struct device_link **link;
	void __iomem *base;
	struct resource *res;
	struct dw_pcie *pci;
	struct pcie_port *pp;
	struct dra7xx_pcie *dra7xx;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	char name[10];
	struct gpio_desc *reset;
	const struct of_device_id *match;
	const struct dra7xx_pcie_of_data *data;
	enum dw_pcie_device_mode mode;

	match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
	if (!match)
		return -EINVAL;

	data = (struct dra7xx_pcie_of_data *)match->data;
	mode = (enum dw_pcie_device_mode)data->mode;

	dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL);
	if (!dra7xx)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	pp = &pci->pp;
	pp->ops = &dra7xx_pcie_host_ops;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "missing IRQ resource: %d\n", irq);
		return irq;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf");
	base = devm_ioremap_nocache(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	phy_count = of_property_count_strings(np, "phy-names");
	if (phy_count < 0) {
		dev_err(dev, "unable to find the strings\n");
		return phy_count;
	}

	phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	for (i = 0; i < phy_count; i++) {
		snprintf(name, sizeof(name), "pcie-phy%d", i);
		phy[i] = devm_phy_get(dev, name);
		if (IS_ERR(phy[i]))
			return PTR_ERR(phy[i]);

		link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
		if (!link[i]) {
			ret = -EINVAL;
			goto err_link;
		}
	}

	dra7xx->base = base;
	dra7xx->phy = phy;
	dra7xx->pci = pci;
	dra7xx->phy_count = phy_count;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	platform_set_drvdata(pdev, dra7xx);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed\n");
		goto err_get_sync;
	}

	reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH);
	if (IS_ERR(reset)) {
		ret = PTR_ERR(reset);
		dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
		goto err_gpio;
	}

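	/*
	 * Keep the LTSSM disabled while the device type and link speed are
	 * configured; link training is started later from
	 * dra7xx_pcie_establish_link().
	 */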
	reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD);
	reg &= ~LTSSM_EN;
	dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg);

	dra7xx->link_gen = of_pci_get_max_link_speed(np);
	if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2)
		dra7xx->link_gen = 2;

	switch (mode) {
	case DW_PCIE_RC_TYPE:
		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_RC);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			dev_err(dev, "WA for Errata i870 not applied\n");

		ret = dra7xx_add_pcie_port(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	case DW_PCIE_EP_TYPE:
		dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
				   DEVICE_TYPE_EP);

		ret = dra7xx_pcie_unaligned_memaccess(dev);
		if (ret)
			goto err_gpio;

		ret = dra7xx_add_pcie_ep(dra7xx, pdev);
		if (ret < 0)
			goto err_gpio;
		break;
	default:
		dev_err(dev, "INVALID device type %d\n", mode);
	}
	dra7xx->mode = mode;

	ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
			       IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
	if (ret) {
		dev_err(dev, "failed to request irq\n");
		goto err_gpio;
	}

	return 0;

err_gpio:
	pm_runtime_put(dev);

err_get_sync:
	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);

err_link:
	while (--i >= 0)
		device_link_del(link[i]);

	return ret;
}

#ifdef CONFIG_PM_SLEEP
static int dra7xx_pcie_suspend(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* clear MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= ~PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_resume(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	struct dw_pcie *pci = dra7xx->pci;
	u32 val;

	if (dra7xx->mode != DW_PCIE_RC_TYPE)
		return 0;

	/* set MSE */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val |= PCI_COMMAND_MEMORY;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	return 0;
}

static int dra7xx_pcie_suspend_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

	dra7xx_pcie_disable_phy(dra7xx);

	return 0;
}

static int dra7xx_pcie_resume_noirq(struct device *dev)
{
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	ret = dra7xx_pcie_enable_phy(dra7xx);
	if (ret) {
		dev_err(dev, "failed to enable phy\n");
		return ret;
	}

	return 0;
}
#endif

void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);
	int ret;

	dra7xx_pcie_stop_link(dra7xx->pci);

	ret = pm_runtime_put_sync(dev);
	if (ret < 0)
		dev_dbg(dev, "pm_runtime_put_sync failed\n");

	pm_runtime_disable(dev);
	dra7xx_pcie_disable_phy(dra7xx);
}

static const struct dev_pm_ops dra7xx_pcie_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq,
				      dra7xx_pcie_resume_noirq)
};

static struct platform_driver dra7xx_pcie_driver = {
	.driver = {
		.name	= "dra7-pcie",
		.of_match_table = of_dra7xx_pcie_match,
		.suppress_bind_attrs = true,
		.pm	= &dra7xx_pcie_pm_ops,
	},
	.shutdown = dra7xx_pcie_shutdown,
};
builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe);