/*
 * PCIe host controller driver for Freescale i.MX6 SoCs
 *
 * Copyright (C) 2013 Kosagi
 * http://www.kosagi.com
 *
 * Author: Sean Cross <xobs@kosagi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>

#include "pcie-designware.h"

#define to_imx6_pcie(x)	dev_get_drvdata((x)->dev)

enum imx6_pcie_variants {
	IMX6Q,
	IMX6SX,
	IMX6QP,
	IMX7D,
};

struct imx6_pcie {
	struct dw_pcie *pci;
	int reset_gpio;
	bool gpio_active_high;
	struct clk *pcie_bus;
	struct clk *pcie_phy;
	struct clk *pcie_inbound_axi;
	struct clk *pcie;
	struct regmap *iomuxc_gpr;
	struct reset_control *pciephy_reset;
	struct reset_control *apps_reset;
	enum imx6_pcie_variants variant;
	u32 tx_deemph_gen1;
	u32 tx_deemph_gen2_3p5db;
	u32 tx_deemph_gen2_6db;
	u32 tx_swing_full;
	u32 tx_swing_low;
	int link_gen;
	struct regulator *vpcie;
};

/* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */
#define PHY_PLL_LOCK_WAIT_MAX_RETRIES	2000
#define PHY_PLL_LOCK_WAIT_USLEEP_MIN	50
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX	200

/* PCIe Root Complex registers (memory-mapped) */
#define PCIE_RC_LCR				0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1	0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2	0x2
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK	0xf

#define PCIE_RC_LCSR				0x80

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET	0x700
#define PCIE_PL_PFLR			(PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK	(0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK		(1 << 15)
#define PCIE_PHY_DEBUG_R0		(PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1		(PL_OFFSET + 0x2c)

#define PCIE_PHY_CTRL			(PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC		0
#define PCIE_PHY_CTRL_CAP_ADR_LOC	16
#define PCIE_PHY_CTRL_CAP_DAT_LOC	17
#define PCIE_PHY_CTRL_WR_LOC		18
#define PCIE_PHY_CTRL_RD_LOC		19

#define PCIE_PHY_STAT			(PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_ACK_LOC		16

#define PCIE_LINK_WIDTH_SPEED_CONTROL	0x80C
#define PORT_LOGIC_SPEED_CHANGE		(0x1 << 17)

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT		0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID	(1 << 0)

#define PHY_RX_OVRD_IN_LO		0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN	(1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN	(1 << 3)

static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

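/*
 * The PHY control registers below are accessed indirectly through the
 * memory-mapped PCIE_PHY_CTRL/PCIE_PHY_STAT pair: the address is driven
 * on the data lines and latched with the "capture address" bit, data is
 * latched with "capture data", and the read/write strobe is pulsed, with
 * each step acknowledged via the PHY_STAT ack bit polled by
 * pcie_phy_poll_ack() above.
 */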
static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00);

	return pcie_phy_poll_ack(imx6_pcie, 0);
}

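/* Write to the 16-bit PCIe PHY control registers (not memory-mapped) */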
static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	u32 var;
	int ret;

	/* write addr and capture addr */
	ret = pcie_phy_wait_ack(imx6_pcie, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack */
	ret = pcie_phy_poll_ack(imx6_pcie, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(imx6_pcie, 0);
	if (ret)
		return ret;

	dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0);

	return 0;
}

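/*
 * Used on the imx6_pcie_establish_link() error path: briefly toggle the
 * RX data/PLL enable bits in PHY_RX_OVRD_IN_LO to put the PHY receiver
 * through a reset cycle.
 */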
static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie)
{
	int tmp;

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);

	usleep_range(2000, 3000);

	pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp);
	tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
		 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp);
}

/* Added for PCI abort handling */
static int imx6q_pcie_abort_handler(unsigned long addr,
		unsigned int fsr, struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);
	unsigned long instr = *(unsigned long *)pc;
	int reg = (instr >> 12) & 15;

	/*
	 * If the instruction being executed was a read,
	 * make it look like it read all-ones.
	 */
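	/*
	 * Mask/value 0x0c100000/0x04100000 matches the ARM single data
	 * transfer load encodings (LDR/LDRB); bit 22 set means a byte
	 * load, so fake 0xff instead of a full-width all-ones value.
	 */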
	if ((instr & 0x0c100000) == 0x04100000) {
		unsigned long val;

		if (instr & 0x00400000)
			val = 255;
		else
			val = -1;

		regs->uregs[reg] = val;
		regs->ARM_pc += 4;
		return 0;
	}

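	/* Halfword/signed loads (LDRH/LDRSB/LDRSH): also return all-ones. */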
	if ((instr & 0x0e100090) == 0x00100090) {
		regs->uregs[reg] = -1;
		regs->ARM_pc += 4;
		return 0;
	}

	return 1;
}

static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct device *dev = imx6_pcie->pci->dev;

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_assert(imx6_pcie->pciephy_reset);
		reset_control_assert(imx6_pcie->apps_reset);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN);
		/* Force PCIe PHY reset */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST,
				   IMX6Q_GPR1_PCIE_SW_RST);
		break;
	case IMX6Q:
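		/* Power down the PHY (GPR1[18]) and gate its ref clock (GPR1[16]). */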
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16);
		break;
	}

	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		int ret = regulator_disable(imx6_pcie->vpcie);

		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret = 0;

	switch (imx6_pcie->variant) {
	case IMX6SX:
		ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi);
		if (ret) {
			dev_err(dev, "unable to enable pcie_axi clock\n");
			break;
		}

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0);
		break;
	case IMX6QP:		/* FALLTHROUGH */
	case IMX6Q:
		/* power up core phy and enable ref clock */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
		/*
		 * The async reset input needs the ref clock to sync
		 * internally; if the ref clock only comes up after reset,
		 * the internally synced reset pulse is too short to meet
		 * the requirement, so insert a ~10us delay here.
		 */
		udelay(10);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
		break;
	case IMX7D:
		break;
	}

	return ret;
}

static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie)
{
	u32 val;
	unsigned int retries;
	struct device *dev = imx6_pcie->pci->dev;

	for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) {
		regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val);

		if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED)
			return;

		usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN,
			     PHY_PLL_LOCK_WAIT_USLEEP_MAX);
	}

	dev_err(dev, "PCIe PLL lock timeout\n");
}

static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) {
		ret = regulator_enable(imx6_pcie->vpcie);
		if (ret) {
			dev_err(dev, "failed to enable vpcie regulator: %d\n",
				ret);
			return;
		}
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
	if (ret) {
		dev_err(dev, "unable to enable pcie_phy clock\n");
		goto err_pcie_phy;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie_bus);
	if (ret) {
		dev_err(dev, "unable to enable pcie_bus clock\n");
		goto err_pcie_bus;
	}

	ret = clk_prepare_enable(imx6_pcie->pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie clock\n");
		goto err_pcie;
	}

	ret = imx6_pcie_enable_ref_clk(imx6_pcie);
	if (ret) {
		dev_err(dev, "unable to enable pcie ref clock\n");
		goto err_ref_clk;
	}

	/* allow the clocks to stabilize */
	usleep_range(200, 500);

	/* Some boards don't have a PCIe reset GPIO. */
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					imx6_pcie->gpio_active_high);
		msleep(100);
		gpio_set_value_cansleep(imx6_pcie->reset_gpio,
					!imx6_pcie->gpio_active_high);
	}

	switch (imx6_pcie->variant) {
	case IMX7D:
		reset_control_deassert(imx6_pcie->pciephy_reset);
		imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5,
				   IMX6SX_GPR5_PCIE_BTNRST_RESET, 0);
		break;
	case IMX6QP:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
				   IMX6Q_GPR1_PCIE_SW_RST, 0);

		usleep_range(200, 500);
		break;
	case IMX6Q:		/* Nothing to do */
		break;
	}

	return;

err_ref_clk:
	clk_disable_unprepare(imx6_pcie->pcie);
err_pcie:
	clk_disable_unprepare(imx6_pcie->pcie_bus);
err_pcie_bus:
	clk_disable_unprepare(imx6_pcie->pcie_phy);
err_pcie_phy:
	if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) {
		ret = regulator_disable(imx6_pcie->vpcie);
		if (ret)
			dev_err(dev, "failed to disable vpcie regulator: %d\n",
				ret);
	}
}

static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
	switch (imx6_pcie->variant) {
	case IMX7D:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0);
		break;
	case IMX6SX:
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6SX_GPR12_PCIE_RX_EQ_MASK,
				   IMX6SX_GPR12_PCIE_RX_EQ_2);
		/* FALLTHROUGH */
	default:
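		/* Keep the LTSSM held off (GPR12[10]) until imx6_pcie_establish_link(). */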
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);

		/* configure constant input signal to the pcie ctrl and phy */
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_LOS_LEVEL, 9 << 4);

		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN1,
				   imx6_pcie->tx_deemph_gen1 << 0);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB,
				   imx6_pcie->tx_deemph_gen2_3p5db << 6);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB,
				   imx6_pcie->tx_deemph_gen2_6db << 12);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_FULL,
				   imx6_pcie->tx_swing_full << 18);
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8,
				   IMX6Q_GPR8_TX_SWING_LOW,
				   imx6_pcie->tx_swing_low << 25);
		break;
	}

	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
			   IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}

static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;

	/* check if the link is up or not */
	if (!dw_pcie_wait_for_link(pci))
		return 0;

	dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	return -ETIMEDOUT;
}

static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	unsigned int retries;

	for (retries = 0; retries < 200; retries++) {
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		/* Test if the speed change finished. */
		if (!(tmp & PORT_LOGIC_SPEED_CHANGE))
			return 0;
		usleep_range(100, 1000);
	}

	dev_err(dev, "Speed change timeout\n");
	return -EINVAL;
}

static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg)
{
	struct imx6_pcie *imx6_pcie = arg;
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;

	return dw_handle_msi_irq(pp);
}

static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct device *dev = pci->dev;
	u32 tmp;
	int ret;

	/*
	 * Force Gen1 operation when starting the link. In case the link is
	 * started in Gen2 mode, there is a possibility the devices on the
	 * bus will not be detected at all. This happens with PCIe switches.
	 */
	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
	tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
	tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1;
	dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

	/* Start LTSSM. */
	if (imx6_pcie->variant == IMX7D)
		reset_control_deassert(imx6_pcie->apps_reset);
	else
		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
				   IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);

	ret = imx6_pcie_wait_for_link(imx6_pcie);
	if (ret)
		goto err_reset_phy;

	if (imx6_pcie->link_gen == 2) {
		/* Allow Gen2 mode after the link is up. */
		tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR);
		tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK;
		tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2;
		dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);

		/*
		 * Start Directed Speed Change so the best possible
		 * speed both link partners support can be negotiated.
		 */
		tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		tmp |= PORT_LOGIC_SPEED_CHANGE;
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp);

		if (imx6_pcie->variant != IMX7D) {
			/*
			 * On i.MX7, DIRECT_SPEED_CHANGE behaves differently
			 * from the i.MX6 family when no link speed transition
			 * occurs (the link simply stays at Gen1): the bit is
			 * not cleared by hardware, which would make the wait
			 * below report a false failure, so it is skipped on
			 * i.MX7D.
			 */
			ret = imx6_pcie_wait_for_speed_change(imx6_pcie);
			if (ret) {
				dev_err(dev, "Failed to bring link up!\n");
				goto err_reset_phy;
			}
		}

		/* Make sure link training is finished as well! */
		ret = imx6_pcie_wait_for_link(imx6_pcie);
		if (ret) {
			dev_err(dev, "Failed to bring link up!\n");
			goto err_reset_phy;
		}
	} else {
		dev_info(dev, "Link: Gen2 disabled\n");
	}

	tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
	dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf);
	return 0;

err_reset_phy:
	dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
		dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
	imx6_pcie_reset_phy(imx6_pcie);
	return ret;
}

static int imx6_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci);

	imx6_pcie_assert_core_reset(imx6_pcie);
	imx6_pcie_init_phy(imx6_pcie);
	imx6_pcie_deassert_core_reset(imx6_pcie);
	dw_pcie_setup_rc(pp);
	imx6_pcie_establish_link(imx6_pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		dw_pcie_msi_init(pp);

	return 0;
}

static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
	.host_init = imx6_pcie_host_init,
};

static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
			      struct platform_device *pdev)
{
	struct dw_pcie *pci = imx6_pcie->pci;
	struct pcie_port *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		pp->msi_irq = platform_get_irq_byname(pdev, "msi");
		if (pp->msi_irq <= 0) {
			dev_err(dev, "failed to get MSI irq\n");
			return -ENODEV;
		}

		ret = devm_request_irq(dev, pp->msi_irq,
				       imx6_pcie_msi_handler,
				       IRQF_SHARED | IRQF_NO_THREAD,
				       "mx6-pcie-msi", imx6_pcie);
		if (ret) {
			dev_err(dev, "failed to request MSI irq\n");
			return ret;
		}
	}

	pp->root_bus_nr = -1;
	pp->ops = &imx6_pcie_host_ops;

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err(dev, "failed to initialize host\n");
		return ret;
	}

	return 0;
}

static const struct dw_pcie_ops dw_pcie_ops = {
	/* No special ops needed, but pcie-designware still expects this struct */
};

static int imx6_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dw_pcie *pci;
	struct imx6_pcie *imx6_pcie;
	struct resource *dbi_base;
	struct device_node *node = dev->of_node;
	int ret;

	imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
	if (!imx6_pcie)
		return -ENOMEM;

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci)
		return -ENOMEM;

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;

	imx6_pcie->pci = pci;
	imx6_pcie->variant =
		(enum imx6_pcie_variants)of_device_get_match_data(dev);

	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
	if (IS_ERR(pci->dbi_base))
		return PTR_ERR(pci->dbi_base);

	/* Fetch GPIOs */
	imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0);
	imx6_pcie->gpio_active_high = of_property_read_bool(node,
						"reset-gpio-active-high");
	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
		ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio,
				imx6_pcie->gpio_active_high ?
					GPIOF_OUT_INIT_HIGH :
					GPIOF_OUT_INIT_LOW,
				"PCIe reset");
		if (ret) {
			dev_err(dev, "unable to get reset gpio\n");
			return ret;
		}
	} else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) {
		return imx6_pcie->reset_gpio;
	}

	/* Fetch clocks */
	imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
	if (IS_ERR(imx6_pcie->pcie_phy)) {
		dev_err(dev, "pcie_phy clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_phy);
	}

	imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
	if (IS_ERR(imx6_pcie->pcie_bus)) {
		dev_err(dev, "pcie_bus clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie_bus);
	}

	imx6_pcie->pcie = devm_clk_get(dev, "pcie");
	if (IS_ERR(imx6_pcie->pcie)) {
		dev_err(dev, "pcie clock source missing or invalid\n");
		return PTR_ERR(imx6_pcie->pcie);
	}

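	/* Fetch variant-specific clocks and reset controls */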
	switch (imx6_pcie->variant) {
	case IMX6SX:
		imx6_pcie->pcie_inbound_axi = devm_clk_get(dev,
							   "pcie_inbound_axi");
		if (IS_ERR(imx6_pcie->pcie_inbound_axi)) {
			dev_err(dev, "pcie_inbound_axi clock missing or invalid\n");
			return PTR_ERR(imx6_pcie->pcie_inbound_axi);
		}
		break;
	case IMX7D:
		imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
									    "pciephy");
		if (IS_ERR(imx6_pcie->pciephy_reset)) {
			dev_err(dev, "Failed to get PCIEPHY reset control\n");
			return PTR_ERR(imx6_pcie->pciephy_reset);
		}

		imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
									 "apps");
		if (IS_ERR(imx6_pcie->apps_reset)) {
			dev_err(dev, "Failed to get PCIE APPS reset control\n");
			return PTR_ERR(imx6_pcie->apps_reset);
		}
		break;
	default:
		break;
	}

	/* Grab GPR config register range */
	imx6_pcie->iomuxc_gpr =
		syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
	if (IS_ERR(imx6_pcie->iomuxc_gpr)) {
		dev_err(dev, "unable to find iomuxc registers\n");
		return PTR_ERR(imx6_pcie->iomuxc_gpr);
	}

	/* Grab PCIe PHY Tx Settings */
	if (of_property_read_u32(node, "fsl,tx-deemph-gen1",
				 &imx6_pcie->tx_deemph_gen1))
		imx6_pcie->tx_deemph_gen1 = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db",
				 &imx6_pcie->tx_deemph_gen2_3p5db))
		imx6_pcie->tx_deemph_gen2_3p5db = 0;

	if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db",
				 &imx6_pcie->tx_deemph_gen2_6db))
		imx6_pcie->tx_deemph_gen2_6db = 20;

	if (of_property_read_u32(node, "fsl,tx-swing-full",
				 &imx6_pcie->tx_swing_full))
		imx6_pcie->tx_swing_full = 127;

	if (of_property_read_u32(node, "fsl,tx-swing-low",
				 &imx6_pcie->tx_swing_low))
		imx6_pcie->tx_swing_low = 127;

	/* Limit link speed */
	ret = of_property_read_u32(node, "fsl,max-link-speed",
				   &imx6_pcie->link_gen);
	if (ret)
		imx6_pcie->link_gen = 1;

	imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie");
	if (IS_ERR(imx6_pcie->vpcie)) {
		if (PTR_ERR(imx6_pcie->vpcie) != -ENODEV)
			return PTR_ERR(imx6_pcie->vpcie);
		imx6_pcie->vpcie = NULL;
	}

	platform_set_drvdata(pdev, imx6_pcie);

	ret = imx6_add_pcie_port(imx6_pcie, pdev);
	if (ret < 0)
		return ret;

	return 0;
}

static void imx6_pcie_shutdown(struct platform_device *pdev)
{
	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);

	/* bring down link, so the bootloader gets a clean state in case of reboot */
	imx6_pcie_assert_core_reset(imx6_pcie);
}

static const struct of_device_id imx6_pcie_of_match[] = {
	{ .compatible = "fsl,imx6q-pcie",  .data = (void *)IMX6Q,  },
	{ .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, },
	{ .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, },
	{ .compatible = "fsl,imx7d-pcie",  .data = (void *)IMX7D,  },
	{},
};

static struct platform_driver imx6_pcie_driver = {
	.driver = {
		.name = "imx6q-pcie",
		.of_match_table = imx6_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe    = imx6_pcie_probe,
	.shutdown = imx6_pcie_shutdown,
};

static int __init imx6_pcie_init(void)
{
	/*
	 * Since probe() can be deferred, we need to make sure that
	 * hook_fault_code() is not called after __init memory is freed
	 * by the kernel. Since imx6q_pcie_abort_handler() does not touch
	 * any driver state, we can install the handler here without
	 * risking it accessing uninitialized data.
	 */
	hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0,
			"external abort on non-linefetch");

	return platform_driver_register(&imx6_pcie_driver);
}
device_initcall(imx6_pcie_init);