// SPDX-License-Identifier: GPL-2.0
/*
 * Freescale i.MX6 PCI Express Root-Complex driver
 *
 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 *
 * Based on upstream Linux kernel driver:
 * pci-imx6.c: Sean Cross <xobs@kosagi.com>
 * pcie-designware.c: Jingoo Han <jg1.han@samsung.com>
 */

#include <common.h>
#include <init.h>
#include <pci.h>
#include <asm/arch/clock.h>
#include <asm/arch/iomux.h>
#include <asm/arch/crm_regs.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <dm.h>
#include <linux/sizes.h>
#include <errno.h>
#include <asm/arch/sys_proto.h>

#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1

#ifdef CONFIG_MX6SX
#define MX6_DBI_ADDR 0x08ffc000
#define MX6_IO_ADDR 0x08000000
#define MX6_MEM_ADDR 0x08100000
#define MX6_ROOT_ADDR 0x08f00000
#else
#define MX6_DBI_ADDR 0x01ffc000
#define MX6_IO_ADDR 0x01000000
#define MX6_MEM_ADDR 0x01100000
#define MX6_ROOT_ADDR 0x01f00000
#endif
#define MX6_DBI_SIZE 0x4000
#define MX6_IO_SIZE 0x100000
#define MX6_MEM_SIZE 0xe00000
#define MX6_ROOT_SIZE 0xfc000

/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
#define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16)
#define PCIE_PL_PFLR_FORCE_LINK (1 << 15)
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define PCIE_PHY_DEBUG_R1_LINK_UP (1 << 4)
#define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (1 << 29)

#define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
#define PCIE_PHY_CTRL_DATA_LOC 0
#define PCIE_PHY_CTRL_CAP_ADR_LOC 16
#define PCIE_PHY_CTRL_CAP_DAT_LOC 17
#define PCIE_PHY_CTRL_WR_LOC 18
#define PCIE_PHY_CTRL_RD_LOC 19

#define PCIE_PHY_STAT (PL_OFFSET + 0x110)
#define PCIE_PHY_STAT_DATA_LOC 0
#define PCIE_PHY_STAT_ACK_LOC 16

/* PHY registers (not memory-mapped) */
#define PCIE_PHY_RX_ASIC_OUT 0x100D

#define PHY_RX_OVRD_IN_LO 0x1005
#define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5)
#define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3)

#define PCIE_PHY_PUP_REQ (1 << 7)

/* iATU registers */
#define PCIE_ATU_VIEWPORT 0x900
#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
#define PCIE_ATU_CR1 0x904
#define PCIE_ATU_TYPE_MEM (0x0 << 0)
#define PCIE_ATU_TYPE_IO (0x2 << 0)
#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
#define PCIE_ATU_CR2 0x908
#define PCIE_ATU_ENABLE (0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
#define PCIE_ATU_LOWER_BASE 0x90C
#define PCIE_ATU_UPPER_BASE 0x910
#define PCIE_ATU_LIMIT 0x914
#define PCIE_ATU_LOWER_TARGET 0x918
#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C

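/*
 * dbi_base maps the DesignWare core's DBI registers (the RC's own config
 * space and port logic); cfg_base is the outbound window through which
 * configuration cycles to downstream devices are issued.
 */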
struct imx_pcie_priv {
	void __iomem *dbi_base;
	void __iomem *cfg_base;
};

/*
 * PHY access functions
 */
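/*
 * The PHY registers are reached through the PCIE_PHY_CTRL/PCIE_PHY_STAT
 * pair: the address (or data) is placed on the low 16 bits of
 * PCIE_PHY_CTRL, latched by pulsing the CAP_ADR/CAP_DAT bits, and each
 * step is confirmed by waiting for the ACK bit in PCIE_PHY_STAT to
 * assert and then deassert again.
 */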
static int pcie_phy_poll_ack(void __iomem *dbi_base, int exp_val)
{
	u32 val;
	u32 max_iterations = 10;
	u32 wait_counter = 0;

	do {
		val = readl(dbi_base + PCIE_PHY_STAT);
		val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1;
		wait_counter++;

		if (val == exp_val)
			return 0;

		udelay(1);
	} while (wait_counter < max_iterations);

	return -ETIMEDOUT;
}

static int pcie_phy_wait_ack(void __iomem *dbi_base, int addr)
{
	u32 val;
	int ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC);
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = addr << PCIE_PHY_CTRL_DATA_LOC;
	writel(val, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

/* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */
static int pcie_phy_read(void __iomem *dbi_base, int addr, int *data)
{
	u32 val, phy_ctl;
	int ret;

	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	/* assert Read signal */
	phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC;
	writel(phy_ctl, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	val = readl(dbi_base + PCIE_PHY_STAT);
	*data = val & 0xffff;

	/* deassert Read signal */
	writel(0x00, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	return 0;
}

static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
{
	u32 var;
	int ret;

	/* write addr */
	/* cap addr */
	ret = pcie_phy_wait_ack(dbi_base, addr);
	if (ret)
		return ret;

	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* capture data */
	var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC);
	writel(var, dbi_base + PCIE_PHY_CTRL);

	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert cap data */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	/* assert wr signal */
	var = 0x1 << PCIE_PHY_CTRL_WR_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack */
	ret = pcie_phy_poll_ack(dbi_base, 1);
	if (ret)
		return ret;

	/* deassert wr signal */
	var = data << PCIE_PHY_CTRL_DATA_LOC;
	writel(var, dbi_base + PCIE_PHY_CTRL);

	/* wait for ack de-assertion */
	ret = pcie_phy_poll_ack(dbi_base, 0);
	if (ret)
		return ret;

	writel(0x0, dbi_base + PCIE_PHY_CTRL);

	return 0;
}

static int imx6_pcie_link_up(struct imx_pcie_priv *priv)
{
	u32 rc, ltssm;
	int rx_valid, temp;

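	/*
	 * Note the return convention: a non-zero value (-EAGAIN) means the
	 * link is up and no longer training, which is what terminates the
	 * polling loop in imx_pcie_link_up().
	 */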
	/* link is debug bit 36, debug register 1 starts at bit 32 */
	rc = readl(priv->dbi_base + PCIE_PHY_DEBUG_R1);
	if ((rc & PCIE_PHY_DEBUG_R1_LINK_UP) &&
	    !(rc & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))
		return -EAGAIN;

	/*
	 * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
	 * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
	 * If (MAC/LTSSM.state == Recovery.RcvrLock)
	 * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
	 * to gen2 is stuck
	 */
	pcie_phy_read(priv->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
	ltssm = readl(priv->dbi_base + PCIE_PHY_DEBUG_R0) & 0x3F;

	if (rx_valid & 0x01)
		return 0;

	if (ltssm != 0x0d)
		return 0;

	printf("transition to gen2 is stuck, reset PHY!\n");

	pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	udelay(3000);

	pcie_phy_read(priv->dbi_base, PHY_RX_OVRD_IN_LO, &temp);
	temp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
	pcie_phy_write(priv->dbi_base, PHY_RX_OVRD_IN_LO, temp);

	return 0;
}

/*
 * iATU region setup
 */
static int imx_pcie_regions_setup(struct imx_pcie_priv *priv)
{
	/*
	 * i.MX6 defines 16MB in the AXI address map for PCIe.
	 *
	 * That address space, except for the PCIe registers, is split
	 * into different regions by the iATU, with sizes and offsets
	 * as follows (i.MX6SX uses the equivalent layout within its
	 * 0x0800_0000 window):
	 *
	 * 0x0100_0000 --- 0x010F_FFFF 1MB IORESOURCE_IO
	 * 0x0110_0000 --- 0x01EF_FFFF 14MB IORESOURCE_MEM
	 * 0x01F0_0000 --- 0x01FF_FFFF 1MB Cfg + Registers
	 */

	/* CMD reg: I/O space, MEM space, and Bus Master Enable */
	setbits_le32(priv->dbi_base + PCI_COMMAND,
		     PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);

	/* Set the CLASS_REV of RC CFG header to PCI_CLASS_BRIDGE_PCI */
	setbits_le32(priv->dbi_base + PCI_CLASS_REVISION,
		     PCI_CLASS_BRIDGE_PCI << 16);

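	/*
	 * Programming sequence: select the region through the viewport,
	 * set the CPU-side base/limit and the bus-side target address,
	 * pick the TLP type in CR1, then enable the region via CR2.
	 */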
	/* Region #0 is used for Outbound CFG space access. */
	writel(0, priv->dbi_base + PCIE_ATU_VIEWPORT);

	writel(lower_32_bits((uintptr_t)priv->cfg_base),
	       priv->dbi_base + PCIE_ATU_LOWER_BASE);
	writel(upper_32_bits((uintptr_t)priv->cfg_base),
	       priv->dbi_base + PCIE_ATU_UPPER_BASE);
	writel(lower_32_bits((uintptr_t)priv->cfg_base + MX6_ROOT_SIZE),
	       priv->dbi_base + PCIE_ATU_LIMIT);

	writel(0, priv->dbi_base + PCIE_ATU_LOWER_TARGET);
	writel(0, priv->dbi_base + PCIE_ATU_UPPER_TARGET);
	writel(PCIE_ATU_TYPE_CFG0, priv->dbi_base + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE, priv->dbi_base + PCIE_ATU_CR2);

	return 0;
}

/*
 * PCI Express accessors
 */
static void __iomem *get_bus_address(struct imx_pcie_priv *priv,
				     pci_dev_t d, int where)
{
	void __iomem *va_address;

	/* Reconfigure Region #0 */
	writel(0, priv->dbi_base + PCIE_ATU_VIEWPORT);

	if (PCI_BUS(d) < 2)
		writel(PCIE_ATU_TYPE_CFG0, priv->dbi_base + PCIE_ATU_CR1);
	else
		writel(PCIE_ATU_TYPE_CFG1, priv->dbi_base + PCIE_ATU_CR1);

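	/*
	 * Bus 0 is the root complex itself, so its config space is read
	 * straight from the DBI registers. Anything else goes through the
	 * outbound CFG window; shifting the pci_dev_t value left by 8
	 * moves its bus/device/function fields into the PCIE_ATU_BUS/
	 * PCIE_ATU_DEV/PCIE_ATU_FUNC positions expected by the ATU
	 * target register.
	 */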
	if (PCI_BUS(d) == 0) {
		va_address = priv->dbi_base;
	} else {
		writel(d << 8, priv->dbi_base + PCIE_ATU_LOWER_TARGET);
		va_address = priv->cfg_base;
	}

	va_address += (where & ~0x3);

	return va_address;
}

static int imx_pcie_addr_valid(pci_dev_t d)
{
	if ((PCI_BUS(d) == 0) && (PCI_DEV(d) > 1))
		return -EINVAL;
	if ((PCI_BUS(d) == 1) && (PCI_DEV(d) > 0))
		return -EINVAL;
	return 0;
}

/*
 * Replace the original ARM DABT handler with a simple jump-back one.
 *
 * The problem here is that if we have a PCIe bridge attached to this PCIe
 * controller, but no PCIe device is connected to the bridge's downstream
 * port, an attempt to read from or write to its config space will produce
 * a DABT. This is a behaviour of the controller and unfortunately cannot
 * be disabled.
 *
 * To work around the problem, we back up the current DABT handler address
 * and replace it with our own DABT handler, which only bounces right back
 * into the code.
 */
static void imx_pcie_fix_dabt_handler(bool set)
{
	extern uint32_t *_data_abort;
	uint32_t *data_abort_addr = (uint32_t *)&_data_abort;

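	/*
	 * 0xe25ef004 encodes the ARM instruction "subs pc, lr, #4", i.e.
	 * return from the data abort and resume at the instruction after
	 * the one that faulted.
	 */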
	static const uint32_t data_abort_bounce_handler = 0xe25ef004;
	uint32_t data_abort_bounce_addr = (uint32_t)&data_abort_bounce_handler;

	static uint32_t data_abort_backup;

	if (set) {
		data_abort_backup = *data_abort_addr;
		*data_abort_addr = data_abort_bounce_addr;
	} else {
		*data_abort_addr = data_abort_backup;
	}
}

static int imx_pcie_read_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			     int where, u32 *val)
{
	void __iomem *va_address;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret) {
		*val = 0xffffffff;
		return 0;
	}

	va_address = get_bus_address(priv, d, where);

	/*
	 * Read the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * the imx_pcie_fix_dabt_handler() description. Note that pre-setting
	 * "val" to a valid value is also imperative here, because if we did
	 * get a DABT, "val" would otherwise contain a random value.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(0xffffffff, val);
	*val = readl(va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

static int imx_pcie_write_cfg(struct imx_pcie_priv *priv, pci_dev_t d,
			      int where, u32 val)
{
	void __iomem *va_address = NULL;
	int ret;

	ret = imx_pcie_addr_valid(d);
	if (ret)
		return ret;

	va_address = get_bus_address(priv, d, where);

	/*
	 * Write the PCIe config space. We must replace the DABT handler
	 * here in case we get a data abort from the PCIe controller, see
	 * the imx_pcie_fix_dabt_handler() description.
	 */
	imx_pcie_fix_dabt_handler(true);
	writel(val, va_address);
	imx_pcie_fix_dabt_handler(false);

	return 0;
}

/*
 * Initial bus setup
 */
static int imx6_pcie_assert_core_reset(struct imx_pcie_priv *priv,
				       bool prepare_for_boot)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	if (is_mx6dqp())
		setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST);

#if defined(CONFIG_MX6SX)
	struct gpc *gpc_regs = (struct gpc *)GPC_BASE_ADDR;

	/* SSP_EN is not used on MX6SX anymore */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN);
	/* Force PCIe PHY reset */
	setbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST);
	/* Power up PCIe PHY */
	setbits_le32(&gpc_regs->cntr, PCIE_PHY_PUP_REQ);
#else
	/*
	 * If the bootloader already enabled the link we need some special
	 * handling to get the core back into a state where it is safe to
	 * touch it for configuration. As there is no dedicated reset signal
	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
	 * state before completely disabling LTSSM, which is a prerequisite
	 * for core configuration.
	 *
	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
	 * indication that the bootloader activated the link.
	 */
	if (is_mx6dq() && prepare_for_boot) {
		u32 val, gpr1, gpr12;

		gpr1 = readl(&iomuxc_regs->gpr[1]);
		gpr12 = readl(&iomuxc_regs->gpr[12]);
		if ((gpr1 & IOMUXC_GPR1_PCIE_REF_CLK_EN) &&
		    (gpr12 & IOMUXC_GPR12_PCIE_CTL_2)) {
			val = readl(priv->dbi_base + PCIE_PL_PFLR);
			val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
			val |= PCIE_PL_PFLR_FORCE_LINK;

			imx_pcie_fix_dabt_handler(true);
			writel(val, priv->dbi_base + PCIE_PL_PFLR);
			imx_pcie_fix_dabt_handler(false);

			gpr12 &= ~IOMUXC_GPR12_PCIE_CTL_2;
			writel(gpr12, &iomuxc_regs->gpr[12]);
		}
	}
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);
#endif

	return 0;
}

static int imx6_pcie_init_phy(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_DEVICE_TYPE_MASK,
			IOMUXC_GPR12_DEVICE_TYPE_RC);
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_LOS_LEVEL_MASK,
			IOMUXC_GPR12_LOS_LEVEL_9);

#ifdef CONFIG_MX6SX
	clrsetbits_le32(&iomuxc_regs->gpr[12],
			IOMUXC_GPR12_RX_EQ_MASK,
			IOMUXC_GPR12_RX_EQ_2);
#endif

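	/* Program the PCIe PHY TX de-emphasis and swing levels via GPR8 */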
	writel((0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN1_OFFSET) |
	       (0x0 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_3P5DB_OFFSET) |
	       (20 << IOMUXC_GPR8_PCS_TX_DEEMPH_GEN2_6DB_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_FULL_OFFSET) |
	       (127 << IOMUXC_GPR8_PCS_TX_SWING_LOW_OFFSET),
	       &iomuxc_regs->gpr[8]);

	return 0;
}

__weak int imx6_pcie_toggle_power(void)
{
#ifdef CONFIG_PCIE_IMX_POWER_GPIO
	gpio_request(CONFIG_PCIE_IMX_POWER_GPIO, "pcie_power");
	gpio_direction_output(CONFIG_PCIE_IMX_POWER_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_POWER_GPIO, 1);
	mdelay(20);
	gpio_free(CONFIG_PCIE_IMX_POWER_GPIO);
#endif
	return 0;
}

__weak int imx6_pcie_toggle_reset(void)
{
	/*
	 * See 'PCI EXPRESS BASE SPECIFICATION, REV 3.0, SECTION 6.6.1'
	 * for detailed understanding of the PCIe CR reset logic.
	 *
	 * The PCIe #PERST reset line _MUST_ be connected, otherwise your
	 * design does not conform to the specification. You must wait at
	 * least 20 ms after de-asserting the #PERST so the EP device can
	 * do self-initialisation.
	 *
	 * In case your #PERST pin is connected to a plain GPIO pin of the
	 * CPU, you can define CONFIG_PCIE_IMX_PERST_GPIO in your board's
	 * configuration file and the condition below will handle the rest
	 * of the reset toggling.
	 *
	 * In case your #PERST toggling logic is more complex, for example
	 * connected via CPLD or somesuch, you can override this function
	 * in your board file and implement reset logic as needed. You must
	 * not forget to wait at least 20 ms after de-asserting #PERST in
	 * this case either though.
	 *
	 * If the #PERST line of the PCIe EP device is not connected at all,
	 * your design is broken and you should fix it. Otherwise you will
	 * observe problems such as the link not coming up after rebooting
	 * out of a Linux system that also uses the PCIe port, or the link
	 * never coming up in Linux in the first place, because the device
	 * was left in a non-reset state by U-Boot.
	 */
#ifdef CONFIG_PCIE_IMX_PERST_GPIO
	gpio_request(CONFIG_PCIE_IMX_PERST_GPIO, "pcie_reset");
	gpio_direction_output(CONFIG_PCIE_IMX_PERST_GPIO, 0);
	mdelay(20);
	gpio_set_value(CONFIG_PCIE_IMX_PERST_GPIO, 1);
	mdelay(20);
	gpio_free(CONFIG_PCIE_IMX_PERST_GPIO);
#else
	puts("WARNING: Make sure the PCIe #PERST line is connected!\n");
#endif
	return 0;
}

static int imx6_pcie_deassert_core_reset(void)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;

	imx6_pcie_toggle_power();

	enable_pcie_clock();

	if (is_mx6dqp())
		clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_PCIE_SW_RST);

	/*
	 * Wait for the clocks to settle a bit; when they are sourced
	 * from the CPU, they need about 30 ms to settle.
	 */
	mdelay(50);

#if defined(CONFIG_MX6SX)
	/* SSP_EN is not used on MX6SX anymore */
	clrbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_TEST_POWERDOWN);
	/* Clear PCIe PHY reset bit */
	clrbits_le32(&iomuxc_regs->gpr[5], IOMUXC_GPR5_PCIE_BTNRST);
#else
	/* Enable PCIe */
	clrbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_TEST_POWERDOWN);
	setbits_le32(&iomuxc_regs->gpr[1], IOMUXC_GPR1_REF_SSP_EN);
#endif

	imx6_pcie_toggle_reset();

	return 0;
}

static int imx_pcie_link_up(struct imx_pcie_priv *priv)
{
	struct iomuxc *iomuxc_regs = (struct iomuxc *)IOMUXC_BASE_ADDR;
	uint32_t tmp;
	int count = 0;

	imx6_pcie_assert_core_reset(priv, false);
	imx6_pcie_init_phy();
	imx6_pcie_deassert_core_reset();

	imx_pcie_regions_setup(priv);

	/*
	 * By default, the RC's subordinate bus number is set equal to its
	 * secondary bus number (0x01) at boot, so in theory only bus 1 is
	 * reachable from the RC. Force the subordinate bus number to 0xff,
	 * otherwise no downstream devices will be detected if the
	 * enumeration is applied strictly.
	 */
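	/*
	 * Config offset 0x18 is the type 1 header primary/secondary/
	 * subordinate bus number register.
	 */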
	tmp = readl(priv->dbi_base + 0x18);
	tmp |= (0xff << 16);
	writel(tmp, priv->dbi_base + 0x18);

	/*
	 * FIXME: Force the PCIe RC to Gen1 operation
	 * The RC must be forced into Gen1 mode before bringing the link
	 * up, otherwise no downstream devices are detected. After the
	 * link is up, a managed Gen1->Gen2 transition can be initiated.
	 */
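	/*
	 * 0x7c is the Link Capabilities register of the RC's PCIe
	 * capability block; its low nibble holds the maximum supported
	 * link speed, so writing 0x1 limits the port to Gen1.
	 */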
	tmp = readl(priv->dbi_base + 0x7c);
	tmp &= ~0xf;
	tmp |= 0x1;
	writel(tmp, priv->dbi_base + 0x7c);

	/* LTSSM enable, starting link. */
	setbits_le32(&iomuxc_regs->gpr[12], IOMUXC_GPR12_APPS_LTSSM_ENABLE);

	while (!imx6_pcie_link_up(priv)) {
		udelay(10);
		count++;
		if (count >= 4000) {
#ifdef CONFIG_PCI_SCAN_SHOW
			puts("PCI: pcie phy link never came up\n");
#endif
			debug("DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
			      readl(priv->dbi_base + PCIE_PHY_DEBUG_R0),
			      readl(priv->dbi_base + PCIE_PHY_DEBUG_R1));
			return -EINVAL;
		}
	}

	return 0;
}

#if !CONFIG_IS_ENABLED(DM_PCI)
static struct imx_pcie_priv imx_pcie_priv = {
	.dbi_base = (void __iomem *)MX6_DBI_ADDR,
	.cfg_base = (void __iomem *)MX6_ROOT_ADDR,
};

static struct imx_pcie_priv *priv = &imx_pcie_priv;

static int imx_pcie_read_config(struct pci_controller *hose, pci_dev_t d,
				int where, u32 *val)
{
	struct imx_pcie_priv *priv = hose->priv_data;

	return imx_pcie_read_cfg(priv, d, where, val);
}

static int imx_pcie_write_config(struct pci_controller *hose, pci_dev_t d,
				 int where, u32 val)
{
	struct imx_pcie_priv *priv = hose->priv_data;

	return imx_pcie_write_cfg(priv, d, where, val);
}

void imx_pcie_init(void)
{
	/* Static instance of the controller. */
	static struct pci_controller pcc;
	struct pci_controller *hose = &pcc;
	int ret;

	memset(&pcc, 0, sizeof(pcc));

	hose->priv_data = priv;

	/* PCI I/O space */
	pci_set_region(&hose->regions[0],
		       MX6_IO_ADDR, MX6_IO_ADDR,
		       MX6_IO_SIZE, PCI_REGION_IO);

	/* PCI memory space */
	pci_set_region(&hose->regions[1],
		       MX6_MEM_ADDR, MX6_MEM_ADDR,
		       MX6_MEM_SIZE, PCI_REGION_MEM);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       MMDC0_ARB_BASE_ADDR, MMDC0_ARB_BASE_ADDR,
		       0xefffffff, PCI_REGION_MEM | PCI_REGION_SYS_MEMORY);

	hose->region_count = 3;

	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    imx_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    imx_pcie_write_config);

	/* Start the controller. */
	ret = imx_pcie_link_up(priv);

	if (!ret) {
		pci_register_hose(hose);
		hose->last_busno = pci_hose_scan(hose);
	}
}

void imx_pcie_remove(void)
{
	imx6_pcie_assert_core_reset(priv, true);
}

/* Probe function. */
void pci_init_board(void)
{
	imx_pcie_init();
}
#else
static int imx_pcie_dm_read_config(struct udevice *dev, pci_dev_t bdf,
				   uint offset, ulong *value,
				   enum pci_size_t size)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);
	u32 tmpval;
	int ret;

	ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval);
	if (ret)
		return ret;

	*value = pci_conv_32_to_size(tmpval, offset, size);
	return 0;
}

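/*
 * Sub-dword writes are handled by reading back the containing dword and
 * merging in the new bytes via pci_conv_size_to_32() before writing it.
 */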
static int imx_pcie_dm_write_config(struct udevice *dev, pci_dev_t bdf,
				    uint offset, ulong value,
				    enum pci_size_t size)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);
	u32 tmpval, newval;
	int ret;

	ret = imx_pcie_read_cfg(priv, bdf, offset, &tmpval);
	if (ret)
		return ret;

	newval = pci_conv_size_to_32(tmpval, value, offset, size);
	return imx_pcie_write_cfg(priv, bdf, offset, newval);
}

static int imx_pcie_dm_probe(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

	return imx_pcie_link_up(priv);
}

static int imx_pcie_dm_remove(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

	imx6_pcie_assert_core_reset(priv, true);

	return 0;
}

static int imx_pcie_ofdata_to_platdata(struct udevice *dev)
{
	struct imx_pcie_priv *priv = dev_get_priv(dev);

	priv->dbi_base = (void __iomem *)devfdt_get_addr_index(dev, 0);
	priv->cfg_base = (void __iomem *)devfdt_get_addr_index(dev, 1);
	if (!priv->dbi_base || !priv->cfg_base)
		return -EINVAL;

	return 0;
}

static const struct dm_pci_ops imx_pcie_ops = {
	.read_config = imx_pcie_dm_read_config,
	.write_config = imx_pcie_dm_write_config,
};

static const struct udevice_id imx_pcie_ids[] = {
	{ .compatible = "fsl,imx6q-pcie" },
	{ }
};

U_BOOT_DRIVER(imx_pcie) = {
	.name = "imx_pcie",
	.id = UCLASS_PCI,
	.of_match = imx_pcie_ids,
	.ops = &imx_pcie_ops,
	.probe = imx_pcie_dm_probe,
	.remove = imx_pcie_dm_remove,
	.ofdata_to_platdata = imx_pcie_ofdata_to_platdata,
	.priv_auto_alloc_size = sizeof(struct imx_pcie_priv),
	.flags = DM_FLAG_OS_PREPARE,
};
#endif