1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * PCIe host controller driver for Tegra SoCs
4 *
5 * Copyright (c) 2010, CompuLab, Ltd.
6 * Author: Mike Rapoport <mike@compulab.co.il>
7 *
8 * Based on NVIDIA PCIe driver
9 * Copyright (c) 2008-2009, NVIDIA Corporation.
10 *
11 * Bits taken from arch/arm/mach-dove/pcie.c
12 *
13 * Author: Thierry Reding <treding@nvidia.com>
14 */
15
16 #include <linux/clk.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/export.h>
20 #include <linux/gpio/consumer.h>
21 #include <linux/interrupt.h>
22 #include <linux/iopoll.h>
23 #include <linux/irq.h>
24 #include <linux/irqdomain.h>
25 #include <linux/kernel.h>
26 #include <linux/init.h>
27 #include <linux/module.h>
28 #include <linux/msi.h>
29 #include <linux/of_address.h>
30 #include <linux/of_pci.h>
31 #include <linux/of_platform.h>
32 #include <linux/pci.h>
33 #include <linux/phy/phy.h>
34 #include <linux/pinctrl/consumer.h>
35 #include <linux/platform_device.h>
36 #include <linux/reset.h>
37 #include <linux/sizes.h>
38 #include <linux/slab.h>
39 #include <linux/vmalloc.h>
40 #include <linux/regulator/consumer.h>
41
42 #include <soc/tegra/cpuidle.h>
43 #include <soc/tegra/pmc.h>
44
45 #include "../pci.h"
46
47 #define INT_PCI_MSI_NR (8 * 32)
48
49 /* register definitions */
50
51 #define AFI_AXI_BAR0_SZ 0x00
52 #define AFI_AXI_BAR1_SZ 0x04
53 #define AFI_AXI_BAR2_SZ 0x08
54 #define AFI_AXI_BAR3_SZ 0x0c
55 #define AFI_AXI_BAR4_SZ 0x10
56 #define AFI_AXI_BAR5_SZ 0x14
57
58 #define AFI_AXI_BAR0_START 0x18
59 #define AFI_AXI_BAR1_START 0x1c
60 #define AFI_AXI_BAR2_START 0x20
61 #define AFI_AXI_BAR3_START 0x24
62 #define AFI_AXI_BAR4_START 0x28
63 #define AFI_AXI_BAR5_START 0x2c
64
65 #define AFI_FPCI_BAR0 0x30
66 #define AFI_FPCI_BAR1 0x34
67 #define AFI_FPCI_BAR2 0x38
68 #define AFI_FPCI_BAR3 0x3c
69 #define AFI_FPCI_BAR4 0x40
70 #define AFI_FPCI_BAR5 0x44
71
72 #define AFI_CACHE_BAR0_SZ 0x48
73 #define AFI_CACHE_BAR0_ST 0x4c
74 #define AFI_CACHE_BAR1_SZ 0x50
75 #define AFI_CACHE_BAR1_ST 0x54
76
77 #define AFI_MSI_BAR_SZ 0x60
78 #define AFI_MSI_FPCI_BAR_ST 0x64
79 #define AFI_MSI_AXI_BAR_ST 0x68
80
81 #define AFI_MSI_VEC0 0x6c
82 #define AFI_MSI_VEC1 0x70
83 #define AFI_MSI_VEC2 0x74
84 #define AFI_MSI_VEC3 0x78
85 #define AFI_MSI_VEC4 0x7c
86 #define AFI_MSI_VEC5 0x80
87 #define AFI_MSI_VEC6 0x84
88 #define AFI_MSI_VEC7 0x88
89
90 #define AFI_MSI_EN_VEC0 0x8c
91 #define AFI_MSI_EN_VEC1 0x90
92 #define AFI_MSI_EN_VEC2 0x94
93 #define AFI_MSI_EN_VEC3 0x98
94 #define AFI_MSI_EN_VEC4 0x9c
95 #define AFI_MSI_EN_VEC5 0xa0
96 #define AFI_MSI_EN_VEC6 0xa4
97 #define AFI_MSI_EN_VEC7 0xa8
98
99 #define AFI_CONFIGURATION 0xac
100 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
101 #define AFI_CONFIGURATION_CLKEN_OVERRIDE (1 << 31)
102
103 #define AFI_FPCI_ERROR_MASKS 0xb0
104
105 #define AFI_INTR_MASK 0xb4
106 #define AFI_INTR_MASK_INT_MASK (1 << 0)
107 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
108
109 #define AFI_INTR_CODE 0xb8
110 #define AFI_INTR_CODE_MASK 0xf
111 #define AFI_INTR_INI_SLAVE_ERROR 1
112 #define AFI_INTR_INI_DECODE_ERROR 2
113 #define AFI_INTR_TARGET_ABORT 3
114 #define AFI_INTR_MASTER_ABORT 4
115 #define AFI_INTR_INVALID_WRITE 5
116 #define AFI_INTR_LEGACY 6
117 #define AFI_INTR_FPCI_DECODE_ERROR 7
118 #define AFI_INTR_AXI_DECODE_ERROR 8
119 #define AFI_INTR_FPCI_TIMEOUT 9
120 #define AFI_INTR_PE_PRSNT_SENSE 10
121 #define AFI_INTR_PE_CLKREQ_SENSE 11
122 #define AFI_INTR_CLKCLAMP_SENSE 12
123 #define AFI_INTR_RDY4PD_SENSE 13
124 #define AFI_INTR_P2P_ERROR 14
125
126 #define AFI_INTR_SIGNATURE 0xbc
127 #define AFI_UPPER_FPCI_ADDRESS 0xc0
128 #define AFI_SM_INTR_ENABLE 0xc4
129 #define AFI_SM_INTR_INTA_ASSERT (1 << 0)
130 #define AFI_SM_INTR_INTB_ASSERT (1 << 1)
131 #define AFI_SM_INTR_INTC_ASSERT (1 << 2)
132 #define AFI_SM_INTR_INTD_ASSERT (1 << 3)
133 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
134 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
135 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
136 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
137
138 #define AFI_AFI_INTR_ENABLE 0xc8
139 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
140 #define AFI_INTR_EN_INI_DECERR (1 << 1)
141 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
142 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
143 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
144 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
145 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
146 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
147 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
148
149 #define AFI_PCIE_PME 0xf0
150
151 #define AFI_PCIE_CONFIG 0x0f8
152 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
153 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
154 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
155 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
156 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
157 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
158 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
159 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
160 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
161 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
162 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
163 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
164 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
165 #define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(x) (1 << ((x) + 29))
166 #define AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL (0x7 << 29)
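/*
 * Note: several of the XBAR_CONFIG_* names above alias the same numeric
 * value. They all describe the same 4-bit field at bits [23:20] (see the
 * _MASK define); each Tegra generation simply names its supported
 * lane-to-root-port assignments differently, see tegra_pcie_get_xbar_config().
 */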
167
168 #define AFI_FUSE 0x104
169 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
170
171 #define AFI_PEX0_CTRL 0x110
172 #define AFI_PEX1_CTRL 0x118
173 #define AFI_PEX_CTRL_RST (1 << 0)
174 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
175 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
176 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
177
178 #define AFI_PLLE_CONTROL 0x160
179 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
180 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
181
182 #define AFI_PEXBIAS_CTRL_0 0x168
183
184 #define RP_ECTL_2_R1 0x00000e84
185 #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff
186
187 #define RP_ECTL_4_R1 0x00000e8c
188 #define RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK (0xffff << 16)
189 #define RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT 16
190
191 #define RP_ECTL_5_R1 0x00000e90
192 #define RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK 0xffffffff
193
194 #define RP_ECTL_6_R1 0x00000e94
195 #define RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK 0xffffffff
196
197 #define RP_ECTL_2_R2 0x00000ea4
198 #define RP_ECTL_2_R2_RX_CTLE_1C_MASK 0xffff
199
200 #define RP_ECTL_4_R2 0x00000eac
201 #define RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK (0xffff << 16)
202 #define RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT 16
203
204 #define RP_ECTL_5_R2 0x00000eb0
205 #define RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK 0xffffffff
206
207 #define RP_ECTL_6_R2 0x00000eb4
208 #define RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK 0xffffffff
209
210 #define RP_VEND_XP 0x00000f00
211 #define RP_VEND_XP_DL_UP (1 << 30)
212 #define RP_VEND_XP_OPPORTUNISTIC_ACK (1 << 27)
213 #define RP_VEND_XP_OPPORTUNISTIC_UPDATEFC (1 << 28)
214 #define RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK (0xff << 18)
215
216 #define RP_VEND_CTL0 0x00000f44
217 #define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK (0xf << 12)
218 #define RP_VEND_CTL0_DSK_RST_PULSE_WIDTH (0x9 << 12)
219
220 #define RP_VEND_CTL1 0x00000f48
221 #define RP_VEND_CTL1_ERPT (1 << 13)
222
223 #define RP_VEND_XP_BIST 0x00000f4c
224 #define RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE (1 << 28)
225
226 #define RP_VEND_CTL2 0x00000fa8
227 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
228
229 #define RP_PRIV_MISC 0x00000fe0
230 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
231 #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
232 #define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK (0x7f << 16)
233 #define RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD (0xf << 16)
234 #define RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE (1 << 23)
235 #define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK (0x7f << 24)
236 #define RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD (0xf << 24)
237 #define RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE (1 << 31)
238
239 #define RP_LINK_CONTROL_STATUS 0x00000090
240 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
241 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
242
243 #define RP_LINK_CONTROL_STATUS_2 0x000000b0
244
245 #define PADS_CTL_SEL 0x0000009c
246
247 #define PADS_CTL 0x000000a0
248 #define PADS_CTL_IDDQ_1L (1 << 0)
249 #define PADS_CTL_TX_DATA_EN_1L (1 << 6)
250 #define PADS_CTL_RX_DATA_EN_1L (1 << 10)
251
252 #define PADS_PLL_CTL_TEGRA20 0x000000b8
253 #define PADS_PLL_CTL_TEGRA30 0x000000b4
254 #define PADS_PLL_CTL_RST_B4SM (1 << 1)
255 #define PADS_PLL_CTL_LOCKDET (1 << 8)
256 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
257 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
258 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
259 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
260 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
261 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
262 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
263 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
264
265 #define PADS_REFCLK_CFG0 0x000000c8
266 #define PADS_REFCLK_CFG1 0x000000cc
267 #define PADS_REFCLK_BIAS 0x000000d0
268
269 /*
270 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
271 * entries, one entry per PCIe port. These field definitions and desired
272 * values aren't in the TRM, but do come from NVIDIA.
273 */
274 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
275 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7
276 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
277 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
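/*
 * Illustrative sketch (not from the TRM, field values hypothetical): with
 * the shifts above, a per-port 16-bit entry would be composed roughly as
 *
 *   entry = (drvi << PADS_REFCLK_CFG_DRVI_SHIFT) |
 *           (predi << PADS_REFCLK_CFG_PREDI_SHIFT) |
 *           (e_term << PADS_REFCLK_CFG_E_TERM_SHIFT) |
 *           (term << PADS_REFCLK_CFG_TERM_SHIFT);
 *
 * The per-SoC pads_refclk_cfg0/pads_refclk_cfg1 values pack two such 16-bit
 * entries per 32-bit register, one entry per port.
 */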
278
279 #define PME_ACK_TIMEOUT 10000
280 #define LINK_RETRAIN_TIMEOUT 100000 /* in usec */
281
282 struct tegra_msi {
283 struct msi_controller chip;
284 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
285 struct irq_domain *domain;
286 struct mutex lock;
287 void *virt;
288 dma_addr_t phys;
289 int irq;
290 };
291
292 /* used to differentiate between Tegra SoC generations */
293 struct tegra_pcie_port_soc {
294 struct {
295 u8 turnoff_bit;
296 u8 ack_bit;
297 } pme;
298 };
299
300 struct tegra_pcie_soc {
301 unsigned int num_ports;
302 const struct tegra_pcie_port_soc *ports;
303 unsigned int msi_base_shift;
304 unsigned long afi_pex2_ctrl;
305 u32 pads_pll_ctl;
306 u32 tx_ref_sel;
307 u32 pads_refclk_cfg0;
308 u32 pads_refclk_cfg1;
309 u32 update_fc_threshold;
310 bool has_pex_clkreq_en;
311 bool has_pex_bias_ctrl;
312 bool has_intr_prsnt_sense;
313 bool has_cml_clk;
314 bool has_gen2;
315 bool force_pca_enable;
316 bool program_uphy;
317 bool update_clamp_threshold;
318 bool program_deskew_time;
319 bool update_fc_timer;
320 bool has_cache_bars;
321 struct {
322 struct {
323 u32 rp_ectl_2_r1;
324 u32 rp_ectl_4_r1;
325 u32 rp_ectl_5_r1;
326 u32 rp_ectl_6_r1;
327 u32 rp_ectl_2_r2;
328 u32 rp_ectl_4_r2;
329 u32 rp_ectl_5_r2;
330 u32 rp_ectl_6_r2;
331 } regs;
332 bool enable;
333 } ectl;
334 };
335
336 static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
337 {
338 return container_of(chip, struct tegra_msi, chip);
339 }
340
341 struct tegra_pcie {
342 struct device *dev;
343
344 void __iomem *pads;
345 void __iomem *afi;
346 void __iomem *cfg;
347 int irq;
348
349 struct resource cs;
350
351 struct clk *pex_clk;
352 struct clk *afi_clk;
353 struct clk *pll_e;
354 struct clk *cml_clk;
355
356 struct reset_control *pex_rst;
357 struct reset_control *afi_rst;
358 struct reset_control *pcie_xrst;
359
360 bool legacy_phy;
361 struct phy *phy;
362
363 struct tegra_msi msi;
364
365 struct list_head ports;
366 u32 xbar_config;
367
368 struct regulator_bulk_data *supplies;
369 unsigned int num_supplies;
370
371 const struct tegra_pcie_soc *soc;
372 struct dentry *debugfs;
373 };
374
375 struct tegra_pcie_port {
376 struct tegra_pcie *pcie;
377 struct device_node *np;
378 struct list_head list;
379 struct resource regs;
380 void __iomem *base;
381 unsigned int index;
382 unsigned int lanes;
383
384 struct phy **phys;
385
386 struct gpio_desc *reset_gpio;
387 };
388
389 struct tegra_pcie_bus {
390 struct list_head list;
391 unsigned int nr;
392 };
393
394 static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
395 unsigned long offset)
396 {
397 writel(value, pcie->afi + offset);
398 }
399
400 static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
401 {
402 return readl(pcie->afi + offset);
403 }
404
405 static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
406 unsigned long offset)
407 {
408 writel(value, pcie->pads + offset);
409 }
410
411 static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
412 {
413 return readl(pcie->pads + offset);
414 }
415
416 /*
417 * The configuration space mapping on Tegra is somewhat similar to the ECAM
418 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
419 * register accesses are mapped:
420 *
421 * [27:24] extended register number
422 * [23:16] bus number
423 * [15:11] device number
424 * [10: 8] function number
425 * [ 7: 0] register number
426 *
427 * Mapping the whole extended configuration space would require 256 MiB of
428 * virtual address space, only a small part of which will actually be used.
429 *
430 * To work around this, a 4 KiB region is used to generate the required
431 * configuration transaction with relevant B:D:F and register offset values.
432 * This is achieved by dynamically programming base address and size of
433 * AFI_AXI_BAR used for end point config space mapping to make sure that the
434 * address (access to which generates correct config transaction) falls in
435 * this 4 KiB region.
436 */
437 static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
438 unsigned int where)
439 {
440 return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) |
441 (PCI_FUNC(devfn) << 8) | (where & 0xff);
442 }
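/*
 * Worked example (illustrative): for bus 2, devfn 0 and where = 0x104, the
 * encoding above yields (0x1 << 24) | (2 << 16) | 0x04 = 0x01020004. The
 * bits above [11:0] select which 4 KiB page AFI_FPCI_BAR0 is pointed at in
 * tegra_pcie_map_bus(), and the low 12 bits become the offset within the
 * remapped 4 KiB window.
 */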
443
444 static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
445 unsigned int devfn,
446 int where)
447 {
448 struct tegra_pcie *pcie = bus->sysdata;
449 void __iomem *addr = NULL;
450
451 if (bus->number == 0) {
452 unsigned int slot = PCI_SLOT(devfn);
453 struct tegra_pcie_port *port;
454
455 list_for_each_entry(port, &pcie->ports, list) {
456 if (port->index + 1 == slot) {
457 addr = port->base + (where & ~3);
458 break;
459 }
460 }
461 } else {
462 unsigned int offset;
463 u32 base;
464
465 offset = tegra_pcie_conf_offset(bus->number, devfn, where);
466
467 /* move 4 KiB window to offset within the FPCI region */
468 base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
469 afi_writel(pcie, base, AFI_FPCI_BAR0);
470
471 /* move to correct offset within the 4 KiB page */
472 addr = pcie->cfg + (offset & (SZ_4K - 1));
473 }
474
475 return addr;
476 }
477
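/*
 * Accesses to the root ports (bus 0) go through the 32-bit generic config
 * accessors below, while accesses to downstream devices use the regular
 * sized accessors via the window programmed in tegra_pcie_map_bus().
 */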
478 static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
479 int where, int size, u32 *value)
480 {
481 if (bus->number == 0)
482 return pci_generic_config_read32(bus, devfn, where, size,
483 value);
484
485 return pci_generic_config_read(bus, devfn, where, size, value);
486 }
487
488 static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
489 int where, int size, u32 value)
490 {
491 if (bus->number == 0)
492 return pci_generic_config_write32(bus, devfn, where, size,
493 value);
494
495 return pci_generic_config_write(bus, devfn, where, size, value);
496 }
497
498 static struct pci_ops tegra_pcie_ops = {
499 .map_bus = tegra_pcie_map_bus,
500 .read = tegra_pcie_config_read,
501 .write = tegra_pcie_config_write,
502 };
503
504 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
505 {
506 const struct tegra_pcie_soc *soc = port->pcie->soc;
507 unsigned long ret = 0;
508
509 switch (port->index) {
510 case 0:
511 ret = AFI_PEX0_CTRL;
512 break;
513
514 case 1:
515 ret = AFI_PEX1_CTRL;
516 break;
517
518 case 2:
519 ret = soc->afi_pex2_ctrl;
520 break;
521 }
522
523 return ret;
524 }
525
526 static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
527 {
528 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
529 unsigned long value;
530
531 /* pulse reset signal */
532 if (port->reset_gpio) {
533 gpiod_set_value(port->reset_gpio, 1);
534 } else {
535 value = afi_readl(port->pcie, ctrl);
536 value &= ~AFI_PEX_CTRL_RST;
537 afi_writel(port->pcie, value, ctrl);
538 }
539
540 usleep_range(1000, 2000);
541
542 if (port->reset_gpio) {
543 gpiod_set_value(port->reset_gpio, 0);
544 } else {
545 value = afi_readl(port->pcie, ctrl);
546 value |= AFI_PEX_CTRL_RST;
547 afi_writel(port->pcie, value, ctrl);
548 }
549 }
550
551 static void tegra_pcie_enable_rp_features(struct tegra_pcie_port *port)
552 {
553 const struct tegra_pcie_soc *soc = port->pcie->soc;
554 u32 value;
555
556 /* Enable AER capability */
557 value = readl(port->base + RP_VEND_CTL1);
558 value |= RP_VEND_CTL1_ERPT;
559 writel(value, port->base + RP_VEND_CTL1);
560
561 /* Optimal settings to enhance bandwidth */
562 value = readl(port->base + RP_VEND_XP);
563 value |= RP_VEND_XP_OPPORTUNISTIC_ACK;
564 value |= RP_VEND_XP_OPPORTUNISTIC_UPDATEFC;
565 writel(value, port->base + RP_VEND_XP);
566
567 /*
568 * LTSSM will wait for DLLP to finish before entering L1 or L2,
569 * to avoid truncation of PM messages which results in receiver errors
570 */
571 value = readl(port->base + RP_VEND_XP_BIST);
572 value |= RP_VEND_XP_BIST_GOTO_L1_L2_AFTER_DLLP_DONE;
573 writel(value, port->base + RP_VEND_XP_BIST);
574
575 value = readl(port->base + RP_PRIV_MISC);
576 value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_ENABLE;
577 value |= RP_PRIV_MISC_TMS_CLK_CLAMP_ENABLE;
578
579 if (soc->update_clamp_threshold) {
580 value &= ~(RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD_MASK |
581 RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD_MASK);
582 value |= RP_PRIV_MISC_CTLR_CLK_CLAMP_THRESHOLD |
583 RP_PRIV_MISC_TMS_CLK_CLAMP_THRESHOLD;
584 }
585
586 writel(value, port->base + RP_PRIV_MISC);
587 }
588
589 static void tegra_pcie_program_ectl_settings(struct tegra_pcie_port *port)
590 {
591 const struct tegra_pcie_soc *soc = port->pcie->soc;
592 u32 value;
593
594 value = readl(port->base + RP_ECTL_2_R1);
595 value &= ~RP_ECTL_2_R1_RX_CTLE_1C_MASK;
596 value |= soc->ectl.regs.rp_ectl_2_r1;
597 writel(value, port->base + RP_ECTL_2_R1);
598
599 value = readl(port->base + RP_ECTL_4_R1);
600 value &= ~RP_ECTL_4_R1_RX_CDR_CTRL_1C_MASK;
601 value |= soc->ectl.regs.rp_ectl_4_r1 <<
602 RP_ECTL_4_R1_RX_CDR_CTRL_1C_SHIFT;
603 writel(value, port->base + RP_ECTL_4_R1);
604
605 value = readl(port->base + RP_ECTL_5_R1);
606 value &= ~RP_ECTL_5_R1_RX_EQ_CTRL_L_1C_MASK;
607 value |= soc->ectl.regs.rp_ectl_5_r1;
608 writel(value, port->base + RP_ECTL_5_R1);
609
610 value = readl(port->base + RP_ECTL_6_R1);
611 value &= ~RP_ECTL_6_R1_RX_EQ_CTRL_H_1C_MASK;
612 value |= soc->ectl.regs.rp_ectl_6_r1;
613 writel(value, port->base + RP_ECTL_6_R1);
614
615 value = readl(port->base + RP_ECTL_2_R2);
616 value &= ~RP_ECTL_2_R2_RX_CTLE_1C_MASK;
617 value |= soc->ectl.regs.rp_ectl_2_r2;
618 writel(value, port->base + RP_ECTL_2_R2);
619
620 value = readl(port->base + RP_ECTL_4_R2);
621 value &= ~RP_ECTL_4_R2_RX_CDR_CTRL_1C_MASK;
622 value |= soc->ectl.regs.rp_ectl_4_r2 <<
623 RP_ECTL_4_R2_RX_CDR_CTRL_1C_SHIFT;
624 writel(value, port->base + RP_ECTL_4_R2);
625
626 value = readl(port->base + RP_ECTL_5_R2);
627 value &= ~RP_ECTL_5_R2_RX_EQ_CTRL_L_1C_MASK;
628 value |= soc->ectl.regs.rp_ectl_5_r2;
629 writel(value, port->base + RP_ECTL_5_R2);
630
631 value = readl(port->base + RP_ECTL_6_R2);
632 value &= ~RP_ECTL_6_R2_RX_EQ_CTRL_H_1C_MASK;
633 value |= soc->ectl.regs.rp_ectl_6_r2;
634 writel(value, port->base + RP_ECTL_6_R2);
635 }
636
637 static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port)
638 {
639 const struct tegra_pcie_soc *soc = port->pcie->soc;
640 u32 value;
641
642 /*
643 * Sometimes link speed change from Gen2 to Gen1 fails due to
644 * instability in deskew logic on lane-0. Increase the deskew
645 * retry time to resolve this issue.
646 */
647 if (soc->program_deskew_time) {
648 value = readl(port->base + RP_VEND_CTL0);
649 value &= ~RP_VEND_CTL0_DSK_RST_PULSE_WIDTH_MASK;
650 value |= RP_VEND_CTL0_DSK_RST_PULSE_WIDTH;
651 writel(value, port->base + RP_VEND_CTL0);
652 }
653
654 if (soc->update_fc_timer) {
655 value = readl(port->base + RP_VEND_XP);
656 value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK;
657 value |= soc->update_fc_threshold;
658 writel(value, port->base + RP_VEND_XP);
659 }
660
661 /*
662 * The PCIe link doesn't come up with a few legacy PCIe endpoints if
663 * the root port advertises both Gen-1 and Gen-2 speeds on Tegra.
664 * Hence, the strategy followed here is to initially advertise only
665 * Gen-1 and, once the link is up, retrain it to Gen-2 speed.
666 */
667 value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
668 value &= ~PCI_EXP_LNKSTA_CLS;
669 value |= PCI_EXP_LNKSTA_CLS_2_5GB;
670 writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
671 }
672
673 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
674 {
675 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
676 const struct tegra_pcie_soc *soc = port->pcie->soc;
677 unsigned long value;
678
679 /* enable reference clock */
680 value = afi_readl(port->pcie, ctrl);
681 value |= AFI_PEX_CTRL_REFCLK_EN;
682
683 if (soc->has_pex_clkreq_en)
684 value |= AFI_PEX_CTRL_CLKREQ_EN;
685
686 value |= AFI_PEX_CTRL_OVERRIDE_EN;
687
688 afi_writel(port->pcie, value, ctrl);
689
690 tegra_pcie_port_reset(port);
691
692 if (soc->force_pca_enable) {
693 value = readl(port->base + RP_VEND_CTL2);
694 value |= RP_VEND_CTL2_PCA_ENABLE;
695 writel(value, port->base + RP_VEND_CTL2);
696 }
697
698 tegra_pcie_enable_rp_features(port);
699
700 if (soc->ectl.enable)
701 tegra_pcie_program_ectl_settings(port);
702
703 tegra_pcie_apply_sw_fixup(port);
704 }
705
706 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
707 {
708 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
709 const struct tegra_pcie_soc *soc = port->pcie->soc;
710 unsigned long value;
711
712 /* assert port reset */
713 value = afi_readl(port->pcie, ctrl);
714 value &= ~AFI_PEX_CTRL_RST;
715 afi_writel(port->pcie, value, ctrl);
716
717 /* disable reference clock */
718 value = afi_readl(port->pcie, ctrl);
719
720 if (soc->has_pex_clkreq_en)
721 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
722
723 value &= ~AFI_PEX_CTRL_REFCLK_EN;
724 afi_writel(port->pcie, value, ctrl);
725
726 /* disable PCIe port and set CLKREQ# as GPIO to allow PLLE power down */
727 value = afi_readl(port->pcie, AFI_PCIE_CONFIG);
728 value |= AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
729 value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
730 afi_writel(port->pcie, value, AFI_PCIE_CONFIG);
731 }
732
733 static void tegra_pcie_port_free(struct tegra_pcie_port *port)
734 {
735 struct tegra_pcie *pcie = port->pcie;
736 struct device *dev = pcie->dev;
737
738 devm_iounmap(dev, port->base);
739 devm_release_mem_region(dev, port->regs.start,
740 resource_size(&port->regs));
741 list_del(&port->list);
742 devm_kfree(dev, port);
743 }
744
745 /* Tegra PCIE root complex wrongly reports device class */
746 static void tegra_pcie_fixup_class(struct pci_dev *dev)
747 {
748 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
749 }
750 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
751 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
752 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
753 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
754
755 /* Tegra20 and Tegra30 PCIE requires relaxed ordering */
756 static void tegra_pcie_relax_enable(struct pci_dev *dev)
757 {
758 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
759 }
760 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
761 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
762 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
763 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
764
765 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
766 {
767 struct tegra_pcie *pcie = pdev->bus->sysdata;
768 int irq;
769
770 tegra_cpuidle_pcie_irqs_in_use();
771
772 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
773 if (!irq)
774 irq = pcie->irq;
775
776 return irq;
777 }
778
779 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
780 {
781 const char *err_msg[] = {
782 "Unknown",
783 "AXI slave error",
784 "AXI decode error",
785 "Target abort",
786 "Master abort",
787 "Invalid write",
788 "Legacy interrupt",
789 "Response decoding error",
790 "AXI response decoding error",
791 "Transaction timeout",
792 "Slot present pin change",
793 "Slot clock request change",
794 "TMS clock ramp change",
795 "TMS ready for power down",
796 "Peer2Peer error",
797 };
798 struct tegra_pcie *pcie = arg;
799 struct device *dev = pcie->dev;
800 u32 code, signature;
801
802 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
803 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
804 afi_writel(pcie, 0, AFI_INTR_CODE);
805
806 if (code == AFI_INTR_LEGACY)
807 return IRQ_NONE;
808
809 if (code >= ARRAY_SIZE(err_msg))
810 code = 0;
811
812 /*
813 * do not pollute kernel log with master abort reports since they
814 * happen a lot during enumeration
815 */
816 if (code == AFI_INTR_MASTER_ABORT || code == AFI_INTR_PE_PRSNT_SENSE)
817 dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
818 else
819 dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
820
821 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
822 code == AFI_INTR_FPCI_DECODE_ERROR) {
823 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
824 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
825
826 if (code == AFI_INTR_MASTER_ABORT)
827 dev_dbg(dev, " FPCI address: %10llx\n", address);
828 else
829 dev_err(dev, " FPCI address: %10llx\n", address);
830 }
831
832 return IRQ_HANDLED;
833 }
834
835 /*
836 * FPCI map is as follows:
837 * - 0xfdfc000000: I/O space
838 * - 0xfdfe000000: type 0 configuration space
839 * - 0xfdff000000: type 1 configuration space
840 * - 0xfe00000000: type 0 extended configuration space
841 * - 0xfe10000000: type 1 extended configuration space
842 */
843 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
844 {
845 u32 size;
846 struct resource_entry *entry;
847 struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
848
849 /* Bar 0: type 1 extended configuration space */
850 size = resource_size(&pcie->cs);
851 afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
852 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
853
854 resource_list_for_each_entry(entry, &bridge->windows) {
855 u32 fpci_bar, axi_address;
856 struct resource *res = entry->res;
857
858 size = resource_size(res);
859
860 switch (resource_type(res)) {
861 case IORESOURCE_IO:
862 /* Bar 1: downstream IO bar */
863 fpci_bar = 0xfdfc0000;
864 axi_address = pci_pio_to_address(res->start);
865 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
866 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
867 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
868 break;
869 case IORESOURCE_MEM:
870 fpci_bar = (((res->start >> 12) & 0x0fffffff) << 4) | 0x1;
871 axi_address = res->start;
872
873 if (res->flags & IORESOURCE_PREFETCH) {
874 /* Bar 2: prefetchable memory BAR */
875 afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
876 afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
877 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
878
879 } else {
880 /* Bar 3: non prefetchable memory BAR */
881 afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
882 afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
883 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
884 }
885 break;
886 }
887 }
888
889 /* NULL out the remaining BARs as they are not used */
890 afi_writel(pcie, 0, AFI_AXI_BAR4_START);
891 afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
892 afi_writel(pcie, 0, AFI_FPCI_BAR4);
893
894 afi_writel(pcie, 0, AFI_AXI_BAR5_START);
895 afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
896 afi_writel(pcie, 0, AFI_FPCI_BAR5);
897
898 if (pcie->soc->has_cache_bars) {
899 /* map all upstream transactions as uncached */
900 afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
901 afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
902 afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
903 afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
904 }
905
906 /* MSI translations are setup only when needed */
907 afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
908 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
909 afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
910 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
911 }
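/*
 * Illustrative example (hypothetical window address): a non-prefetchable
 * memory window starting at 0x13000000 would yield
 * fpci_bar = ((0x13000000 >> 12) << 4) | 0x1 = 0x00130001 for AFI_FPCI_BAR3,
 * i.e. the AXI start address expressed in 4 KiB units shifted into bits
 * [31:4], with the low bit set as done in the loop above.
 */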
912
913 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
914 {
915 const struct tegra_pcie_soc *soc = pcie->soc;
916 u32 value;
917
918 timeout = jiffies + msecs_to_jiffies(timeout);
919
920 while (time_before(jiffies, timeout)) {
921 value = pads_readl(pcie, soc->pads_pll_ctl);
922 if (value & PADS_PLL_CTL_LOCKDET)
923 return 0;
924 }
925
926 return -ETIMEDOUT;
927 }
928
929 static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
930 {
931 struct device *dev = pcie->dev;
932 const struct tegra_pcie_soc *soc = pcie->soc;
933 u32 value;
934 int err;
935
936 /* initialize internal PHY, enable up to 16 PCIE lanes */
937 pads_writel(pcie, 0x0, PADS_CTL_SEL);
938
939 /* override IDDQ to 1 on all 4 lanes */
940 value = pads_readl(pcie, PADS_CTL);
941 value |= PADS_CTL_IDDQ_1L;
942 pads_writel(pcie, value, PADS_CTL);
943
944 /*
945 * Set up PHY PLL inputs select PLLE output as refclock,
946 * set TX ref sel to div10 (not div5).
947 */
948 value = pads_readl(pcie, soc->pads_pll_ctl);
949 value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
950 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
951 pads_writel(pcie, value, soc->pads_pll_ctl);
952
953 /* reset PLL */
954 value = pads_readl(pcie, soc->pads_pll_ctl);
955 value &= ~PADS_PLL_CTL_RST_B4SM;
956 pads_writel(pcie, value, soc->pads_pll_ctl);
957
958 usleep_range(20, 100);
959
960 /* take PLL out of reset */
961 value = pads_readl(pcie, soc->pads_pll_ctl);
962 value |= PADS_PLL_CTL_RST_B4SM;
963 pads_writel(pcie, value, soc->pads_pll_ctl);
964
965 /* wait for the PLL to lock */
966 err = tegra_pcie_pll_wait(pcie, 500);
967 if (err < 0) {
968 dev_err(dev, "PLL failed to lock: %d\n", err);
969 return err;
970 }
971
972 /* turn off IDDQ override */
973 value = pads_readl(pcie, PADS_CTL);
974 value &= ~PADS_CTL_IDDQ_1L;
975 pads_writel(pcie, value, PADS_CTL);
976
977 /* enable TX/RX data */
978 value = pads_readl(pcie, PADS_CTL);
979 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
980 pads_writel(pcie, value, PADS_CTL);
981
982 return 0;
983 }
984
985 static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
986 {
987 const struct tegra_pcie_soc *soc = pcie->soc;
988 u32 value;
989
990 /* disable TX/RX data */
991 value = pads_readl(pcie, PADS_CTL);
992 value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
993 pads_writel(pcie, value, PADS_CTL);
994
995 /* override IDDQ */
996 value = pads_readl(pcie, PADS_CTL);
997 value |= PADS_CTL_IDDQ_1L;
998 pads_writel(pcie, value, PADS_CTL);
999
1000 /* reset PLL */
1001 value = pads_readl(pcie, soc->pads_pll_ctl);
1002 value &= ~PADS_PLL_CTL_RST_B4SM;
1003 pads_writel(pcie, value, soc->pads_pll_ctl);
1004
1005 usleep_range(20, 100);
1006
1007 return 0;
1008 }
1009
1010 static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
1011 {
1012 struct device *dev = port->pcie->dev;
1013 unsigned int i;
1014 int err;
1015
1016 for (i = 0; i < port->lanes; i++) {
1017 err = phy_power_on(port->phys[i]);
1018 if (err < 0) {
1019 dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
1020 return err;
1021 }
1022 }
1023
1024 return 0;
1025 }
1026
1027 static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
1028 {
1029 struct device *dev = port->pcie->dev;
1030 unsigned int i;
1031 int err;
1032
1033 for (i = 0; i < port->lanes; i++) {
1034 err = phy_power_off(port->phys[i]);
1035 if (err < 0) {
1036 dev_err(dev, "failed to power off PHY#%u: %d\n", i,
1037 err);
1038 return err;
1039 }
1040 }
1041
1042 return 0;
1043 }
1044
1045 static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
1046 {
1047 struct device *dev = pcie->dev;
1048 struct tegra_pcie_port *port;
1049 int err;
1050
1051 if (pcie->legacy_phy) {
1052 if (pcie->phy)
1053 err = phy_power_on(pcie->phy);
1054 else
1055 err = tegra_pcie_phy_enable(pcie);
1056
1057 if (err < 0)
1058 dev_err(dev, "failed to power on PHY: %d\n", err);
1059
1060 return err;
1061 }
1062
1063 list_for_each_entry(port, &pcie->ports, list) {
1064 err = tegra_pcie_port_phy_power_on(port);
1065 if (err < 0) {
1066 dev_err(dev,
1067 "failed to power on PCIe port %u PHY: %d\n",
1068 port->index, err);
1069 return err;
1070 }
1071 }
1072
1073 return 0;
1074 }
1075
1076 static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
1077 {
1078 struct device *dev = pcie->dev;
1079 struct tegra_pcie_port *port;
1080 int err;
1081
1082 if (pcie->legacy_phy) {
1083 if (pcie->phy)
1084 err = phy_power_off(pcie->phy);
1085 else
1086 err = tegra_pcie_phy_disable(pcie);
1087
1088 if (err < 0)
1089 dev_err(dev, "failed to power off PHY: %d\n", err);
1090
1091 return err;
1092 }
1093
1094 list_for_each_entry(port, &pcie->ports, list) {
1095 err = tegra_pcie_port_phy_power_off(port);
1096 if (err < 0) {
1097 dev_err(dev,
1098 "failed to power off PCIe port %u PHY: %d\n",
1099 port->index, err);
1100 return err;
1101 }
1102 }
1103
1104 return 0;
1105 }
1106
1107 static void tegra_pcie_enable_controller(struct tegra_pcie *pcie)
1108 {
1109 const struct tegra_pcie_soc *soc = pcie->soc;
1110 struct tegra_pcie_port *port;
1111 unsigned long value;
1112
1113 /* enable PLL power down */
1114 if (pcie->phy) {
1115 value = afi_readl(pcie, AFI_PLLE_CONTROL);
1116 value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
1117 value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
1118 afi_writel(pcie, value, AFI_PLLE_CONTROL);
1119 }
1120
1121 /* power down PCIe slot clock bias pad */
1122 if (soc->has_pex_bias_ctrl)
1123 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
1124
1125 /* configure mode and disable all ports */
1126 value = afi_readl(pcie, AFI_PCIE_CONFIG);
1127 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
1128 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
1129 value |= AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO_ALL;
1130
1131 list_for_each_entry(port, &pcie->ports, list) {
1132 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
1133 value &= ~AFI_PCIE_CONFIG_PCIE_CLKREQ_GPIO(port->index);
1134 }
1135
1136 afi_writel(pcie, value, AFI_PCIE_CONFIG);
1137
1138 if (soc->has_gen2) {
1139 value = afi_readl(pcie, AFI_FUSE);
1140 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
1141 afi_writel(pcie, value, AFI_FUSE);
1142 } else {
1143 value = afi_readl(pcie, AFI_FUSE);
1144 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
1145 afi_writel(pcie, value, AFI_FUSE);
1146 }
1147
1148 /* Disable AFI dynamic clock gating and enable PCIe */
1149 value = afi_readl(pcie, AFI_CONFIGURATION);
1150 value |= AFI_CONFIGURATION_EN_FPCI;
1151 value |= AFI_CONFIGURATION_CLKEN_OVERRIDE;
1152 afi_writel(pcie, value, AFI_CONFIGURATION);
1153
1154 value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
1155 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
1156 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
1157
1158 if (soc->has_intr_prsnt_sense)
1159 value |= AFI_INTR_EN_PRSNT_SENSE;
1160
1161 afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
1162 afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
1163
1164 /* don't enable MSI for now, only when needed */
1165 afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
1166
1167 /* disable all exceptions */
1168 afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
1169 }
1170
1171 static void tegra_pcie_power_off(struct tegra_pcie *pcie)
1172 {
1173 struct device *dev = pcie->dev;
1174 const struct tegra_pcie_soc *soc = pcie->soc;
1175 int err;
1176
1177 reset_control_assert(pcie->afi_rst);
1178
1179 clk_disable_unprepare(pcie->pll_e);
1180 if (soc->has_cml_clk)
1181 clk_disable_unprepare(pcie->cml_clk);
1182 clk_disable_unprepare(pcie->afi_clk);
1183
1184 if (!dev->pm_domain)
1185 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1186
1187 err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1188 if (err < 0)
1189 dev_warn(dev, "failed to disable regulators: %d\n", err);
1190 }
1191
1192 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1193 {
1194 struct device *dev = pcie->dev;
1195 const struct tegra_pcie_soc *soc = pcie->soc;
1196 int err;
1197
1198 reset_control_assert(pcie->pcie_xrst);
1199 reset_control_assert(pcie->afi_rst);
1200 reset_control_assert(pcie->pex_rst);
1201
1202 if (!dev->pm_domain)
1203 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1204
1205 /* enable regulators */
1206 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1207 if (err < 0)
1208 dev_err(dev, "failed to enable regulators: %d\n", err);
1209
1210 if (!dev->pm_domain) {
1211 err = tegra_powergate_power_on(TEGRA_POWERGATE_PCIE);
1212 if (err) {
1213 dev_err(dev, "failed to power ungate: %d\n", err);
1214 goto regulator_disable;
1215 }
1216 err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_PCIE);
1217 if (err) {
1218 dev_err(dev, "failed to remove clamp: %d\n", err);
1219 goto powergate;
1220 }
1221 }
1222
1223 err = clk_prepare_enable(pcie->afi_clk);
1224 if (err < 0) {
1225 dev_err(dev, "failed to enable AFI clock: %d\n", err);
1226 goto powergate;
1227 }
1228
1229 if (soc->has_cml_clk) {
1230 err = clk_prepare_enable(pcie->cml_clk);
1231 if (err < 0) {
1232 dev_err(dev, "failed to enable CML clock: %d\n", err);
1233 goto disable_afi_clk;
1234 }
1235 }
1236
1237 err = clk_prepare_enable(pcie->pll_e);
1238 if (err < 0) {
1239 dev_err(dev, "failed to enable PLLE clock: %d\n", err);
1240 goto disable_cml_clk;
1241 }
1242
1243 reset_control_deassert(pcie->afi_rst);
1244
1245 return 0;
1246
1247 disable_cml_clk:
1248 if (soc->has_cml_clk)
1249 clk_disable_unprepare(pcie->cml_clk);
1250 disable_afi_clk:
1251 clk_disable_unprepare(pcie->afi_clk);
1252 powergate:
1253 if (!dev->pm_domain)
1254 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1255 regulator_disable:
1256 regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1257
1258 return err;
1259 }
1260
1261 static void tegra_pcie_apply_pad_settings(struct tegra_pcie *pcie)
1262 {
1263 const struct tegra_pcie_soc *soc = pcie->soc;
1264
1265 /* Configure the reference clock driver */
1266 pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);
1267
1268 if (soc->num_ports > 2)
1269 pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);
1270 }
1271
1272 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1273 {
1274 struct device *dev = pcie->dev;
1275 const struct tegra_pcie_soc *soc = pcie->soc;
1276
1277 pcie->pex_clk = devm_clk_get(dev, "pex");
1278 if (IS_ERR(pcie->pex_clk))
1279 return PTR_ERR(pcie->pex_clk);
1280
1281 pcie->afi_clk = devm_clk_get(dev, "afi");
1282 if (IS_ERR(pcie->afi_clk))
1283 return PTR_ERR(pcie->afi_clk);
1284
1285 pcie->pll_e = devm_clk_get(dev, "pll_e");
1286 if (IS_ERR(pcie->pll_e))
1287 return PTR_ERR(pcie->pll_e);
1288
1289 if (soc->has_cml_clk) {
1290 pcie->cml_clk = devm_clk_get(dev, "cml");
1291 if (IS_ERR(pcie->cml_clk))
1292 return PTR_ERR(pcie->cml_clk);
1293 }
1294
1295 return 0;
1296 }
1297
1298 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1299 {
1300 struct device *dev = pcie->dev;
1301
1302 pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1303 if (IS_ERR(pcie->pex_rst))
1304 return PTR_ERR(pcie->pex_rst);
1305
1306 pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1307 if (IS_ERR(pcie->afi_rst))
1308 return PTR_ERR(pcie->afi_rst);
1309
1310 pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1311 if (IS_ERR(pcie->pcie_xrst))
1312 return PTR_ERR(pcie->pcie_xrst);
1313
1314 return 0;
1315 }
1316
1317 static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1318 {
1319 struct device *dev = pcie->dev;
1320 int err;
1321
1322 pcie->phy = devm_phy_optional_get(dev, "pcie");
1323 if (IS_ERR(pcie->phy)) {
1324 err = PTR_ERR(pcie->phy);
1325 dev_err(dev, "failed to get PHY: %d\n", err);
1326 return err;
1327 }
1328
1329 err = phy_init(pcie->phy);
1330 if (err < 0) {
1331 dev_err(dev, "failed to initialize PHY: %d\n", err);
1332 return err;
1333 }
1334
1335 pcie->legacy_phy = true;
1336
1337 return 0;
1338 }
1339
1340 static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1341 struct device_node *np,
1342 const char *consumer,
1343 unsigned int index)
1344 {
1345 struct phy *phy;
1346 char *name;
1347
1348 name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1349 if (!name)
1350 return ERR_PTR(-ENOMEM);
1351
1352 phy = devm_of_phy_get(dev, np, name);
1353 kfree(name);
1354
1355 if (PTR_ERR(phy) == -ENODEV)
1356 phy = NULL;
1357
1358 return phy;
1359 }
1360
1361 static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1362 {
1363 struct device *dev = port->pcie->dev;
1364 struct phy *phy;
1365 unsigned int i;
1366 int err;
1367
1368 port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1369 if (!port->phys)
1370 return -ENOMEM;
1371
1372 for (i = 0; i < port->lanes; i++) {
1373 phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1374 if (IS_ERR(phy)) {
1375 dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1376 PTR_ERR(phy));
1377 return PTR_ERR(phy);
1378 }
1379
1380 err = phy_init(phy);
1381 if (err < 0) {
1382 dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1383 err);
1384 return err;
1385 }
1386
1387 port->phys[i] = phy;
1388 }
1389
1390 return 0;
1391 }
1392
1393 static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1394 {
1395 const struct tegra_pcie_soc *soc = pcie->soc;
1396 struct device_node *np = pcie->dev->of_node;
1397 struct tegra_pcie_port *port;
1398 int err;
1399
1400 if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1401 return tegra_pcie_phys_get_legacy(pcie);
1402
1403 list_for_each_entry(port, &pcie->ports, list) {
1404 err = tegra_pcie_port_get_phys(port);
1405 if (err < 0)
1406 return err;
1407 }
1408
1409 return 0;
1410 }
1411
1412 static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1413 {
1414 struct tegra_pcie_port *port;
1415 struct device *dev = pcie->dev;
1416 int err, i;
1417
1418 if (pcie->legacy_phy) {
1419 err = phy_exit(pcie->phy);
1420 if (err < 0)
1421 dev_err(dev, "failed to teardown PHY: %d\n", err);
1422 return;
1423 }
1424
1425 list_for_each_entry(port, &pcie->ports, list) {
1426 for (i = 0; i < port->lanes; i++) {
1427 err = phy_exit(port->phys[i]);
1428 if (err < 0)
1429 dev_err(dev, "failed to teardown PHY#%u: %d\n",
1430 i, err);
1431 }
1432 }
1433 }
1434
1435
1436 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1437 {
1438 struct device *dev = pcie->dev;
1439 struct platform_device *pdev = to_platform_device(dev);
1440 struct resource *res;
1441 const struct tegra_pcie_soc *soc = pcie->soc;
1442 int err;
1443
1444 err = tegra_pcie_clocks_get(pcie);
1445 if (err) {
1446 dev_err(dev, "failed to get clocks: %d\n", err);
1447 return err;
1448 }
1449
1450 err = tegra_pcie_resets_get(pcie);
1451 if (err) {
1452 dev_err(dev, "failed to get resets: %d\n", err);
1453 return err;
1454 }
1455
1456 if (soc->program_uphy) {
1457 err = tegra_pcie_phys_get(pcie);
1458 if (err < 0) {
1459 dev_err(dev, "failed to get PHYs: %d\n", err);
1460 return err;
1461 }
1462 }
1463
1464 pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads");
1465 if (IS_ERR(pcie->pads)) {
1466 err = PTR_ERR(pcie->pads);
1467 goto phys_put;
1468 }
1469
1470 pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi");
1471 if (IS_ERR(pcie->afi)) {
1472 err = PTR_ERR(pcie->afi);
1473 goto phys_put;
1474 }
1475
1476 /* request configuration space, but remap later, on demand */
1477 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1478 if (!res) {
1479 err = -EADDRNOTAVAIL;
1480 goto phys_put;
1481 }
1482
1483 pcie->cs = *res;
1484
1485 /* constrain configuration space to 4 KiB */
1486 pcie->cs.end = pcie->cs.start + SZ_4K - 1;
1487
1488 pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
1489 if (IS_ERR(pcie->cfg)) {
1490 err = PTR_ERR(pcie->cfg);
1491 goto phys_put;
1492 }
1493
1494 /* request interrupt */
1495 err = platform_get_irq_byname(pdev, "intr");
1496 if (err < 0)
1497 goto phys_put;
1498
1499 pcie->irq = err;
1500
1501 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1502 if (err) {
1503 dev_err(dev, "failed to register IRQ: %d\n", err);
1504 goto phys_put;
1505 }
1506
1507 return 0;
1508
1509 phys_put:
1510 if (soc->program_uphy)
1511 tegra_pcie_phys_put(pcie);
1512 return err;
1513 }
1514
1515 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1516 {
1517 const struct tegra_pcie_soc *soc = pcie->soc;
1518
1519 if (pcie->irq > 0)
1520 free_irq(pcie->irq, pcie);
1521
1522 if (soc->program_uphy)
1523 tegra_pcie_phys_put(pcie);
1524
1525 return 0;
1526 }
1527
1528 static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
1529 {
1530 struct tegra_pcie *pcie = port->pcie;
1531 const struct tegra_pcie_soc *soc = pcie->soc;
1532 int err;
1533 u32 val;
1534 u8 ack_bit;
1535
1536 val = afi_readl(pcie, AFI_PCIE_PME);
1537 val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
1538 afi_writel(pcie, val, AFI_PCIE_PME);
1539
1540 ack_bit = soc->ports[port->index].pme.ack_bit;
1541 err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
1542 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
1543 if (err)
1544 dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
1545 port->index);
1546
1547 usleep_range(10000, 11000);
1548
1549 val = afi_readl(pcie, AFI_PCIE_PME);
1550 val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
1551 afi_writel(pcie, val, AFI_PCIE_PME);
1552 }
1553
1554 static int tegra_msi_alloc(struct tegra_msi *chip)
1555 {
1556 int msi;
1557
1558 mutex_lock(&chip->lock);
1559
1560 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1561 if (msi < INT_PCI_MSI_NR)
1562 set_bit(msi, chip->used);
1563 else
1564 msi = -ENOSPC;
1565
1566 mutex_unlock(&chip->lock);
1567
1568 return msi;
1569 }
1570
1571 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1572 {
1573 struct device *dev = chip->chip.dev;
1574
1575 mutex_lock(&chip->lock);
1576
1577 if (!test_bit(irq, chip->used))
1578 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1579 else
1580 clear_bit(irq, chip->used);
1581
1582 mutex_unlock(&chip->lock);
1583 }
1584
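/*
 * The AFI exposes INT_PCI_MSI_NR (8 * 32) MSI vectors as eight 32-bit
 * AFI_MSI_VEC* status registers with matching AFI_MSI_EN_VEC* enables:
 * hardware IRQ number n corresponds to bit (n % 32) of register
 * AFI_MSI_VEC0 + (n / 32) * 4, as decoded by the handler below.
 */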
1585 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1586 {
1587 struct tegra_pcie *pcie = data;
1588 struct device *dev = pcie->dev;
1589 struct tegra_msi *msi = &pcie->msi;
1590 unsigned int i, processed = 0;
1591
1592 for (i = 0; i < 8; i++) {
1593 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1594
1595 while (reg) {
1596 unsigned int offset = find_first_bit(&reg, 32);
1597 unsigned int index = i * 32 + offset;
1598 unsigned int irq;
1599
1600 /* clear the interrupt */
1601 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1602
1603 irq = irq_find_mapping(msi->domain, index);
1604 if (irq) {
1605 if (test_bit(index, msi->used))
1606 generic_handle_irq(irq);
1607 else
1608 dev_info(dev, "unhandled MSI\n");
1609 } else {
1610 /*
1611 * that's weird: who triggered this?
1612 * just clear it
1613 */
1614 dev_info(dev, "unexpected MSI\n");
1615 }
1616
1617 /* see if there's any more pending in this vector */
1618 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1619
1620 processed++;
1621 }
1622 }
1623
1624 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1625 }
1626
1627 static int tegra_msi_setup_irq(struct msi_controller *chip,
1628 struct pci_dev *pdev, struct msi_desc *desc)
1629 {
1630 struct tegra_msi *msi = to_tegra_msi(chip);
1631 struct msi_msg msg;
1632 unsigned int irq;
1633 int hwirq;
1634
1635 hwirq = tegra_msi_alloc(msi);
1636 if (hwirq < 0)
1637 return hwirq;
1638
1639 irq = irq_create_mapping(msi->domain, hwirq);
1640 if (!irq) {
1641 tegra_msi_free(msi, hwirq);
1642 return -EINVAL;
1643 }
1644
1645 irq_set_msi_desc(irq, desc);
1646
1647 msg.address_lo = lower_32_bits(msi->phys);
1648 msg.address_hi = upper_32_bits(msi->phys);
1649 msg.data = hwirq;
1650
1651 pci_write_msi_msg(irq, &msg);
1652
1653 return 0;
1654 }
1655
1656 static void tegra_msi_teardown_irq(struct msi_controller *chip,
1657 unsigned int irq)
1658 {
1659 struct tegra_msi *msi = to_tegra_msi(chip);
1660 struct irq_data *d = irq_get_irq_data(irq);
1661 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1662
1663 irq_dispose_mapping(irq);
1664 tegra_msi_free(msi, hwirq);
1665 }
1666
1667 static struct irq_chip tegra_msi_irq_chip = {
1668 .name = "Tegra PCIe MSI",
1669 .irq_enable = pci_msi_unmask_irq,
1670 .irq_disable = pci_msi_mask_irq,
1671 .irq_mask = pci_msi_mask_irq,
1672 .irq_unmask = pci_msi_unmask_irq,
1673 };
1674
1675 static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1676 irq_hw_number_t hwirq)
1677 {
1678 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1679 irq_set_chip_data(irq, domain->host_data);
1680
1681 tegra_cpuidle_pcie_irqs_in_use();
1682
1683 return 0;
1684 }
1685
1686 static const struct irq_domain_ops msi_domain_ops = {
1687 .map = tegra_msi_map,
1688 };
1689
1690 static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1691 {
1692 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1693 struct platform_device *pdev = to_platform_device(pcie->dev);
1694 struct tegra_msi *msi = &pcie->msi;
1695 struct device *dev = pcie->dev;
1696 int err;
1697
1698 mutex_init(&msi->lock);
1699
1700 msi->chip.dev = dev;
1701 msi->chip.setup_irq = tegra_msi_setup_irq;
1702 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1703
1704 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1705 &msi_domain_ops, &msi->chip);
1706 if (!msi->domain) {
1707 dev_err(dev, "failed to create IRQ domain\n");
1708 return -ENOMEM;
1709 }
1710
1711 err = platform_get_irq_byname(pdev, "msi");
1712 if (err < 0)
1713 goto free_irq_domain;
1714
1715 msi->irq = err;
1716
1717 err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1718 tegra_msi_irq_chip.name, pcie);
1719 if (err < 0) {
1720 dev_err(dev, "failed to request IRQ: %d\n", err);
1721 goto free_irq_domain;
1722 }
1723
1724 /* Though the PCIe controller can address a >32-bit address space, the
1725 * DMA mask is set to 32 bits so that the MSI target address is always a
1726 * 32-bit address, to accommodate endpoints that only support 32-bit MSI
1727 * target addresses.
1728 */
1729 err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
1730 if (err < 0) {
1731 dev_err(dev, "failed to set DMA coherent mask: %d\n", err);
1732 goto free_irq;
1733 }
1734
1735 msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL,
1736 DMA_ATTR_NO_KERNEL_MAPPING);
1737 if (!msi->virt) {
1738 dev_err(dev, "failed to allocate DMA memory for MSI\n");
1739 err = -ENOMEM;
1740 goto free_irq;
1741 }
1742
1743 host->msi = &msi->chip;
1744
1745 return 0;
1746
1747 free_irq:
1748 free_irq(msi->irq, pcie);
1749 free_irq_domain:
1750 irq_domain_remove(msi->domain);
1751 return err;
1752 }
1753
1754 static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1755 {
1756 const struct tegra_pcie_soc *soc = pcie->soc;
1757 struct tegra_msi *msi = &pcie->msi;
1758 u32 reg;
1759
1760 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1761 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1762 /* this register is in 4K increments */
1763 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1764
1765 /* enable all MSI vectors */
1766 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1767 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1768 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1769 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1770 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1771 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1772 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1773 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1774
1775 /* and unmask the MSI interrupt */
1776 reg = afi_readl(pcie, AFI_INTR_MASK);
1777 reg |= AFI_INTR_MASK_MSI_MASK;
1778 afi_writel(pcie, reg, AFI_INTR_MASK);
1779 }
1780
1781 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1782 {
1783 struct tegra_msi *msi = &pcie->msi;
1784 unsigned int i, irq;
1785
1786 dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys,
1787 DMA_ATTR_NO_KERNEL_MAPPING);
1788
1789 if (msi->irq > 0)
1790 free_irq(msi->irq, pcie);
1791
1792 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1793 irq = irq_find_mapping(msi->domain, i);
1794 if (irq > 0)
1795 irq_dispose_mapping(irq);
1796 }
1797
1798 irq_domain_remove(msi->domain);
1799 }
1800
1801 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1802 {
1803 u32 value;
1804
1805 /* mask the MSI interrupt */
1806 value = afi_readl(pcie, AFI_INTR_MASK);
1807 value &= ~AFI_INTR_MASK_MSI_MASK;
1808 afi_writel(pcie, value, AFI_INTR_MASK);
1809
1810 /* disable all MSI vectors */
1811 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1812 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1813 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1814 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1815 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1816 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1817 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1818 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1819
1820 return 0;
1821 }
1822
1823 static void tegra_pcie_disable_interrupts(struct tegra_pcie *pcie)
1824 {
1825 u32 value;
1826
1827 value = afi_readl(pcie, AFI_INTR_MASK);
1828 value &= ~AFI_INTR_MASK_INT_MASK;
1829 afi_writel(pcie, value, AFI_INTR_MASK);
1830 }
1831
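/*
 * As the case values below suggest, the "lanes" argument appears to pack the
 * lane count of each root port into one byte per port, with port 0 in the
 * least-significant byte: e.g. 0x010004 on Tegra186 would mean ports with
 * 4, 0 and 1 lanes, matching the "4x1, 1x1" message.
 */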
1832 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1833 u32 *xbar)
1834 {
1835 struct device *dev = pcie->dev;
1836 struct device_node *np = dev->of_node;
1837
1838 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1839 switch (lanes) {
1840 case 0x010004:
1841 dev_info(dev, "4x1, 1x1 configuration\n");
1842 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
1843 return 0;
1844
1845 case 0x010102:
1846 dev_info(dev, "2x1, 1x1, 1x1 configuration\n");
1847 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1848 return 0;
1849
1850 case 0x010101:
1851 dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
1852 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
1853 return 0;
1854
1855 default:
1856 dev_info(dev, "wrong configuration updated in DT, "
1857 "switching to default 2x1, 1x1, 1x1 "
1858 "configuration\n");
1859 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
1860 return 0;
1861 }
1862 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
1863 of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1864 switch (lanes) {
1865 case 0x0000104:
1866 dev_info(dev, "4x1, 1x1 configuration\n");
1867 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1868 return 0;
1869
1870 case 0x0000102:
1871 dev_info(dev, "2x1, 1x1 configuration\n");
1872 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1873 return 0;
1874 }
1875 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1876 switch (lanes) {
1877 case 0x00000204:
1878 dev_info(dev, "4x1, 2x1 configuration\n");
1879 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1880 return 0;
1881
1882 case 0x00020202:
1883 dev_info(dev, "2x3 configuration\n");
1884 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1885 return 0;
1886
1887 case 0x00010104:
1888 dev_info(dev, "4x1, 1x2 configuration\n");
1889 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1890 return 0;
1891 }
1892 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1893 switch (lanes) {
1894 case 0x00000004:
1895 dev_info(dev, "single-mode configuration\n");
1896 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1897 return 0;
1898
1899 case 0x00000202:
1900 dev_info(dev, "dual-mode configuration\n");
1901 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1902 return 0;
1903 }
1904 }
1905
1906 return -EINVAL;
1907 }
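
/*
 * Worked example for the "lanes" encoding handled above: tegra_pcie_parse_dt()
 * packs one byte per root port (lanes |= value << (index << 3)), so a
 * Tegra124 board with 4 lanes on port 0 and 1 lane on port 1 yields
 * lanes == 0x0104 and selects AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1.
 */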
1908
1909 /*
1910 * Check whether a given set of supplies is available in a device tree node.
1911 * This is used to check whether the new or the legacy device tree bindings
1912 * should be used.
1913 */
1914 static bool of_regulator_bulk_available(struct device_node *np,
1915 struct regulator_bulk_data *supplies,
1916 unsigned int num_supplies)
1917 {
1918 char property[32];
1919 unsigned int i;
1920
1921 for (i = 0; i < num_supplies; i++) {
1922 snprintf(property, 32, "%s-supply", supplies[i].supply);
1923
1924 if (of_find_property(np, property, NULL) == NULL)
1925 return false;
1926 }
1927
1928 return true;
1929 }
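
/*
 * Usage sketch: each supply name is looked up with a "-supply" suffix, e.g.
 * "vddio-pex-ctl" becomes the "vddio-pex-ctl-supply" property. The helper is
 * used further down in tegra_pcie_get_regulators() roughly like this:
 *
 *	if (of_regulator_bulk_available(np, pcie->supplies, pcie->num_supplies))
 *		return devm_regulator_bulk_get(dev, pcie->num_supplies,
 *					       pcie->supplies);
 */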
1930
1931 /*
1932 * Old versions of the device tree binding for this device used a set of power
1933 * supplies that didn't match the hardware inputs. This happened to work for a
1934 * number of cases but is not future proof. However, to preserve backwards-
1935 * compatibility with old device trees, this function will try to use the old
1936 * set of supplies.
1937 */
1938 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1939 {
1940 struct device *dev = pcie->dev;
1941 struct device_node *np = dev->of_node;
1942
1943 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1944 pcie->num_supplies = 3;
1945 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1946 pcie->num_supplies = 2;
1947
1948 if (pcie->num_supplies == 0) {
1949 dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1950 return -ENODEV;
1951 }
1952
1953 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1954 sizeof(*pcie->supplies),
1955 GFP_KERNEL);
1956 if (!pcie->supplies)
1957 return -ENOMEM;
1958
1959 pcie->supplies[0].supply = "pex-clk";
1960 pcie->supplies[1].supply = "vdd";
1961
1962 if (pcie->num_supplies > 2)
1963 pcie->supplies[2].supply = "avdd";
1964
1965 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1966 }
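
/*
 * With the legacy binding above, the device tree is expected to carry a
 * "pex-clk-supply" and a "vdd-supply" property (plus "avdd-supply" on
 * Tegra30), matching the supply names requested by
 * tegra_pcie_get_legacy_regulators().
 */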
1967
1968 /*
1969 * Obtains the list of regulators required for a particular generation of the
1970 * IP block.
1971 *
1972 * This would've been nice to do simply by providing static tables for use
1973 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1974 * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1975 * and either seems to be optional depending on which ports are being used.
1976 */
1977 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1978 {
1979 struct device *dev = pcie->dev;
1980 struct device_node *np = dev->of_node;
1981 unsigned int i = 0;
1982
1983 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1984 pcie->num_supplies = 4;
1985
1986 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1987 sizeof(*pcie->supplies),
1988 GFP_KERNEL);
1989 if (!pcie->supplies)
1990 return -ENOMEM;
1991
1992 pcie->supplies[i++].supply = "dvdd-pex";
1993 pcie->supplies[i++].supply = "hvdd-pex-pll";
1994 pcie->supplies[i++].supply = "hvdd-pex";
1995 pcie->supplies[i++].supply = "vddio-pexctl-aud";
1996 } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1997 pcie->num_supplies = 3;
1998
1999 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
2000 sizeof(*pcie->supplies),
2001 GFP_KERNEL);
2002 if (!pcie->supplies)
2003 return -ENOMEM;
2004
2005 pcie->supplies[i++].supply = "hvddio-pex";
2006 pcie->supplies[i++].supply = "dvddio-pex";
2007 pcie->supplies[i++].supply = "vddio-pex-ctl";
2008 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
2009 pcie->num_supplies = 4;
2010
2011 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2012 sizeof(*pcie->supplies),
2013 GFP_KERNEL);
2014 if (!pcie->supplies)
2015 return -ENOMEM;
2016
2017 pcie->supplies[i++].supply = "avddio-pex";
2018 pcie->supplies[i++].supply = "dvddio-pex";
2019 pcie->supplies[i++].supply = "hvdd-pex";
2020 pcie->supplies[i++].supply = "vddio-pex-ctl";
2021 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
2022 bool need_pexa = false, need_pexb = false;
2023
2024 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
2025 if (lane_mask & 0x0f)
2026 need_pexa = true;
2027
2028 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
2029 if (lane_mask & 0x30)
2030 need_pexb = true;
2031
2032 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
2033 (need_pexb ? 2 : 0);
2034
2035 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2036 sizeof(*pcie->supplies),
2037 GFP_KERNEL);
2038 if (!pcie->supplies)
2039 return -ENOMEM;
2040
2041 pcie->supplies[i++].supply = "avdd-pex-pll";
2042 pcie->supplies[i++].supply = "hvdd-pex";
2043 pcie->supplies[i++].supply = "vddio-pex-ctl";
2044 pcie->supplies[i++].supply = "avdd-plle";
2045
2046 if (need_pexa) {
2047 pcie->supplies[i++].supply = "avdd-pexa";
2048 pcie->supplies[i++].supply = "vdd-pexa";
2049 }
2050
2051 if (need_pexb) {
2052 pcie->supplies[i++].supply = "avdd-pexb";
2053 pcie->supplies[i++].supply = "vdd-pexb";
2054 }
2055 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
2056 pcie->num_supplies = 5;
2057
2058 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
2059 sizeof(*pcie->supplies),
2060 GFP_KERNEL);
2061 if (!pcie->supplies)
2062 return -ENOMEM;
2063
2064 pcie->supplies[0].supply = "avdd-pex";
2065 pcie->supplies[1].supply = "vdd-pex";
2066 pcie->supplies[2].supply = "avdd-pex-pll";
2067 pcie->supplies[3].supply = "avdd-plle";
2068 pcie->supplies[4].supply = "vddio-pex-clk";
2069 }
2070
2071 if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
2072 pcie->num_supplies))
2073 return devm_regulator_bulk_get(dev, pcie->num_supplies,
2074 pcie->supplies);
2075
2076 /*
2077 * If not all regulators are available for this new scheme, assume
2078 * that the device tree complies with an older version of the device
2079 * tree binding.
2080 */
2081 dev_info(dev, "using legacy DT binding for power supplies\n");
2082
2083 devm_kfree(dev, pcie->supplies);
2084 pcie->num_supplies = 0;
2085
2086 return tegra_pcie_get_legacy_regulators(pcie);
2087 }
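
/*
 * Worked example for the Tegra30 branch above: if only lanes 0-3 are routed
 * to enabled ports (lane_mask & 0x0f), num_supplies is 4 + 2 = 6 and only the
 * PEXA pair ("avdd-pexa"/"vdd-pexa") is requested on top of the four common
 * supplies; boards that also use lanes 4-5 add the PEXB pair as well.
 */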
2088
2089 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
2090 {
2091 struct device *dev = pcie->dev;
2092 struct device_node *np = dev->of_node, *port;
2093 const struct tegra_pcie_soc *soc = pcie->soc;
2094 u32 lanes = 0, mask = 0;
2095 unsigned int lane = 0;
2096 int err;
2097
2098 /* parse root ports */
2099 for_each_child_of_node(np, port) {
2100 struct tegra_pcie_port *rp;
2101 unsigned int index;
2102 u32 value;
2103 char *label;
2104
2105 err = of_pci_get_devfn(port);
2106 if (err < 0) {
2107 dev_err(dev, "failed to parse address: %d\n", err);
2108 goto err_node_put;
2109 }
2110
2111 index = PCI_SLOT(err);
2112
2113 if (index < 1 || index > soc->num_ports) {
2114 dev_err(dev, "invalid port number: %d\n", index);
2115 err = -EINVAL;
2116 goto err_node_put;
2117 }
2118
2119 index--;
2120
2121 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2122 if (err < 0) {
2123 dev_err(dev, "failed to parse # of lanes: %d\n",
2124 err);
2125 goto err_node_put;
2126 }
2127
2128 if (value > 16) {
2129 dev_err(dev, "invalid # of lanes: %u\n", value);
2130 err = -EINVAL;
2131 goto err_node_put;
2132 }
2133
2134 lanes |= value << (index << 3);
2135
2136 if (!of_device_is_available(port)) {
2137 lane += value;
2138 continue;
2139 }
2140
2141 mask |= ((1 << value) - 1) << lane;
2142 lane += value;
2143
2144 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2145 if (!rp) {
2146 err = -ENOMEM;
2147 goto err_node_put;
2148 }
2149
2150 err = of_address_to_resource(port, 0, &rp->regs);
2151 if (err < 0) {
2152 dev_err(dev, "failed to parse address: %d\n", err);
2153 goto err_node_put;
2154 }
2155
2156 INIT_LIST_HEAD(&rp->list);
2157 rp->index = index;
2158 rp->lanes = value;
2159 rp->pcie = pcie;
2160 rp->np = port;
2161
2162 rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2163 if (IS_ERR(rp->base)) {
2164 err = PTR_ERR(rp->base);
2165 goto err_node_put;
2166 }
2167
2168 label = devm_kasprintf(dev, GFP_KERNEL, "pex-reset-%u", index);
2169 if (!label) {
2170 err = -ENOMEM;
2171 goto err_node_put;
2172 }
2173
2174 /*
2175 * Returns -ENOENT if reset-gpios property is not populated
2176 * and in this case fall back to using AFI per port register
2177 * to toggle PERST# SFIO line.
2178 */
2179 rp->reset_gpio = devm_gpiod_get_from_of_node(dev, port,
2180 "reset-gpios", 0,
2181 GPIOD_OUT_LOW,
2182 label);
2183 if (IS_ERR(rp->reset_gpio)) {
2184 if (PTR_ERR(rp->reset_gpio) == -ENOENT) {
2185 rp->reset_gpio = NULL;
2186 } else {
2187 dev_err(dev, "failed to get reset GPIO: %ld\n",
2188 PTR_ERR(rp->reset_gpio));
2189 err = PTR_ERR(rp->reset_gpio);
2190 goto err_node_put;
2191 }
2192 }
2193
2194 list_add_tail(&rp->list, &pcie->ports);
2195 }
2196
2197 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2198 if (err < 0) {
2199 dev_err(dev, "invalid lane configuration\n");
2200 return err;
2201 }
2202
2203 err = tegra_pcie_get_regulators(pcie, mask);
2204 if (err < 0)
2205 return err;
2206
2207 return 0;
2208
2209 err_node_put:
2210 of_node_put(port);
2211 return err;
2212 }
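
/*
 * Worked example for the lanes/mask accounting above: an enabled 4-lane port
 * 0 followed by a disabled 1-lane port 1 gives lanes == 0x0104 (every port
 * contributes its byte) but mask == 0x0f (only the lanes of enabled ports),
 * so tegra_pcie_get_regulators() only sees the lanes that are actually used.
 */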
2213
2214 /*
2215 * FIXME: If there are no PCIe cards attached, then calling this function
2216 * can significantly increase boot time because of its long timeout
2217 * loops.
2218 */
2219 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
2220 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
2221 {
2222 struct device *dev = port->pcie->dev;
2223 unsigned int retries = 3;
2224 unsigned long value;
2225
2226 /* override presence detection */
2227 value = readl(port->base + RP_PRIV_MISC);
2228 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
2229 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
2230 writel(value, port->base + RP_PRIV_MISC);
2231
2232 do {
2233 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2234
2235 do {
2236 value = readl(port->base + RP_VEND_XP);
2237
2238 if (value & RP_VEND_XP_DL_UP)
2239 break;
2240
2241 usleep_range(1000, 2000);
2242 } while (--timeout);
2243
2244 if (!timeout) {
2245 dev_dbg(dev, "link %u down, retrying\n", port->index);
2246 goto retry;
2247 }
2248
2249 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
2250
2251 do {
2252 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2253
2254 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2255 return true;
2256
2257 usleep_range(1000, 2000);
2258 } while (--timeout);
2259
2260 retry:
2261 tegra_pcie_port_reset(port);
2262 } while (--retries);
2263
2264 return false;
2265 }
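
/*
 * Rough worst-case timing for the loop above: each phase polls up to
 * TEGRA_PCIE_LINKUP_TIMEOUT (200) times with a 1-2 ms sleep, i.e. roughly
 * 0.2-0.4 s per phase, and with 3 retries a dead link can keep this function
 * busy for on the order of a second or more (hence the FIXME above).
 */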
2266
2267 static void tegra_pcie_change_link_speed(struct tegra_pcie *pcie)
2268 {
2269 struct device *dev = pcie->dev;
2270 struct tegra_pcie_port *port;
2271 ktime_t deadline;
2272 u32 value;
2273
2274 list_for_each_entry(port, &pcie->ports, list) {
2275 /*
2276 * "Supported Link Speeds Vector" in "Link Capabilities 2"
2277 * is not supported by Tegra. tegra_pcie_change_link_speed()
2278 * is called only for Tegra chips which support Gen2.
2279 * So there is no harm if the supported link speed is not verified.
2280 */
2281 value = readl(port->base + RP_LINK_CONTROL_STATUS_2);
2282 value &= ~PCI_EXP_LNKSTA_CLS;
2283 value |= PCI_EXP_LNKSTA_CLS_5_0GB;
2284 writel(value, port->base + RP_LINK_CONTROL_STATUS_2);
2285
2286 /*
2287 * Poll until the link comes back from recovery to avoid a race
2288 * condition.
2289 */
2290 deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2291
2292 while (ktime_before(ktime_get(), deadline)) {
2293 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2294 if ((value & PCI_EXP_LNKSTA_LT) == 0)
2295 break;
2296
2297 usleep_range(2000, 3000);
2298 }
2299
2300 if (value & PCI_EXP_LNKSTA_LT)
2301 dev_warn(dev, "PCIe port %u link is in recovery\n",
2302 port->index);
2303
2304 /* Retrain the link */
2305 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2306 value |= PCI_EXP_LNKCTL_RL;
2307 writel(value, port->base + RP_LINK_CONTROL_STATUS);
2308
2309 deadline = ktime_add_us(ktime_get(), LINK_RETRAIN_TIMEOUT);
2310
2311 while (ktime_before(ktime_get(), deadline)) {
2312 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2313 if ((value & PCI_EXP_LNKSTA_LT) == 0)
2314 break;
2315
2316 usleep_range(2000, 3000);
2317 }
2318
2319 if (value & PCI_EXP_LNKSTA_LT)
2320 dev_err(dev, "failed to retrain link of port %u\n",
2321 port->index);
2322 }
2323 }
2324
2325 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2326 {
2327 struct device *dev = pcie->dev;
2328 struct tegra_pcie_port *port, *tmp;
2329
2330 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2331 dev_info(dev, "probing port %u, using %u lanes\n",
2332 port->index, port->lanes);
2333
2334 tegra_pcie_port_enable(port);
2335 }
2336
2337 /* Start LTSSM from Tegra side */
2338 reset_control_deassert(pcie->pcie_xrst);
2339
2340 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2341 if (tegra_pcie_port_check_link(port))
2342 continue;
2343
2344 dev_info(dev, "link %u down, ignoring\n", port->index);
2345
2346 tegra_pcie_port_disable(port);
2347 tegra_pcie_port_free(port);
2348 }
2349
2350 if (pcie->soc->has_gen2)
2351 tegra_pcie_change_link_speed(pcie);
2352 }
2353
2354 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2355 {
2356 struct tegra_pcie_port *port, *tmp;
2357
2358 reset_control_assert(pcie->pcie_xrst);
2359
2360 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2361 tegra_pcie_port_disable(port);
2362 }
2363
2364 static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
2365 { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
2366 { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2367 };
2368
2369 static const struct tegra_pcie_soc tegra20_pcie = {
2370 .num_ports = 2,
2371 .ports = tegra20_pcie_ports,
2372 .msi_base_shift = 0,
2373 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2374 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2375 .pads_refclk_cfg0 = 0xfa5cfa5c,
2376 .has_pex_clkreq_en = false,
2377 .has_pex_bias_ctrl = false,
2378 .has_intr_prsnt_sense = false,
2379 .has_cml_clk = false,
2380 .has_gen2 = false,
2381 .force_pca_enable = false,
2382 .program_uphy = true,
2383 .update_clamp_threshold = false,
2384 .program_deskew_time = false,
2385 .update_fc_timer = false,
2386 .has_cache_bars = true,
2387 .ectl.enable = false,
2388 };
2389
2390 static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
2391 { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
2392 { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2393 { .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
2394 };
2395
2396 static const struct tegra_pcie_soc tegra30_pcie = {
2397 .num_ports = 3,
2398 .ports = tegra30_pcie_ports,
2399 .msi_base_shift = 8,
2400 .afi_pex2_ctrl = 0x128,
2401 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2402 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2403 .pads_refclk_cfg0 = 0xfa5cfa5c,
2404 .pads_refclk_cfg1 = 0xfa5cfa5c,
2405 .has_pex_clkreq_en = true,
2406 .has_pex_bias_ctrl = true,
2407 .has_intr_prsnt_sense = true,
2408 .has_cml_clk = true,
2409 .has_gen2 = false,
2410 .force_pca_enable = false,
2411 .program_uphy = true,
2412 .update_clamp_threshold = false,
2413 .program_deskew_time = false,
2414 .update_fc_timer = false,
2415 .has_cache_bars = false,
2416 .ectl.enable = false,
2417 };
2418
2419 static const struct tegra_pcie_soc tegra124_pcie = {
2420 .num_ports = 2,
2421 .ports = tegra20_pcie_ports,
2422 .msi_base_shift = 8,
2423 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2424 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2425 .pads_refclk_cfg0 = 0x44ac44ac,
2426 .has_pex_clkreq_en = true,
2427 .has_pex_bias_ctrl = true,
2428 .has_intr_prsnt_sense = true,
2429 .has_cml_clk = true,
2430 .has_gen2 = true,
2431 .force_pca_enable = false,
2432 .program_uphy = true,
2433 .update_clamp_threshold = true,
2434 .program_deskew_time = false,
2435 .update_fc_timer = false,
2436 .has_cache_bars = false,
2437 .ectl.enable = false,
2438 };
2439
2440 static const struct tegra_pcie_soc tegra210_pcie = {
2441 .num_ports = 2,
2442 .ports = tegra20_pcie_ports,
2443 .msi_base_shift = 8,
2444 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2445 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2446 .pads_refclk_cfg0 = 0x90b890b8,
2447 /* FC threshold is bit[25:18] */
2448 .update_fc_threshold = 0x01800000,
2449 .has_pex_clkreq_en = true,
2450 .has_pex_bias_ctrl = true,
2451 .has_intr_prsnt_sense = true,
2452 .has_cml_clk = true,
2453 .has_gen2 = true,
2454 .force_pca_enable = true,
2455 .program_uphy = true,
2456 .update_clamp_threshold = true,
2457 .program_deskew_time = true,
2458 .update_fc_timer = true,
2459 .has_cache_bars = false,
2460 .ectl = {
2461 .regs = {
2462 .rp_ectl_2_r1 = 0x0000000f,
2463 .rp_ectl_4_r1 = 0x00000067,
2464 .rp_ectl_5_r1 = 0x55010000,
2465 .rp_ectl_6_r1 = 0x00000001,
2466 .rp_ectl_2_r2 = 0x0000008f,
2467 .rp_ectl_4_r2 = 0x000000c7,
2468 .rp_ectl_5_r2 = 0x55010000,
2469 .rp_ectl_6_r2 = 0x00000001,
2470 },
2471 .enable = true,
2472 },
2473 };
2474
2475 static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
2476 { .pme.turnoff_bit = 0, .pme.ack_bit = 5 },
2477 { .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
2478 { .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
2479 };
2480
2481 static const struct tegra_pcie_soc tegra186_pcie = {
2482 .num_ports = 3,
2483 .ports = tegra186_pcie_ports,
2484 .msi_base_shift = 8,
2485 .afi_pex2_ctrl = 0x19c,
2486 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2487 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2488 .pads_refclk_cfg0 = 0x80b880b8,
2489 .pads_refclk_cfg1 = 0x000480b8,
2490 .has_pex_clkreq_en = true,
2491 .has_pex_bias_ctrl = true,
2492 .has_intr_prsnt_sense = true,
2493 .has_cml_clk = false,
2494 .has_gen2 = true,
2495 .force_pca_enable = false,
2496 .program_uphy = false,
2497 .update_clamp_threshold = false,
2498 .program_deskew_time = false,
2499 .update_fc_timer = false,
2500 .has_cache_bars = false,
2501 .ectl.enable = false,
2502 };
2503
2504 static const struct of_device_id tegra_pcie_of_match[] = {
2505 { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
2506 { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
2507 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
2508 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
2509 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
2510 { },
2511 };
2512 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
2513
2514 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2515 {
2516 struct tegra_pcie *pcie = s->private;
2517
2518 if (list_empty(&pcie->ports))
2519 return NULL;
2520
2521 seq_printf(s, "Index Status\n");
2522
2523 return seq_list_start(&pcie->ports, *pos);
2524 }
2525
2526 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2527 {
2528 struct tegra_pcie *pcie = s->private;
2529
2530 return seq_list_next(v, &pcie->ports, pos);
2531 }
2532
2533 static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
2534 {
2535 }
2536
2537 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2538 {
2539 bool up = false, active = false;
2540 struct tegra_pcie_port *port;
2541 unsigned int value;
2542
2543 port = list_entry(v, struct tegra_pcie_port, list);
2544
2545 value = readl(port->base + RP_VEND_XP);
2546
2547 if (value & RP_VEND_XP_DL_UP)
2548 up = true;
2549
2550 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2551
2552 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2553 active = true;
2554
2555 seq_printf(s, "%2u ", port->index);
2556
2557 if (up)
2558 seq_printf(s, "up");
2559
2560 if (active) {
2561 if (up)
2562 seq_printf(s, ", ");
2563
2564 seq_printf(s, "active");
2565 }
2566
2567 seq_printf(s, "\n");
2568 return 0;
2569 }
2570
2571 static const struct seq_operations tegra_pcie_ports_sops = {
2572 .start = tegra_pcie_ports_seq_start,
2573 .next = tegra_pcie_ports_seq_next,
2574 .stop = tegra_pcie_ports_seq_stop,
2575 .show = tegra_pcie_ports_seq_show,
2576 };
2577
2578 DEFINE_SEQ_ATTRIBUTE(tegra_pcie_ports);
2579
2580 static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
2581 {
2582 debugfs_remove_recursive(pcie->debugfs);
2583 pcie->debugfs = NULL;
2584 }
2585
2586 static void tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2587 {
2588 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2589
2590 debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, pcie,
2591 &tegra_pcie_ports_fops);
2592 }
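
/*
 * Example output, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/pcie/ports
 *	Index Status
 *	 0 up, active
 *
 * Ports whose link never came up do not show up at all, since
 * tegra_pcie_enable_ports() frees them before they can be listed here.
 */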
2593
2594 static int tegra_pcie_probe(struct platform_device *pdev)
2595 {
2596 struct device *dev = &pdev->dev;
2597 struct pci_host_bridge *host;
2598 struct tegra_pcie *pcie;
2599 int err;
2600
2601 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
2602 if (!host)
2603 return -ENOMEM;
2604
2605 pcie = pci_host_bridge_priv(host);
2606 host->sysdata = pcie;
2607 platform_set_drvdata(pdev, pcie);
2608
2609 pcie->soc = of_device_get_match_data(dev);
2610 INIT_LIST_HEAD(&pcie->ports);
2611 pcie->dev = dev;
2612
2613 err = tegra_pcie_parse_dt(pcie);
2614 if (err < 0)
2615 return err;
2616
2617 err = tegra_pcie_get_resources(pcie);
2618 if (err < 0) {
2619 dev_err(dev, "failed to request resources: %d\n", err);
2620 return err;
2621 }
2622
2623 err = tegra_pcie_msi_setup(pcie);
2624 if (err < 0) {
2625 dev_err(dev, "failed to enable MSI support: %d\n", err);
2626 goto put_resources;
2627 }
2628
2629 pm_runtime_enable(pcie->dev);
2630 err = pm_runtime_get_sync(pcie->dev);
2631 if (err < 0) {
2632 dev_err(dev, "fail to enable pcie controller: %d\n", err);
2633 goto pm_runtime_put;
2634 }
2635
2636 host->ops = &tegra_pcie_ops;
2637 host->map_irq = tegra_pcie_map_irq;
2638
2639 err = pci_host_probe(host);
2640 if (err < 0) {
2641 dev_err(dev, "failed to register host: %d\n", err);
2642 goto pm_runtime_put;
2643 }
2644
2645 if (IS_ENABLED(CONFIG_DEBUG_FS))
2646 tegra_pcie_debugfs_init(pcie);
2647
2648 return 0;
2649
2650 pm_runtime_put:
2651 pm_runtime_put_sync(pcie->dev);
2652 pm_runtime_disable(pcie->dev);
2653 tegra_pcie_msi_teardown(pcie);
2654 put_resources:
2655 tegra_pcie_put_resources(pcie);
2656 return err;
2657 }
2658
2659 static int tegra_pcie_remove(struct platform_device *pdev)
2660 {
2661 struct tegra_pcie *pcie = platform_get_drvdata(pdev);
2662 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
2663 struct tegra_pcie_port *port, *tmp;
2664
2665 if (IS_ENABLED(CONFIG_DEBUG_FS))
2666 tegra_pcie_debugfs_exit(pcie);
2667
2668 pci_stop_root_bus(host->bus);
2669 pci_remove_root_bus(host->bus);
2670 pm_runtime_put_sync(pcie->dev);
2671 pm_runtime_disable(pcie->dev);
2672
2673 if (IS_ENABLED(CONFIG_PCI_MSI))
2674 tegra_pcie_msi_teardown(pcie);
2675
2676 tegra_pcie_put_resources(pcie);
2677
2678 list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2679 tegra_pcie_port_free(port);
2680
2681 return 0;
2682 }
2683
2684 static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
2685 {
2686 struct tegra_pcie *pcie = dev_get_drvdata(dev);
2687 struct tegra_pcie_port *port;
2688 int err;
2689
2690 list_for_each_entry(port, &pcie->ports, list)
2691 tegra_pcie_pme_turnoff(port);
2692
2693 tegra_pcie_disable_ports(pcie);
2694
2695 /*
2696 * AFI_INTR is unmasked in tegra_pcie_enable_controller(), mask it to
2697 * avoid unwanted interrupts raised by AFI after pex_rst is asserted.
2698 */
2699 tegra_pcie_disable_interrupts(pcie);
2700
2701 if (pcie->soc->program_uphy) {
2702 err = tegra_pcie_phy_power_off(pcie);
2703 if (err < 0)
2704 dev_err(dev, "failed to power off PHY(s): %d\n", err);
2705 }
2706
2707 reset_control_assert(pcie->pex_rst);
2708 clk_disable_unprepare(pcie->pex_clk);
2709
2710 if (IS_ENABLED(CONFIG_PCI_MSI))
2711 tegra_pcie_disable_msi(pcie);
2712
2713 pinctrl_pm_select_idle_state(dev);
2714 tegra_pcie_power_off(pcie);
2715
2716 return 0;
2717 }
2718
2719 static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
2720 {
2721 struct tegra_pcie *pcie = dev_get_drvdata(dev);
2722 int err;
2723
2724 err = tegra_pcie_power_on(pcie);
2725 if (err) {
2726 dev_err(dev, "tegra pcie power on fail: %d\n", err);
2727 return err;
2728 }
2729
2730 err = pinctrl_pm_select_default_state(dev);
2731 if (err < 0) {
2732 dev_err(dev, "failed to disable PCIe IO DPD: %d\n", err);
2733 goto poweroff;
2734 }
2735
2736 tegra_pcie_enable_controller(pcie);
2737 tegra_pcie_setup_translations(pcie);
2738
2739 if (IS_ENABLED(CONFIG_PCI_MSI))
2740 tegra_pcie_enable_msi(pcie);
2741
2742 err = clk_prepare_enable(pcie->pex_clk);
2743 if (err) {
2744 dev_err(dev, "failed to enable PEX clock: %d\n", err);
2745 goto pex_dpd_enable;
2746 }
2747
2748 reset_control_deassert(pcie->pex_rst);
2749
2750 if (pcie->soc->program_uphy) {
2751 err = tegra_pcie_phy_power_on(pcie);
2752 if (err < 0) {
2753 dev_err(dev, "failed to power on PHY(s): %d\n", err);
2754 goto disable_pex_clk;
2755 }
2756 }
2757
2758 tegra_pcie_apply_pad_settings(pcie);
2759 tegra_pcie_enable_ports(pcie);
2760
2761 return 0;
2762
2763 disable_pex_clk:
2764 reset_control_assert(pcie->pex_rst);
2765 clk_disable_unprepare(pcie->pex_clk);
2766 pex_dpd_enable:
2767 pinctrl_pm_select_idle_state(dev);
2768 poweroff:
2769 tegra_pcie_power_off(pcie);
2770
2771 return err;
2772 }
2773
2774 static const struct dev_pm_ops tegra_pcie_pm_ops = {
2775 SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
2776 SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
2777 tegra_pcie_pm_resume)
2778 };
2779
2780 static struct platform_driver tegra_pcie_driver = {
2781 .driver = {
2782 .name = "tegra-pcie",
2783 .of_match_table = tegra_pcie_of_match,
2784 .suppress_bind_attrs = true,
2785 .pm = &tegra_pcie_pm_ops,
2786 },
2787 .probe = tegra_pcie_probe,
2788 .remove = tegra_pcie_remove,
2789 };
2790 module_platform_driver(tegra_pcie_driver);
2791 MODULE_LICENSE("GPL");
2792