• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * PCIe host controller driver for Tegra SoCs
4  *
5  * Copyright (c) 2010, CompuLab, Ltd.
6  * Author: Mike Rapoport <mike@compulab.co.il>
7  *
8  * Based on NVIDIA PCIe driver
9  * Copyright (c) 2008-2009, NVIDIA Corporation.
10  *
11  * Bits taken from arch/arm/mach-dove/pcie.c
12  *
13  * Author: Thierry Reding <treding@nvidia.com>
14  */
15 
16 #include <linux/clk.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/export.h>
20 #include <linux/interrupt.h>
21 #include <linux/iopoll.h>
22 #include <linux/irq.h>
23 #include <linux/irqdomain.h>
24 #include <linux/kernel.h>
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/msi.h>
28 #include <linux/of_address.h>
29 #include <linux/of_pci.h>
30 #include <linux/of_platform.h>
31 #include <linux/pci.h>
32 #include <linux/phy/phy.h>
33 #include <linux/platform_device.h>
34 #include <linux/reset.h>
35 #include <linux/sizes.h>
36 #include <linux/slab.h>
37 #include <linux/vmalloc.h>
38 #include <linux/regulator/consumer.h>
39 
40 #include <soc/tegra/cpuidle.h>
41 #include <soc/tegra/pmc.h>
42 
43 #include "../pci.h"
44 
45 #define INT_PCI_MSI_NR (8 * 32)
46 
47 /* register definitions */
48 
49 #define AFI_AXI_BAR0_SZ	0x00
50 #define AFI_AXI_BAR1_SZ	0x04
51 #define AFI_AXI_BAR2_SZ	0x08
52 #define AFI_AXI_BAR3_SZ	0x0c
53 #define AFI_AXI_BAR4_SZ	0x10
54 #define AFI_AXI_BAR5_SZ	0x14
55 
56 #define AFI_AXI_BAR0_START	0x18
57 #define AFI_AXI_BAR1_START	0x1c
58 #define AFI_AXI_BAR2_START	0x20
59 #define AFI_AXI_BAR3_START	0x24
60 #define AFI_AXI_BAR4_START	0x28
61 #define AFI_AXI_BAR5_START	0x2c
62 
63 #define AFI_FPCI_BAR0	0x30
64 #define AFI_FPCI_BAR1	0x34
65 #define AFI_FPCI_BAR2	0x38
66 #define AFI_FPCI_BAR3	0x3c
67 #define AFI_FPCI_BAR4	0x40
68 #define AFI_FPCI_BAR5	0x44
69 
70 #define AFI_CACHE_BAR0_SZ	0x48
71 #define AFI_CACHE_BAR0_ST	0x4c
72 #define AFI_CACHE_BAR1_SZ	0x50
73 #define AFI_CACHE_BAR1_ST	0x54
74 
75 #define AFI_MSI_BAR_SZ		0x60
76 #define AFI_MSI_FPCI_BAR_ST	0x64
77 #define AFI_MSI_AXI_BAR_ST	0x68
78 
79 #define AFI_MSI_VEC0		0x6c
80 #define AFI_MSI_VEC1		0x70
81 #define AFI_MSI_VEC2		0x74
82 #define AFI_MSI_VEC3		0x78
83 #define AFI_MSI_VEC4		0x7c
84 #define AFI_MSI_VEC5		0x80
85 #define AFI_MSI_VEC6		0x84
86 #define AFI_MSI_VEC7		0x88
87 
88 #define AFI_MSI_EN_VEC0		0x8c
89 #define AFI_MSI_EN_VEC1		0x90
90 #define AFI_MSI_EN_VEC2		0x94
91 #define AFI_MSI_EN_VEC3		0x98
92 #define AFI_MSI_EN_VEC4		0x9c
93 #define AFI_MSI_EN_VEC5		0xa0
94 #define AFI_MSI_EN_VEC6		0xa4
95 #define AFI_MSI_EN_VEC7		0xa8
96 
97 #define AFI_CONFIGURATION		0xac
98 #define  AFI_CONFIGURATION_EN_FPCI	(1 << 0)
99 
100 #define AFI_FPCI_ERROR_MASKS	0xb0
101 
102 #define AFI_INTR_MASK		0xb4
103 #define  AFI_INTR_MASK_INT_MASK	(1 << 0)
104 #define  AFI_INTR_MASK_MSI_MASK	(1 << 8)
105 
106 #define AFI_INTR_CODE			0xb8
107 #define  AFI_INTR_CODE_MASK		0xf
108 #define  AFI_INTR_INI_SLAVE_ERROR	1
109 #define  AFI_INTR_INI_DECODE_ERROR	2
110 #define  AFI_INTR_TARGET_ABORT		3
111 #define  AFI_INTR_MASTER_ABORT		4
112 #define  AFI_INTR_INVALID_WRITE		5
113 #define  AFI_INTR_LEGACY		6
114 #define  AFI_INTR_FPCI_DECODE_ERROR	7
115 #define  AFI_INTR_AXI_DECODE_ERROR	8
116 #define  AFI_INTR_FPCI_TIMEOUT		9
117 #define  AFI_INTR_PE_PRSNT_SENSE	10
118 #define  AFI_INTR_PE_CLKREQ_SENSE	11
119 #define  AFI_INTR_CLKCLAMP_SENSE	12
120 #define  AFI_INTR_RDY4PD_SENSE		13
121 #define  AFI_INTR_P2P_ERROR		14
122 
123 #define AFI_INTR_SIGNATURE	0xbc
124 #define AFI_UPPER_FPCI_ADDRESS	0xc0
125 #define AFI_SM_INTR_ENABLE	0xc4
126 #define  AFI_SM_INTR_INTA_ASSERT	(1 << 0)
127 #define  AFI_SM_INTR_INTB_ASSERT	(1 << 1)
128 #define  AFI_SM_INTR_INTC_ASSERT	(1 << 2)
129 #define  AFI_SM_INTR_INTD_ASSERT	(1 << 3)
130 #define  AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
131 #define  AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
132 #define  AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
133 #define  AFI_SM_INTR_INTD_DEASSERT	(1 << 7)
134 
135 #define AFI_AFI_INTR_ENABLE		0xc8
136 #define  AFI_INTR_EN_INI_SLVERR		(1 << 0)
137 #define  AFI_INTR_EN_INI_DECERR		(1 << 1)
138 #define  AFI_INTR_EN_TGT_SLVERR		(1 << 2)
139 #define  AFI_INTR_EN_TGT_DECERR		(1 << 3)
140 #define  AFI_INTR_EN_TGT_WRERR		(1 << 4)
141 #define  AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
142 #define  AFI_INTR_EN_AXI_DECERR		(1 << 6)
143 #define  AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
144 #define  AFI_INTR_EN_PRSNT_SENSE	(1 << 8)
145 
146 #define AFI_PCIE_PME		0xf0
147 
148 #define AFI_PCIE_CONFIG					0x0f8
149 #define  AFI_PCIE_CONFIG_PCIE_DISABLE(x)		(1 << ((x) + 1))
150 #define  AFI_PCIE_CONFIG_PCIE_DISABLE_ALL		0xe
151 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
152 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
153 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
154 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
155 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401	(0x0 << 20)
156 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
157 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
158 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
159 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211	(0x1 << 20)
160 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
161 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111	(0x2 << 20)
162 
163 #define AFI_FUSE			0x104
164 #define  AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)
165 
166 #define AFI_PEX0_CTRL			0x110
167 #define AFI_PEX1_CTRL			0x118
168 #define AFI_PEX2_CTRL			0x128
169 #define  AFI_PEX_CTRL_RST		(1 << 0)
170 #define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
171 #define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
172 #define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
173 
174 #define AFI_PLLE_CONTROL		0x160
175 #define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
176 #define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
177 
178 #define AFI_PEXBIAS_CTRL_0		0x168
179 
180 #define RP_VEND_XP	0x00000f00
181 #define  RP_VEND_XP_DL_UP	(1 << 30)
182 
183 #define RP_VEND_CTL2 0x00000fa8
184 #define  RP_VEND_CTL2_PCA_ENABLE (1 << 7)
185 
186 #define RP_PRIV_MISC	0x00000fe0
187 #define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
188 #define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
189 
190 #define RP_LINK_CONTROL_STATUS			0x00000090
191 #define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
192 #define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
193 
194 #define PADS_CTL_SEL		0x0000009c
195 
196 #define PADS_CTL		0x000000a0
197 #define  PADS_CTL_IDDQ_1L	(1 << 0)
198 #define  PADS_CTL_TX_DATA_EN_1L	(1 << 6)
199 #define  PADS_CTL_RX_DATA_EN_1L	(1 << 10)
200 
201 #define PADS_PLL_CTL_TEGRA20			0x000000b8
202 #define PADS_PLL_CTL_TEGRA30			0x000000b4
203 #define  PADS_PLL_CTL_RST_B4SM			(1 << 1)
204 #define  PADS_PLL_CTL_LOCKDET			(1 << 8)
205 #define  PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
206 #define  PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
207 #define  PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
208 #define  PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
209 #define  PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
210 #define  PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
211 #define  PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)
212 #define  PADS_PLL_CTL_TXCLKREF_BUF_EN		(1 << 22)
213 
214 #define PADS_REFCLK_CFG0			0x000000c8
215 #define PADS_REFCLK_CFG1			0x000000cc
216 #define PADS_REFCLK_BIAS			0x000000d0
217 
218 /*
219  * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
220  * entries, one entry per PCIe port. These field definitions and desired
221  * values aren't in the TRM, but do come from NVIDIA.
222  */
223 #define PADS_REFCLK_CFG_TERM_SHIFT		2  /* 6:2 */
224 #define PADS_REFCLK_CFG_E_TERM_SHIFT		7
225 #define PADS_REFCLK_CFG_PREDI_SHIFT		8  /* 11:8 */
226 #define PADS_REFCLK_CFG_DRVI_SHIFT		12 /* 15:12 */
227 
228 #define PME_ACK_TIMEOUT 10000
229 
230 struct tegra_msi {
231 	struct msi_controller chip;
232 	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
233 	struct irq_domain *domain;
234 	unsigned long pages;
235 	struct mutex lock;
236 	u64 phys;
237 	int irq;
238 };
239 
240 /* used to differentiate between Tegra SoC generations */
241 struct tegra_pcie_port_soc {
242 	struct {
243 		u8 turnoff_bit;
244 		u8 ack_bit;
245 	} pme;
246 };
247 
248 struct tegra_pcie_soc {
249 	unsigned int num_ports;
250 	const struct tegra_pcie_port_soc *ports;
251 	unsigned int msi_base_shift;
252 	u32 pads_pll_ctl;
253 	u32 tx_ref_sel;
254 	u32 pads_refclk_cfg0;
255 	u32 pads_refclk_cfg1;
256 	bool has_pex_clkreq_en;
257 	bool has_pex_bias_ctrl;
258 	bool has_intr_prsnt_sense;
259 	bool has_cml_clk;
260 	bool has_gen2;
261 	bool force_pca_enable;
262 	bool program_uphy;
263 };
264 
/* Recover the Tegra-specific MSI state from the embedded msi_controller. */
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
269 
270 struct tegra_pcie {
271 	struct device *dev;
272 
273 	void __iomem *pads;
274 	void __iomem *afi;
275 	void __iomem *cfg;
276 	int irq;
277 
278 	struct resource cs;
279 	struct resource io;
280 	struct resource pio;
281 	struct resource mem;
282 	struct resource prefetch;
283 	struct resource busn;
284 
285 	struct {
286 		resource_size_t mem;
287 		resource_size_t io;
288 	} offset;
289 
290 	struct clk *pex_clk;
291 	struct clk *afi_clk;
292 	struct clk *pll_e;
293 	struct clk *cml_clk;
294 
295 	struct reset_control *pex_rst;
296 	struct reset_control *afi_rst;
297 	struct reset_control *pcie_xrst;
298 
299 	bool legacy_phy;
300 	struct phy *phy;
301 
302 	struct tegra_msi msi;
303 
304 	struct list_head ports;
305 	u32 xbar_config;
306 
307 	struct regulator_bulk_data *supplies;
308 	unsigned int num_supplies;
309 
310 	const struct tegra_pcie_soc *soc;
311 	struct dentry *debugfs;
312 };
313 
314 struct tegra_pcie_port {
315 	struct tegra_pcie *pcie;
316 	struct device_node *np;
317 	struct list_head list;
318 	struct resource regs;
319 	void __iomem *base;
320 	unsigned int index;
321 	unsigned int lanes;
322 
323 	struct phy **phys;
324 };
325 
326 struct tegra_pcie_bus {
327 	struct list_head list;
328 	unsigned int nr;
329 };
330 
/* Write a 32-bit value to an AFI (AXI-to-FPCI bridge) register. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
336 
/* Read a 32-bit value from an AFI (AXI-to-FPCI bridge) register. */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
341 
/* Write a 32-bit value to a PADS (PHY pad control) register. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
347 
/* Read a 32-bit value from a PADS (PHY pad control) register. */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
352 
353 /*
354  * The configuration space mapping on Tegra is somewhat similar to the ECAM
355  * defined by PCIe. However it deviates a bit in how the 4 bits for extended
356  * register accesses are mapped:
357  *
358  *    [27:24] extended register number
359  *    [23:16] bus number
360  *    [15:11] device number
361  *    [10: 8] function number
362  *    [ 7: 0] register number
363  *
364  * Mapping the whole extended configuration space would require 256 MiB of
365  * virtual address space, only a small part of which will actually be used.
366  *
367  * To work around this, a 4 KiB region is used to generate the required
368  * configuration transaction with relevant B:D:F and register offset values.
369  * This is achieved by dynamically programming base address and size of
370  * AFI_AXI_BAR used for end point config space mapping to make sure that the
371  * address (access to which generates correct config transaction) falls in
372  * this 4 KiB region.
373  */
/*
 * Build the Tegra configuration-space offset for a B:D:F/register tuple,
 * using the bit layout documented in the comment above.
 */
static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn,
					   unsigned int where)
{
	unsigned int ext = (where & 0xf00) << 16;	/* [27:24] ext reg */
	unsigned int b = bus << 16;			/* [23:16] bus */
	unsigned int dev = PCI_SLOT(devfn) << 11;	/* [15:11] device */
	unsigned int fn = PCI_FUNC(devfn) << 8;		/* [10: 8] function */
	unsigned int reg = where & 0xff;		/* [ 7: 0] register */

	return ext | b | dev | fn | reg;
}
380 
/*
 * Map a (bus, devfn, where) triple to a virtual address for a config access.
 *
 * Accesses on bus 0 target a root port's own register window; the slot
 * number selects the port (slot N corresponds to port index N - 1), and
 * NULL is returned if no matching port exists. Downstream accesses all go
 * through a single 4 KiB window: AFI_FPCI_BAR0 is reprogrammed on every
 * access so that the wanted config offset falls inside that window (see
 * the block comment above tegra_pcie_conf_offset()).
 */
static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
					unsigned int devfn,
					int where)
{
	struct tegra_pcie *pcie = bus->sysdata;
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		unsigned int offset;
		u32 base;

		offset = tegra_pcie_conf_offset(bus->number, devfn, where);

		/* move 4 KiB window to offset within the FPCI region */
		base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8);
		afi_writel(pcie, base, AFI_FPCI_BAR0);

		/* move to correct offset within the 4 KiB page */
		addr = pcie->cfg + (offset & (SZ_4K - 1));
	}

	return addr;
}
414 
/*
 * Config-space read accessor: root ports (bus 0) only tolerate full 32-bit
 * register accesses, so route those through the read32 helper.
 */
static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *value)
{
	if (bus->number != 0)
		return pci_generic_config_read(bus, devfn, where, size, value);

	return pci_generic_config_read32(bus, devfn, where, size, value);
}
424 
/*
 * Config-space write accessor: root ports (bus 0) only tolerate full 32-bit
 * register accesses, so route those through the write32 helper.
 */
static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 value)
{
	if (bus->number != 0)
		return pci_generic_config_write(bus, devfn, where, size,
						value);

	return pci_generic_config_write32(bus, devfn, where, size, value);
}
434 
/* Config-space accessors handed to the PCI core for this host bridge. */
static struct pci_ops tegra_pcie_ops = {
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};
440 
tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port * port)441 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
442 {
443 	unsigned long ret = 0;
444 
445 	switch (port->index) {
446 	case 0:
447 		ret = AFI_PEX0_CTRL;
448 		break;
449 
450 	case 1:
451 		ret = AFI_PEX1_CTRL;
452 		break;
453 
454 	case 2:
455 		ret = AFI_PEX2_CTRL;
456 		break;
457 	}
458 
459 	return ret;
460 }
461 
/*
 * Pulse the PEX reset line of @port: clear the (active-low) RST bit, hold
 * reset for 1-2 ms, then set it again to release the port.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
478 
/*
 * Enable @port: turn on its reference clock (plus CLKREQ gating where the
 * SoC supports it), reset the port, and force the PCA workaround on SoCs
 * that require it.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		/* root-port register access, not AFI */
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
}
504 
/*
 * Disable @port: hold it in reset (RST is active-low) and gate its
 * reference clock, undoing tegra_pcie_port_enable().
 */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
525 
/*
 * Tear down @port: release its devm-managed mapping and memory region
 * (unmap before releasing the region), unlink it from the controller's
 * port list and free the structure.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
537 
538 /* Tegra PCIE root complex wrongly reports device class */
/* Force the class code to PCI-to-PCI bridge for the affected root ports. */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
547 
548 /* Tegra20 and Tegra30 PCIE requires relaxed ordering */
/* Set the Relaxed Ordering enable bit in Device Control on these ports. */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_relax_enable);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_relax_enable);
557 
tegra_pcie_request_resources(struct tegra_pcie * pcie)558 static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
559 {
560 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
561 	struct list_head *windows = &host->windows;
562 	struct device *dev = pcie->dev;
563 	int err;
564 
565 	pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
566 	pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
567 	pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
568 	pci_add_resource(windows, &pcie->busn);
569 
570 	err = devm_request_pci_bus_resources(dev, windows);
571 	if (err < 0) {
572 		pci_free_resource_list(windows);
573 		return err;
574 	}
575 
576 	pci_remap_iospace(&pcie->pio, pcie->io.start);
577 
578 	return 0;
579 }
580 
tegra_pcie_free_resources(struct tegra_pcie * pcie)581 static void tegra_pcie_free_resources(struct tegra_pcie *pcie)
582 {
583 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
584 	struct list_head *windows = &host->windows;
585 
586 	pci_unmap_iospace(&pcie->pio);
587 	pci_free_resource_list(windows);
588 }
589 
/*
 * Resolve the legacy interrupt for a device; fall back to the controller's
 * own IRQ when the device tree provides no mapping.
 */
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = pdev->bus->sysdata;
	int virq;

	/* let cpuidle know PCIe interrupts are in use */
	tegra_cpuidle_pcie_irqs_in_use();

	virq = of_irq_parse_and_map_pci(pdev, slot, pin);

	return virq ? virq : pcie->irq;
}
603 
tegra_pcie_isr(int irq,void * arg)604 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
605 {
606 	const char *err_msg[] = {
607 		"Unknown",
608 		"AXI slave error",
609 		"AXI decode error",
610 		"Target abort",
611 		"Master abort",
612 		"Invalid write",
613 		"Legacy interrupt",
614 		"Response decoding error",
615 		"AXI response decoding error",
616 		"Transaction timeout",
617 		"Slot present pin change",
618 		"Slot clock request change",
619 		"TMS clock ramp change",
620 		"TMS ready for power down",
621 		"Peer2Peer error",
622 	};
623 	struct tegra_pcie *pcie = arg;
624 	struct device *dev = pcie->dev;
625 	u32 code, signature;
626 
627 	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
628 	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
629 	afi_writel(pcie, 0, AFI_INTR_CODE);
630 
631 	if (code == AFI_INTR_LEGACY)
632 		return IRQ_NONE;
633 
634 	if (code >= ARRAY_SIZE(err_msg))
635 		code = 0;
636 
637 	/*
638 	 * do not pollute kernel log with master abort reports since they
639 	 * happen a lot during enumeration
640 	 */
641 	if (code == AFI_INTR_MASTER_ABORT)
642 		dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
643 	else
644 		dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
645 
646 	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
647 	    code == AFI_INTR_FPCI_DECODE_ERROR) {
648 		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
649 		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
650 
651 		if (code == AFI_INTR_MASTER_ABORT)
652 			dev_dbg(dev, "  FPCI address: %10llx\n", address);
653 		else
654 			dev_err(dev, "  FPCI address: %10llx\n", address);
655 	}
656 
657 	return IRQ_HANDLED;
658 }
659 
660 /*
661  * FPCI map is as follows:
662  * - 0xfdfc000000: I/O space
663  * - 0xfdfe000000: type 0 configuration space
664  * - 0xfdff000000: type 1 configuration space
665  * - 0xfe00000000: type 0 extended configuration space
666  * - 0xfe10000000: type 1 extended configuration space
667  */
/*
 * Program the AFI AXI-to-FPCI address translations: BAR0 for extended
 * config space, BAR1 for downstream I/O, BAR2/BAR3 for prefetchable and
 * non-prefetchable memory; the unused BARs, the cache BARs and the MSI
 * translation are cleared (MSI is set up later, only when needed).
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	size = resource_size(&pcie->cs);
	afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
722 
/*
 * Busy-wait for the PADS PLL lock-detect bit to be set.
 *
 * @timeout is given in milliseconds (it is converted to a jiffies deadline
 * internally). Returns 0 once the PLL reports lock, -ETIMEDOUT otherwise.
 * NOTE(review): this polls without sleeping or cpu_relax(); callers use
 * short timeouts so the spin is bounded.
 */
static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		if (value & PADS_PLL_CTL_LOCKDET)
			return 0;
	}

	return -ETIMEDOUT;
}
738 
/*
 * Bring up the built-in (legacy) PCIe PHY by direct PADS register
 * programming: select the PHY, configure the PLL inputs, pulse the PLL
 * reset, wait for lock, then release IDDQ and enable TX/RX data.
 *
 * Returns 0 on success or a negative errno if the PLL fails to lock.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset  */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
794 
/*
 * Shut down the built-in (legacy) PCIe PHY: disable TX/RX data, assert the
 * IDDQ override and put the PLL back into reset. Always returns 0; the int
 * return keeps the signature symmetric with tegra_pcie_phy_enable().
 */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
819 
tegra_pcie_port_phy_power_on(struct tegra_pcie_port * port)820 static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
821 {
822 	struct device *dev = port->pcie->dev;
823 	unsigned int i;
824 	int err;
825 
826 	for (i = 0; i < port->lanes; i++) {
827 		err = phy_power_on(port->phys[i]);
828 		if (err < 0) {
829 			dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
830 			return err;
831 		}
832 	}
833 
834 	return 0;
835 }
836 
tegra_pcie_port_phy_power_off(struct tegra_pcie_port * port)837 static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
838 {
839 	struct device *dev = port->pcie->dev;
840 	unsigned int i;
841 	int err;
842 
843 	for (i = 0; i < port->lanes; i++) {
844 		err = phy_power_off(port->phys[i]);
845 		if (err < 0) {
846 			dev_err(dev, "failed to power off PHY#%u: %d\n", i,
847 				err);
848 			return err;
849 		}
850 	}
851 
852 	return 0;
853 }
854 
/*
 * Power on the controller's PHY(s).
 *
 * Legacy-PHY mode uses either a single PHY handle or, when none is
 * provided, direct PADS programming via tegra_pcie_phy_enable(). Otherwise
 * each port's per-lane PHYs are powered individually and the reference
 * clock drive parameters are programmed afterwards.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	/* CFG1 covers the additional ports on SoCs with more than two */
	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
892 
/*
 * Power off the controller's PHY(s), mirroring tegra_pcie_phy_power_on():
 * single PHY handle or direct PADS programming in legacy mode, otherwise
 * per-port, per-lane PHY power-off.
 */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
923 
/*
 * Bring up the PCIe controller: configure PLL power-down handshaking and
 * bias pads, select the lane crossbar configuration, enable only the ports
 * listed in the device tree, set the Gen2 fuse, power on the PHY(s) where
 * the SoC requires it, release the PCIe reset and enable FPCI plus the
 * error interrupt sources (MSI stays masked until requested).
 *
 * Returns 0 on success or a negative errno from PHY power-on.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* re-enable only the ports described in the device tree */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			return err;
		}
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
998 
tegra_pcie_disable_controller(struct tegra_pcie * pcie)999 static void tegra_pcie_disable_controller(struct tegra_pcie *pcie)
1000 {
1001 	int err;
1002 
1003 	reset_control_assert(pcie->pcie_xrst);
1004 
1005 	if (pcie->soc->program_uphy) {
1006 		err = tegra_pcie_phy_power_off(pcie);
1007 		if (err < 0)
1008 			dev_err(pcie->dev, "failed to power off PHY(s): %d\n",
1009 				err);
1010 	}
1011 }
1012 
/*
 * Power down the PCIe controller: assert resets, stop clocks, turn off the
 * power partition (when not managed by a PM domain) and drop the supplies.
 * The order is the reverse of tegra_pcie_power_on() and must be preserved.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	/* hold the AFI and PEX blocks in reset before cutting their clocks */
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	clk_disable_unprepare(pcie->pll_e);
	/* the CML clock only exists on some SoC generations */
	if (soc->has_cml_clk)
		clk_disable_unprepare(pcie->cml_clk);
	clk_disable_unprepare(pcie->afi_clk);
	clk_disable_unprepare(pcie->pex_clk);

	/* with a PM domain attached, the core handles the power partition */
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* best effort: log but do not fail if a regulator refuses to turn off */
	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
1035 
tegra_pcie_power_on(struct tegra_pcie * pcie)1036 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1037 {
1038 	struct device *dev = pcie->dev;
1039 	const struct tegra_pcie_soc *soc = pcie->soc;
1040 	int err;
1041 
1042 	reset_control_assert(pcie->pcie_xrst);
1043 	reset_control_assert(pcie->afi_rst);
1044 	reset_control_assert(pcie->pex_rst);
1045 
1046 	if (!dev->pm_domain)
1047 		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1048 
1049 	/* enable regulators */
1050 	err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1051 	if (err < 0)
1052 		dev_err(dev, "failed to enable regulators: %d\n", err);
1053 
1054 	if (dev->pm_domain) {
1055 		err = clk_prepare_enable(pcie->pex_clk);
1056 		if (err) {
1057 			dev_err(dev, "failed to enable PEX clock: %d\n", err);
1058 			return err;
1059 		}
1060 		reset_control_deassert(pcie->pex_rst);
1061 	} else {
1062 		err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
1063 							pcie->pex_clk,
1064 							pcie->pex_rst);
1065 		if (err) {
1066 			dev_err(dev, "powerup sequence failed: %d\n", err);
1067 			return err;
1068 		}
1069 	}
1070 
1071 	reset_control_deassert(pcie->afi_rst);
1072 
1073 	err = clk_prepare_enable(pcie->afi_clk);
1074 	if (err < 0) {
1075 		dev_err(dev, "failed to enable AFI clock: %d\n", err);
1076 		return err;
1077 	}
1078 
1079 	if (soc->has_cml_clk) {
1080 		err = clk_prepare_enable(pcie->cml_clk);
1081 		if (err < 0) {
1082 			dev_err(dev, "failed to enable CML clock: %d\n", err);
1083 			return err;
1084 		}
1085 	}
1086 
1087 	err = clk_prepare_enable(pcie->pll_e);
1088 	if (err < 0) {
1089 		dev_err(dev, "failed to enable PLLE clock: %d\n", err);
1090 		return err;
1091 	}
1092 
1093 	return 0;
1094 }
1095 
tegra_pcie_clocks_get(struct tegra_pcie * pcie)1096 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1097 {
1098 	struct device *dev = pcie->dev;
1099 	const struct tegra_pcie_soc *soc = pcie->soc;
1100 
1101 	pcie->pex_clk = devm_clk_get(dev, "pex");
1102 	if (IS_ERR(pcie->pex_clk))
1103 		return PTR_ERR(pcie->pex_clk);
1104 
1105 	pcie->afi_clk = devm_clk_get(dev, "afi");
1106 	if (IS_ERR(pcie->afi_clk))
1107 		return PTR_ERR(pcie->afi_clk);
1108 
1109 	pcie->pll_e = devm_clk_get(dev, "pll_e");
1110 	if (IS_ERR(pcie->pll_e))
1111 		return PTR_ERR(pcie->pll_e);
1112 
1113 	if (soc->has_cml_clk) {
1114 		pcie->cml_clk = devm_clk_get(dev, "cml");
1115 		if (IS_ERR(pcie->cml_clk))
1116 			return PTR_ERR(pcie->cml_clk);
1117 	}
1118 
1119 	return 0;
1120 }
1121 
tegra_pcie_resets_get(struct tegra_pcie * pcie)1122 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1123 {
1124 	struct device *dev = pcie->dev;
1125 
1126 	pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1127 	if (IS_ERR(pcie->pex_rst))
1128 		return PTR_ERR(pcie->pex_rst);
1129 
1130 	pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1131 	if (IS_ERR(pcie->afi_rst))
1132 		return PTR_ERR(pcie->afi_rst);
1133 
1134 	pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1135 	if (IS_ERR(pcie->pcie_xrst))
1136 		return PTR_ERR(pcie->pcie_xrst);
1137 
1138 	return 0;
1139 }
1140 
tegra_pcie_phys_get_legacy(struct tegra_pcie * pcie)1141 static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1142 {
1143 	struct device *dev = pcie->dev;
1144 	int err;
1145 
1146 	pcie->phy = devm_phy_optional_get(dev, "pcie");
1147 	if (IS_ERR(pcie->phy)) {
1148 		err = PTR_ERR(pcie->phy);
1149 		dev_err(dev, "failed to get PHY: %d\n", err);
1150 		return err;
1151 	}
1152 
1153 	err = phy_init(pcie->phy);
1154 	if (err < 0) {
1155 		dev_err(dev, "failed to initialize PHY: %d\n", err);
1156 		return err;
1157 	}
1158 
1159 	pcie->legacy_phy = true;
1160 
1161 	return 0;
1162 }
1163 
devm_of_phy_optional_get_index(struct device * dev,struct device_node * np,const char * consumer,unsigned int index)1164 static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1165 						  struct device_node *np,
1166 						  const char *consumer,
1167 						  unsigned int index)
1168 {
1169 	struct phy *phy;
1170 	char *name;
1171 
1172 	name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1173 	if (!name)
1174 		return ERR_PTR(-ENOMEM);
1175 
1176 	phy = devm_of_phy_get(dev, np, name);
1177 	kfree(name);
1178 
1179 	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
1180 		phy = NULL;
1181 
1182 	return phy;
1183 }
1184 
tegra_pcie_port_get_phys(struct tegra_pcie_port * port)1185 static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1186 {
1187 	struct device *dev = port->pcie->dev;
1188 	struct phy *phy;
1189 	unsigned int i;
1190 	int err;
1191 
1192 	port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1193 	if (!port->phys)
1194 		return -ENOMEM;
1195 
1196 	for (i = 0; i < port->lanes; i++) {
1197 		phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1198 		if (IS_ERR(phy)) {
1199 			dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1200 				PTR_ERR(phy));
1201 			return PTR_ERR(phy);
1202 		}
1203 
1204 		err = phy_init(phy);
1205 		if (err < 0) {
1206 			dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1207 				err);
1208 			return err;
1209 		}
1210 
1211 		port->phys[i] = phy;
1212 	}
1213 
1214 	return 0;
1215 }
1216 
tegra_pcie_phys_get(struct tegra_pcie * pcie)1217 static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1218 {
1219 	const struct tegra_pcie_soc *soc = pcie->soc;
1220 	struct device_node *np = pcie->dev->of_node;
1221 	struct tegra_pcie_port *port;
1222 	int err;
1223 
1224 	if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1225 		return tegra_pcie_phys_get_legacy(pcie);
1226 
1227 	list_for_each_entry(port, &pcie->ports, list) {
1228 		err = tegra_pcie_port_get_phys(port);
1229 		if (err < 0)
1230 			return err;
1231 	}
1232 
1233 	return 0;
1234 }
1235 
tegra_pcie_phys_put(struct tegra_pcie * pcie)1236 static void tegra_pcie_phys_put(struct tegra_pcie *pcie)
1237 {
1238 	struct tegra_pcie_port *port;
1239 	struct device *dev = pcie->dev;
1240 	int err, i;
1241 
1242 	if (pcie->legacy_phy) {
1243 		err = phy_exit(pcie->phy);
1244 		if (err < 0)
1245 			dev_err(dev, "failed to teardown PHY: %d\n", err);
1246 		return;
1247 	}
1248 
1249 	list_for_each_entry(port, &pcie->ports, list) {
1250 		for (i = 0; i < port->lanes; i++) {
1251 			err = phy_exit(port->phys[i]);
1252 			if (err < 0)
1253 				dev_err(dev, "failed to teardown PHY#%u: %d\n",
1254 					i, err);
1255 		}
1256 	}
1257 }
1258 
1259 
/*
 * Acquire every resource the controller needs: clocks, resets, PHYs (when
 * the SoC programs the UPHY), the "pads"/"afi" register windows, the
 * configuration space aperture and the controller interrupt.
 *
 * On failure after the PHYs have been initialized, they are torn down via
 * the phys_put label; everything else is device-managed.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto phys_put;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto phys_put;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto phys_put;
	}

	pcie->cs = *res;

	/* constrain configuration space to 4 KiB */
	pcie->cs.end = pcie->cs.start + SZ_4K - 1;

	pcie->cfg = devm_ioremap_resource(dev, &pcie->cs);
	if (IS_ERR(pcie->cfg)) {
		err = PTR_ERR(pcie->cfg);
		goto phys_put;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto phys_put;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto phys_put;
	}

	return 0;

phys_put:
	/* only the PHYs need explicit teardown; the rest is devm-managed */
	if (soc->program_uphy)
		tegra_pcie_phys_put(pcie);
	return err;
}
1342 
tegra_pcie_put_resources(struct tegra_pcie * pcie)1343 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1344 {
1345 	const struct tegra_pcie_soc *soc = pcie->soc;
1346 
1347 	if (pcie->irq > 0)
1348 		free_irq(pcie->irq, pcie);
1349 
1350 	if (soc->program_uphy)
1351 		tegra_pcie_phys_put(pcie);
1352 
1353 	return 0;
1354 }
1355 
/*
 * Broadcast PME_Turn_Off on a root port and wait for the PME ack: set the
 * port's turnoff bit in AFI_PCIE_PME, poll for the matching ack bit, then
 * clear the turnoff bit again. The per-port bit positions come from the
 * SoC data (soc->ports[...].pme). An ack timeout is logged but not fatal.
 */
static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;
	u32 val;
	u8 ack_bit;

	val = afi_readl(pcie, AFI_PCIE_PME);
	val |= (0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);

	ack_bit = soc->ports[port->index].pme.ack_bit;
	err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val,
				 val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT);
	if (err)
		dev_err(pcie->dev, "PME Ack is not received on port: %d\n",
			port->index);

	/* give the endpoint time to enter L2 after the ack */
	usleep_range(10000, 11000);

	val = afi_readl(pcie, AFI_PCIE_PME);
	val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit);
	afi_writel(pcie, val, AFI_PCIE_PME);
}
1381 
tegra_msi_alloc(struct tegra_msi * chip)1382 static int tegra_msi_alloc(struct tegra_msi *chip)
1383 {
1384 	int msi;
1385 
1386 	mutex_lock(&chip->lock);
1387 
1388 	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1389 	if (msi < INT_PCI_MSI_NR)
1390 		set_bit(msi, chip->used);
1391 	else
1392 		msi = -ENOSPC;
1393 
1394 	mutex_unlock(&chip->lock);
1395 
1396 	return msi;
1397 }
1398 
tegra_msi_free(struct tegra_msi * chip,unsigned long irq)1399 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1400 {
1401 	struct device *dev = chip->chip.dev;
1402 
1403 	mutex_lock(&chip->lock);
1404 
1405 	if (!test_bit(irq, chip->used))
1406 		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1407 	else
1408 		clear_bit(irq, chip->used);
1409 
1410 	mutex_unlock(&chip->lock);
1411 }
1412 
tegra_pcie_msi_irq(int irq,void * data)1413 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1414 {
1415 	struct tegra_pcie *pcie = data;
1416 	struct device *dev = pcie->dev;
1417 	struct tegra_msi *msi = &pcie->msi;
1418 	unsigned int i, processed = 0;
1419 
1420 	for (i = 0; i < 8; i++) {
1421 		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1422 
1423 		while (reg) {
1424 			unsigned int offset = find_first_bit(&reg, 32);
1425 			unsigned int index = i * 32 + offset;
1426 			unsigned int irq;
1427 
1428 			/* clear the interrupt */
1429 			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1430 
1431 			irq = irq_find_mapping(msi->domain, index);
1432 			if (irq) {
1433 				if (test_bit(index, msi->used))
1434 					generic_handle_irq(irq);
1435 				else
1436 					dev_info(dev, "unhandled MSI\n");
1437 			} else {
1438 				/*
1439 				 * that's weird who triggered this?
1440 				 * just clear it
1441 				 */
1442 				dev_info(dev, "unexpected MSI\n");
1443 			}
1444 
1445 			/* see if there's any more pending in this vector */
1446 			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1447 
1448 			processed++;
1449 		}
1450 	}
1451 
1452 	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
1453 }
1454 
tegra_msi_setup_irq(struct msi_controller * chip,struct pci_dev * pdev,struct msi_desc * desc)1455 static int tegra_msi_setup_irq(struct msi_controller *chip,
1456 			       struct pci_dev *pdev, struct msi_desc *desc)
1457 {
1458 	struct tegra_msi *msi = to_tegra_msi(chip);
1459 	struct msi_msg msg;
1460 	unsigned int irq;
1461 	int hwirq;
1462 
1463 	hwirq = tegra_msi_alloc(msi);
1464 	if (hwirq < 0)
1465 		return hwirq;
1466 
1467 	irq = irq_create_mapping(msi->domain, hwirq);
1468 	if (!irq) {
1469 		tegra_msi_free(msi, hwirq);
1470 		return -EINVAL;
1471 	}
1472 
1473 	irq_set_msi_desc(irq, desc);
1474 
1475 	msg.address_lo = lower_32_bits(msi->phys);
1476 	msg.address_hi = upper_32_bits(msi->phys);
1477 	msg.data = hwirq;
1478 
1479 	pci_write_msi_msg(irq, &msg);
1480 
1481 	return 0;
1482 }
1483 
tegra_msi_teardown_irq(struct msi_controller * chip,unsigned int irq)1484 static void tegra_msi_teardown_irq(struct msi_controller *chip,
1485 				   unsigned int irq)
1486 {
1487 	struct tegra_msi *msi = to_tegra_msi(chip);
1488 	struct irq_data *d = irq_get_irq_data(irq);
1489 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
1490 
1491 	irq_dispose_mapping(irq);
1492 	tegra_msi_free(msi, hwirq);
1493 }
1494 
/* IRQ chip for Tegra MSIs; masking is delegated to the PCI MSI core. */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
1502 
/*
 * IRQ domain .map callback: bind a newly created mapping to the Tegra MSI
 * chip and note that PCIe interrupts are now in use (cpuidle constraint).
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	/* LP2 must be avoided while PCIe interrupts are active */
	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1513 
/* Linear MSI IRQ domain; only .map is needed, unmapping uses the default. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1517 
tegra_pcie_msi_setup(struct tegra_pcie * pcie)1518 static int tegra_pcie_msi_setup(struct tegra_pcie *pcie)
1519 {
1520 	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1521 	struct platform_device *pdev = to_platform_device(pcie->dev);
1522 	struct tegra_msi *msi = &pcie->msi;
1523 	struct device *dev = pcie->dev;
1524 	int err;
1525 
1526 	mutex_init(&msi->lock);
1527 
1528 	msi->chip.dev = dev;
1529 	msi->chip.setup_irq = tegra_msi_setup_irq;
1530 	msi->chip.teardown_irq = tegra_msi_teardown_irq;
1531 
1532 	msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1533 					    &msi_domain_ops, &msi->chip);
1534 	if (!msi->domain) {
1535 		dev_err(dev, "failed to create IRQ domain\n");
1536 		return -ENOMEM;
1537 	}
1538 
1539 	err = platform_get_irq_byname(pdev, "msi");
1540 	if (err < 0) {
1541 		dev_err(dev, "failed to get IRQ: %d\n", err);
1542 		goto err;
1543 	}
1544 
1545 	msi->irq = err;
1546 
1547 	err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1548 			  tegra_msi_irq_chip.name, pcie);
1549 	if (err < 0) {
1550 		dev_err(dev, "failed to request IRQ: %d\n", err);
1551 		goto err;
1552 	}
1553 
1554 	/* setup AFI/FPCI range */
1555 	msi->pages = __get_free_pages(GFP_KERNEL, 0);
1556 	msi->phys = virt_to_phys((void *)msi->pages);
1557 	host->msi = &msi->chip;
1558 
1559 	return 0;
1560 
1561 err:
1562 	irq_domain_remove(msi->domain);
1563 	return err;
1564 }
1565 
/*
 * Program the MSI target address into the AFI, enable all 256 vectors and
 * unmask the top-level MSI interrupt. Must run after tegra_pcie_msi_setup()
 * has allocated the MSI page (msi->phys).
 */
static void tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_msi *msi = &pcie->msi;
	u32 reg;

	/* FPCI-side and AXI-side views of the MSI target page */
	afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);
}
1592 
tegra_pcie_msi_teardown(struct tegra_pcie * pcie)1593 static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie)
1594 {
1595 	struct tegra_msi *msi = &pcie->msi;
1596 	unsigned int i, irq;
1597 
1598 	free_pages(msi->pages, 0);
1599 
1600 	if (msi->irq > 0)
1601 		free_irq(msi->irq, pcie);
1602 
1603 	for (i = 0; i < INT_PCI_MSI_NR; i++) {
1604 		irq = irq_find_mapping(msi->domain, i);
1605 		if (irq > 0)
1606 			irq_dispose_mapping(irq);
1607 	}
1608 
1609 	irq_domain_remove(msi->domain);
1610 }
1611 
/*
 * Reverse of tegra_pcie_enable_msi(): mask the top-level MSI interrupt and
 * disable all 256 vectors. Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	return 0;
}
1633 
/*
 * Translate the packed per-port lane counts into an XBAR configuration
 * value for AFI_PCIE_CONFIG. @lanes packs one byte per root port (lane
 * count of port N in bits N*8..N*8+7, as assembled in tegra_pcie_parse_dt()),
 * so e.g. 0x010004 means port 0 has 4 lanes, port 2 has 1 lane.
 *
 * Returns 0 with *xbar set on a recognized combination, -EINVAL otherwise
 * (except Tegra186, which falls back to a default configuration).
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
			/* Tegra186 tolerates bad DT data with a default */
			dev_info(dev, "wrong configuration updated in DT, "
				 "switching to default 2x1, 1x1, 1x1 "
				 "configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1710 
1711 /*
1712  * Check whether a given set of supplies is available in a device tree node.
1713  * This is used to check whether the new or the legacy device tree bindings
1714  * should be used.
1715  */
of_regulator_bulk_available(struct device_node * np,struct regulator_bulk_data * supplies,unsigned int num_supplies)1716 static bool of_regulator_bulk_available(struct device_node *np,
1717 					struct regulator_bulk_data *supplies,
1718 					unsigned int num_supplies)
1719 {
1720 	char property[32];
1721 	unsigned int i;
1722 
1723 	for (i = 0; i < num_supplies; i++) {
1724 		snprintf(property, 32, "%s-supply", supplies[i].supply);
1725 
1726 		if (of_find_property(np, property, NULL) == NULL)
1727 			return false;
1728 	}
1729 
1730 	return true;
1731 }
1732 
1733 /*
1734  * Old versions of the device tree binding for this device used a set of power
1735  * supplies that didn't match the hardware inputs. This happened to work for a
1736  * number of cases but is not future proof. However to preserve backwards-
1737  * compatibility with old device trees, this function will try to use the old
1738  * set of supplies.
1739  */
tegra_pcie_get_legacy_regulators(struct tegra_pcie * pcie)1740 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1741 {
1742 	struct device *dev = pcie->dev;
1743 	struct device_node *np = dev->of_node;
1744 
1745 	if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1746 		pcie->num_supplies = 3;
1747 	else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1748 		pcie->num_supplies = 2;
1749 
1750 	if (pcie->num_supplies == 0) {
1751 		dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1752 		return -ENODEV;
1753 	}
1754 
1755 	pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1756 				      sizeof(*pcie->supplies),
1757 				      GFP_KERNEL);
1758 	if (!pcie->supplies)
1759 		return -ENOMEM;
1760 
1761 	pcie->supplies[0].supply = "pex-clk";
1762 	pcie->supplies[1].supply = "vdd";
1763 
1764 	if (pcie->num_supplies > 2)
1765 		pcie->supplies[2].supply = "avdd";
1766 
1767 	return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1768 }
1769 
1770 /*
1771  * Obtains the list of regulators required for a particular generation of the
1772  * IP block.
1773  *
1774  * This would've been nice to do simply by providing static tables for use
1775  * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1776  * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1777  * and either seems to be optional depending on which ports are being used.
1778  */
/*
 * Populate pcie->supplies with the regulator names required by the SoC
 * generation matched via the compatible string, then acquire them in bulk.
 * @lane_mask has one bit per enabled lane (assembled in
 * tegra_pcie_parse_dt()) and, on Tegra30, selects which of the PEXA/PEXB
 * supply pairs are needed. If not every supply from the new binding is
 * present in the device tree, fall back to the legacy binding.
 */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		pcie->num_supplies = 4;

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "dvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pexctl-aud";
	} else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		pcie->num_supplies = 6;

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pll-uerefe";
		pcie->supplies[i++].supply = "hvddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "dvdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
		pcie->num_supplies = 7;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avddio-pex";
		pcie->supplies[i++].supply = "dvddio-pex";
		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-pll-erefe";
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
		if (lane_mask & 0x30)
			need_pexb = true;

		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all regulators are available for this new scheme, assume
	 * that the device tree complies with an older version of the device
	 * tree binding.
	 */
	dev_info(dev, "using legacy DT binding for power supplies\n");

	devm_kfree(dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}
1896 
tegra_pcie_parse_dt(struct tegra_pcie * pcie)1897 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1898 {
1899 	struct device *dev = pcie->dev;
1900 	struct device_node *np = dev->of_node, *port;
1901 	const struct tegra_pcie_soc *soc = pcie->soc;
1902 	struct of_pci_range_parser parser;
1903 	struct of_pci_range range;
1904 	u32 lanes = 0, mask = 0;
1905 	unsigned int lane = 0;
1906 	struct resource res;
1907 	int err;
1908 
1909 	if (of_pci_range_parser_init(&parser, np)) {
1910 		dev_err(dev, "missing \"ranges\" property\n");
1911 		return -EINVAL;
1912 	}
1913 
1914 	for_each_of_pci_range(&parser, &range) {
1915 		err = of_pci_range_to_resource(&range, np, &res);
1916 		if (err < 0)
1917 			return err;
1918 
1919 		switch (res.flags & IORESOURCE_TYPE_BITS) {
1920 		case IORESOURCE_IO:
1921 			/* Track the bus -> CPU I/O mapping offset. */
1922 			pcie->offset.io = res.start - range.pci_addr;
1923 
1924 			memcpy(&pcie->pio, &res, sizeof(res));
1925 			pcie->pio.name = np->full_name;
1926 
1927 			/*
1928 			 * The Tegra PCIe host bridge uses this to program the
1929 			 * mapping of the I/O space to the physical address,
1930 			 * so we override the .start and .end fields here that
1931 			 * of_pci_range_to_resource() converted to I/O space.
1932 			 * We also set the IORESOURCE_MEM type to clarify that
1933 			 * the resource is in the physical memory space.
1934 			 */
1935 			pcie->io.start = range.cpu_addr;
1936 			pcie->io.end = range.cpu_addr + range.size - 1;
1937 			pcie->io.flags = IORESOURCE_MEM;
1938 			pcie->io.name = "I/O";
1939 
1940 			memcpy(&res, &pcie->io, sizeof(res));
1941 			break;
1942 
1943 		case IORESOURCE_MEM:
1944 			/*
1945 			 * Track the bus -> CPU memory mapping offset. This
1946 			 * assumes that the prefetchable and non-prefetchable
1947 			 * regions will be the last of type IORESOURCE_MEM in
1948 			 * the ranges property.
1949 			 * */
1950 			pcie->offset.mem = res.start - range.pci_addr;
1951 
1952 			if (res.flags & IORESOURCE_PREFETCH) {
1953 				memcpy(&pcie->prefetch, &res, sizeof(res));
1954 				pcie->prefetch.name = "prefetchable";
1955 			} else {
1956 				memcpy(&pcie->mem, &res, sizeof(res));
1957 				pcie->mem.name = "non-prefetchable";
1958 			}
1959 			break;
1960 		}
1961 	}
1962 
1963 	err = of_pci_parse_bus_range(np, &pcie->busn);
1964 	if (err < 0) {
1965 		dev_err(dev, "failed to parse ranges property: %d\n", err);
1966 		pcie->busn.name = np->name;
1967 		pcie->busn.start = 0;
1968 		pcie->busn.end = 0xff;
1969 		pcie->busn.flags = IORESOURCE_BUS;
1970 	}
1971 
1972 	/* parse root ports */
1973 	for_each_child_of_node(np, port) {
1974 		struct tegra_pcie_port *rp;
1975 		unsigned int index;
1976 		u32 value;
1977 
1978 		err = of_pci_get_devfn(port);
1979 		if (err < 0) {
1980 			dev_err(dev, "failed to parse address: %d\n", err);
1981 			goto err_node_put;
1982 		}
1983 
1984 		index = PCI_SLOT(err);
1985 
1986 		if (index < 1 || index > soc->num_ports) {
1987 			dev_err(dev, "invalid port number: %d\n", index);
1988 			err = -EINVAL;
1989 			goto err_node_put;
1990 		}
1991 
1992 		index--;
1993 
1994 		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1995 		if (err < 0) {
1996 			dev_err(dev, "failed to parse # of lanes: %d\n",
1997 				err);
1998 			goto err_node_put;
1999 		}
2000 
2001 		if (value > 16) {
2002 			dev_err(dev, "invalid # of lanes: %u\n", value);
2003 			err = -EINVAL;
2004 			goto err_node_put;
2005 		}
2006 
2007 		lanes |= value << (index << 3);
2008 
2009 		if (!of_device_is_available(port)) {
2010 			lane += value;
2011 			continue;
2012 		}
2013 
2014 		mask |= ((1 << value) - 1) << lane;
2015 		lane += value;
2016 
2017 		rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2018 		if (!rp) {
2019 			err = -ENOMEM;
2020 			goto err_node_put;
2021 		}
2022 
2023 		err = of_address_to_resource(port, 0, &rp->regs);
2024 		if (err < 0) {
2025 			dev_err(dev, "failed to parse address: %d\n", err);
2026 			goto err_node_put;
2027 		}
2028 
2029 		INIT_LIST_HEAD(&rp->list);
2030 		rp->index = index;
2031 		rp->lanes = value;
2032 		rp->pcie = pcie;
2033 		rp->np = port;
2034 
2035 		rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2036 		if (IS_ERR(rp->base))
2037 			return PTR_ERR(rp->base);
2038 
2039 		list_add_tail(&rp->list, &pcie->ports);
2040 	}
2041 
2042 	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2043 	if (err < 0) {
2044 		dev_err(dev, "invalid lane configuration\n");
2045 		return err;
2046 	}
2047 
2048 	err = tegra_pcie_get_regulators(pcie, mask);
2049 	if (err < 0)
2050 		return err;
2051 
2052 	return 0;
2053 
2054 err_node_put:
2055 	of_node_put(port);
2056 	return err;
2057 }
2058 
2059 /*
2060  * FIXME: If there are no PCIe cards attached, then calling this function
2061  * can result in the increase of the bootup time as there are big timeout
2062  * loops.
2063  */
2064 #define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
/*
 * Check whether the given root port manages to train a PCIe link.
 *
 * The presence-detect override makes the root port behave as if an
 * endpoint were plugged in, then the link is polled in two stages, each
 * for up to TEGRA_PCIE_LINKUP_TIMEOUT iterations of a 1-2 ms sleep:
 * first for the data link coming up (RP_VEND_XP_DL_UP), then for it
 * becoming active (DL_LINK_ACTIVE).  On timeout the port is reset and
 * the whole sequence is retried, up to three attempts in total.
 *
 * Returns true if the link trained, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* stage 1: wait for the data link to come up */
		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		/* stage 2: wait for the data link to become active */
		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
2111 
tegra_pcie_enable_ports(struct tegra_pcie * pcie)2112 static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
2113 {
2114 	struct device *dev = pcie->dev;
2115 	struct tegra_pcie_port *port, *tmp;
2116 
2117 	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
2118 		dev_info(dev, "probing port %u, using %u lanes\n",
2119 			 port->index, port->lanes);
2120 
2121 		tegra_pcie_port_enable(port);
2122 
2123 		if (tegra_pcie_port_check_link(port))
2124 			continue;
2125 
2126 		dev_info(dev, "link %u down, ignoring\n", port->index);
2127 
2128 		tegra_pcie_port_disable(port);
2129 		tegra_pcie_port_free(port);
2130 	}
2131 }
2132 
tegra_pcie_disable_ports(struct tegra_pcie * pcie)2133 static void tegra_pcie_disable_ports(struct tegra_pcie *pcie)
2134 {
2135 	struct tegra_pcie_port *port, *tmp;
2136 
2137 	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
2138 		tegra_pcie_port_disable(port);
2139 }
2140 
/* per-port PME turnoff/ack bit positions for Tegra20 (2 root ports) */
static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = {
	{ .pme.turnoff_bit = 0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit = 8, .pme.ack_bit = 10 },
};

/* Tegra20: earliest generation — no CLKREQ, no CML clock, Gen1 only */
static const struct tegra_pcie_soc tegra20_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};
2161 
/* per-port PME turnoff/ack bit positions for Tegra30 (3 root ports) */
static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 16, .pme.ack_bit = 18 },
};

/* Tegra30: adds CLKREQ, bias control, presence-sense IRQ and CML clock */
static const struct tegra_pcie_soc tegra30_pcie = {
	.num_ports = 3,
	.ports = tegra30_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0xfa5cfa5c,
	.pads_refclk_cfg1 = 0xfa5cfa5c,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = false,
	.force_pca_enable = false,
	.program_uphy = true,
};
2184 
/* Tegra124: 2 ports (reuses the Tegra20 PME table), first SoC with Gen2 */
static const struct tegra_pcie_soc tegra124_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x44ac44ac,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = true,
};
2200 
/* Tegra210: like Tegra124 but requires forcing the PCA enable bit */
static const struct tegra_pcie_soc tegra210_pcie = {
	.num_ports = 2,
	.ports = tegra20_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x90b890b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
	.has_gen2 = true,
	.force_pca_enable = true,
	.program_uphy = true,
};
2216 
/* per-port PME turnoff/ack bit positions for Tegra186 (3 root ports) */
static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = {
	{ .pme.turnoff_bit =  0, .pme.ack_bit =  5 },
	{ .pme.turnoff_bit =  8, .pme.ack_bit = 10 },
	{ .pme.turnoff_bit = 12, .pme.ack_bit = 14 },
};

/* Tegra186: UPHY is owned by firmware, so the driver must not program it */
static const struct tegra_pcie_soc tegra186_pcie = {
	.num_ports = 3,
	.ports = tegra186_pcie_ports,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x80b880b8,
	.pads_refclk_cfg1 = 0x000480b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = false,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = false,
};
2239 
/* DT match table; newest compatibles first so they take precedence */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
2248 
/*
 * seq_file .start: emit the table header and position the iterator.
 * Returns NULL (empty output) when no ports survived probing.
 */
static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	if (list_empty(&pcie->ports))
		return NULL;

	/* constant string: seq_puts() is the preferred, cheaper call */
	seq_puts(s, "Index  Status\n");

	return seq_list_start(&pcie->ports, *pos);
}
2260 
/* seq_file .next: advance to the next port on the list */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}
2267 
/* seq_file .stop: nothing to release — iteration holds no locks */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2271 
tegra_pcie_ports_seq_show(struct seq_file * s,void * v)2272 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2273 {
2274 	bool up = false, active = false;
2275 	struct tegra_pcie_port *port;
2276 	unsigned int value;
2277 
2278 	port = list_entry(v, struct tegra_pcie_port, list);
2279 
2280 	value = readl(port->base + RP_VEND_XP);
2281 
2282 	if (value & RP_VEND_XP_DL_UP)
2283 		up = true;
2284 
2285 	value = readl(port->base + RP_LINK_CONTROL_STATUS);
2286 
2287 	if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2288 		active = true;
2289 
2290 	seq_printf(s, "%2u     ", port->index);
2291 
2292 	if (up)
2293 		seq_printf(s, "up");
2294 
2295 	if (active) {
2296 		if (up)
2297 			seq_printf(s, ", ");
2298 
2299 		seq_printf(s, "active");
2300 	}
2301 
2302 	seq_printf(s, "\n");
2303 	return 0;
2304 }
2305 
/* iterator over pcie->ports backing the debugfs "ports" file */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2312 
/*
 * Open the debugfs "ports" file: attach the seq iterator and stash the
 * tegra_pcie instance (from the inode's i_private, set at creation) so
 * the seq callbacks can reach it via s->private.
 */
static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
{
	struct tegra_pcie *pcie = inode->i_private;
	struct seq_file *s;
	int err;

	err = seq_open(file, &tegra_pcie_ports_seq_ops);
	if (err)
		return err;

	s = file->private_data;
	s->private = pcie;

	return 0;
}
2328 
/* file operations for the debugfs "ports" file (read-only seq_file) */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2336 
/*
 * Tear down the debugfs hierarchy.  Clearing the pointer makes the
 * function safe to call more than once (debugfs_remove_recursive()
 * ignores NULL).
 */
static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie)
{
	debugfs_remove_recursive(pcie->debugfs);
	pcie->debugfs = NULL;
}
2342 
/*
 * Create the debugfs directory "pcie" with a read-only "ports" file
 * exposing per-port link status.
 *
 * NOTE(review): the NULL checks assume debugfs creation reports failure
 * by returning NULL; on kernels where debugfs_create_dir()/_file()
 * return ERR_PTR on failure these checks would not trigger — confirm
 * against the target kernel's debugfs API.
 */
static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
{
	struct dentry *file;

	pcie->debugfs = debugfs_create_dir("pcie", NULL);
	if (!pcie->debugfs)
		return -ENOMEM;

	file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
				   pcie, &tegra_pcie_ports_ops);
	if (!file)
		goto remove;

	return 0;

remove:
	tegra_pcie_debugfs_exit(pcie);
	return -ENOMEM;
}
2362 
/*
 * Probe: allocate the host bridge, parse the DT, claim resources, set
 * up MSI, power the controller up via runtime PM, then scan and
 * populate the root bus.  The error labels unwind in strict reverse
 * order of the steps above.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *host;
	struct tegra_pcie *pcie;
	struct pci_bus *child;
	int err;

	/* driver-private state lives in the bridge allocation */
	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);
	host->sysdata = pcie;
	platform_set_drvdata(pdev, pcie);

	pcie->soc = of_device_get_match_data(dev);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->dev = dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_msi_setup(pcie);
	if (err < 0) {
		dev_err(dev, "failed to enable MSI support: %d\n", err);
		goto put_resources;
	}

	/* powers up the controller via the runtime-PM resume callback */
	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(dev, "fail to enable pcie controller: %d\n", err);
		goto pm_runtime_put;
	}

	err = tegra_pcie_request_resources(pcie);
	if (err)
		goto pm_runtime_put;

	host->busnr = pcie->busn.start;
	host->dev.parent = &pdev->dev;
	host->ops = &tegra_pcie_ops;
	host->map_irq = tegra_pcie_map_irq;
	host->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0) {
		dev_err(dev, "failed to register host: %d\n", err);
		goto free_resources;
	}

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		/* debugfs is best-effort: failure is logged, not fatal */
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", err);
	}

	return 0;

free_resources:
	tegra_pcie_free_resources(pcie);
pm_runtime_put:
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);
	tegra_pcie_msi_teardown(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
2448 
/*
 * Remove: unwind probe in reverse — debugfs, root bus, resources,
 * runtime PM (which powers the controller down), MSI, then the
 * remaining per-port state.
 */
static int tegra_pcie_remove(struct platform_device *pdev)
{
	struct tegra_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct tegra_pcie_port *port, *tmp;

	if (IS_ENABLED(CONFIG_DEBUG_FS))
		tegra_pcie_debugfs_exit(pcie);

	pci_stop_root_bus(host->bus);
	pci_remove_root_bus(host->bus);
	tegra_pcie_free_resources(pcie);
	pm_runtime_put_sync(pcie->dev);
	pm_runtime_disable(pcie->dev);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_msi_teardown(pcie);

	tegra_pcie_put_resources(pcie);

	/* port_free() unlinks each entry, hence the _safe iterator */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
		tegra_pcie_port_free(port);

	return 0;
}
2474 
/*
 * Suspend (runtime PM and system-sleep noirq): send PME_Turn_Off to
 * every port, disable the ports, MSI and the controller, then cut
 * power.  The ordering is the strict reverse of resume below.
 */
static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	struct tegra_pcie_port *port;

	list_for_each_entry(port, &pcie->ports, list)
		tegra_pcie_pme_turnoff(port);

	tegra_pcie_disable_ports(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);

	tegra_pcie_disable_controller(pcie);
	tegra_pcie_power_off(pcie);

	return 0;
}
2493 
/*
 * Resume (runtime PM and system-sleep noirq): power on, enable the
 * controller, reprogram the address translations, re-enable MSI and
 * finally retrain the port links.  On controller-enable failure the
 * power-on step is rolled back.
 */
static int __maybe_unused tegra_pcie_pm_resume(struct device *dev)
{
	struct tegra_pcie *pcie = dev_get_drvdata(dev);
	int err;

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "tegra pcie power on fail: %d\n", err);
		return err;
	}
	err = tegra_pcie_enable_controller(pcie);
	if (err) {
		dev_err(dev, "tegra pcie controller enable fail: %d\n", err);
		goto poweroff;
	}
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi(pcie);

	tegra_pcie_enable_ports(pcie);

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);

	return err;
}
2523 
/*
 * The same suspend/resume pair serves both runtime PM and the noirq
 * phase of system sleep: the controller is fully powered off and
 * reinitialized from scratch either way.
 */
static const struct dev_pm_ops tegra_pcie_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL)
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend,
				      tegra_pcie_pm_resume)
};
2529 
/* platform driver glue; unbind via sysfs is suppressed deliberately */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
		.pm = &tegra_pcie_pm_ops,
	},
	.probe = tegra_pcie_probe,
	.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
MODULE_LICENSE("GPL");
2542