• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0 */
2 // Copyright (c) 2017 Cadence
3 // Cadence PCIe controller driver.
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5 
6 #ifndef _PCIE_CADENCE_H
7 #define _PCIE_CADENCE_H
8 
9 #include <linux/kernel.h>
10 #include <linux/pci.h>
11 #include <linux/phy/phy.h>
12 
13 /* Parameters for the waiting for link up routine */
14 #define LINK_WAIT_MAX_RETRIES	10
15 #define LINK_WAIT_USLEEP_MIN	90000
16 #define LINK_WAIT_USLEEP_MAX	100000
17 
18 /*
19  * Local Management Registers
20  */
21 #define CDNS_PCIE_LM_BASE	0x00100000
22 
23 /* Vendor ID Register */
24 #define CDNS_PCIE_LM_ID		(CDNS_PCIE_LM_BASE + 0x0044)
25 #define  CDNS_PCIE_LM_ID_VENDOR_MASK	GENMASK(15, 0)
26 #define  CDNS_PCIE_LM_ID_VENDOR_SHIFT	0
27 #define  CDNS_PCIE_LM_ID_VENDOR(vid) \
28 	(((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK)
29 #define  CDNS_PCIE_LM_ID_SUBSYS_MASK	GENMASK(31, 16)
30 #define  CDNS_PCIE_LM_ID_SUBSYS_SHIFT	16
31 #define  CDNS_PCIE_LM_ID_SUBSYS(sub) \
32 	(((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK)
33 
34 /* Root Port Requestor ID Register */
35 #define CDNS_PCIE_LM_RP_RID	(CDNS_PCIE_LM_BASE + 0x0228)
36 #define  CDNS_PCIE_LM_RP_RID_MASK	GENMASK(15, 0)
37 #define  CDNS_PCIE_LM_RP_RID_SHIFT	0
38 #define  CDNS_PCIE_LM_RP_RID_(rid) \
39 	(((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK)
40 
41 /* Endpoint Bus and Device Number Register */
42 #define CDNS_PCIE_LM_EP_ID	(CDNS_PCIE_LM_BASE + 0x022c)
43 #define  CDNS_PCIE_LM_EP_ID_DEV_MASK	GENMASK(4, 0)
44 #define  CDNS_PCIE_LM_EP_ID_DEV_SHIFT	0
45 #define  CDNS_PCIE_LM_EP_ID_BUS_MASK	GENMASK(15, 8)
46 #define  CDNS_PCIE_LM_EP_ID_BUS_SHIFT	8
47 
48 /* Endpoint Function f BAR b Configuration Registers */
49 #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \
50 	(CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008)
51 #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \
52 	(CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008)
53 #define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \
54 	(GENMASK(4, 0) << ((b) * 8))
55 #define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \
56 	(((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b))
57 #define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \
58 	(GENMASK(7, 5) << ((b) * 8))
59 #define  CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \
60 	(((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b))
61 
62 /* Endpoint Function Configuration Register */
63 #define CDNS_PCIE_LM_EP_FUNC_CFG	(CDNS_PCIE_LM_BASE + 0x02c0)
64 
65 /* Root Complex BAR Configuration Register */
66 #define CDNS_PCIE_LM_RC_BAR_CFG	(CDNS_PCIE_LM_BASE + 0x0300)
67 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK	GENMASK(5, 0)
68 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \
69 	(((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK)
70 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK		GENMASK(8, 6)
71 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \
72 	(((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK)
73 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK	GENMASK(13, 9)
74 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \
75 	(((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK)
76 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK		GENMASK(16, 14)
77 #define  CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \
78 	(((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK)
79 #define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE	BIT(17)
80 #define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS	0
81 #define  CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS	BIT(18)
82 #define  CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE		BIT(19)
83 #define  CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS		0
84 #define  CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS		BIT(20)
85 #define  CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE		BIT(31)
86 
87 /* BAR control values applicable to both Endpoint Function and Root Complex */
88 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED		0x0
89 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS		0x1
90 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS		0x4
91 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS	0x5
92 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS		0x6
93 #define  CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS	0x7
94 
95 #define LM_RC_BAR_CFG_CTRL_DISABLED(bar)		\
96 		(CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6))
97 #define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar)		\
98 		(CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6))
99 #define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar)		\
100 		(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6))
101 #define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar)	\
102 	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6))
103 #define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar)		\
104 		(CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6))
105 #define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar)	\
106 	(CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6))
107 #define LM_RC_BAR_CFG_APERTURE(bar, aperture)		\
108 					(((aperture) - 2) << ((bar) * 8))
109 
110 /*
111  * Endpoint Function Registers (PCI configuration space for endpoint functions)
112  */
113 #define CDNS_PCIE_EP_FUNC_BASE(fn)	(((fn) << 12) & GENMASK(19, 12))
114 
115 #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET	0x90
116 #define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET	0xb0
117 
118 /*
119  * Root Port Registers (PCI configuration space for the root port function)
120  */
121 #define CDNS_PCIE_RP_BASE	0x00200000
122 #define CDNS_PCIE_RP_CAP_OFFSET 0xc0
123 
124 /*
125  * Address Translation Registers
126  */
127 #define CDNS_PCIE_AT_BASE	0x00400000
128 
129 /* Region r Outbound AXI to PCIe Address Translation Register 0 */
130 #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \
131 	(CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020)
132 #define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK	GENMASK(5, 0)
133 #define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \
134 	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK)
135 #define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK	GENMASK(19, 12)
136 #define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \
137 	(((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK)
138 #define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK	GENMASK(27, 20)
139 #define  CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \
140 	(((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK)
141 
142 /* Region r Outbound AXI to PCIe Address Translation Register 1 */
143 #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \
144 	(CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020)
145 
146 /* Region r Outbound PCIe Descriptor Register 0 */
147 #define CDNS_PCIE_AT_OB_REGION_DESC0(r) \
148 	(CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020)
149 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK		GENMASK(3, 0)
150 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM		0x2
151 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO		0x6
152 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0	0xa
153 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1	0xb
154 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG	0xc
155 #define  CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG	0xd
156 /* Bit 23 MUST be set in RC mode. */
157 #define  CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID	BIT(23)
158 #define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK	GENMASK(31, 24)
159 #define  CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \
160 	(((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK)
161 
162 /* Region r Outbound PCIe Descriptor Register 1 */
163 #define CDNS_PCIE_AT_OB_REGION_DESC1(r)	\
164 	(CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020)
165 #define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK	GENMASK(7, 0)
166 #define  CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \
167 	((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK)
168 
169 /* Region r AXI Region Base Address Register 0 */
170 #define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \
171 	(CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020)
172 #define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK	GENMASK(5, 0)
173 #define  CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \
174 	(((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK)
175 
176 /* Region r AXI Region Base Address Register 1 */
177 #define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \
178 	(CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020)
179 
180 /* Root Port BAR Inbound PCIe to AXI Address Translation Register */
181 #define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \
182 	(CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008)
183 #define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK	GENMASK(5, 0)
184 #define  CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \
185 	(((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK)
186 #define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \
187 	(CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008)
188 
189 /* AXI link down register */
190 #define CDNS_PCIE_AT_LINKDOWN (CDNS_PCIE_AT_BASE + 0x0824)
191 
192 /* LTSSM Capabilities register */
193 #define CDNS_PCIE_LTSSM_CONTROL_CAP             (CDNS_PCIE_LM_BASE + 0x0054)
194 #define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK  GENMASK(2, 1)
195 #define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT 1
196 #define  CDNS_PCIE_DETECT_QUIET_MIN_DELAY(delay) \
197 	 (((delay) << CDNS_PCIE_DETECT_QUIET_MIN_DELAY_SHIFT) & \
198 	 CDNS_PCIE_DETECT_QUIET_MIN_DELAY_MASK)
199 
/* Root Port BAR numbering; RP_BAR_UNDEFINED flags an unassigned slot. */
enum cdns_pcie_rp_bar {
	RP_BAR_UNDEFINED = -1,
	RP_BAR0,
	RP_BAR1,
	RP_NO_BAR
};
206 
207 #define CDNS_PCIE_RP_MAX_IB	0x3
208 
209 struct cdns_pcie_rp_ib_bar {
210 	u64 size;
211 	bool free;
212 };
213 
214 /* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */
215 #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \
216 	(CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008)
217 #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \
218 	(CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008)
219 
220 /* Normal/Vendor specific message access: offset inside some outbound region */
221 #define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK	GENMASK(7, 5)
222 #define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \
223 	(((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK)
224 #define CDNS_PCIE_NORMAL_MSG_CODE_MASK		GENMASK(15, 8)
225 #define CDNS_PCIE_NORMAL_MSG_CODE(code) \
226 	(((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK)
227 #define CDNS_PCIE_MSG_NO_DATA			BIT(16)
228 
229 struct cdns_pcie;
230 
/* TLP message codes used to assert/deassert the legacy INTx interrupts. */
enum cdns_pcie_msg_code {
	MSG_CODE_ASSERT_INTA	= 0x20,
	MSG_CODE_ASSERT_INTB	= 0x21,
	MSG_CODE_ASSERT_INTC	= 0x22,
	MSG_CODE_ASSERT_INTD	= 0x23,
	MSG_CODE_DEASSERT_INTA	= 0x24,
	MSG_CODE_DEASSERT_INTB	= 0x25,
	MSG_CODE_DEASSERT_INTC	= 0x26,
	MSG_CODE_DEASSERT_INTD	= 0x27,
};
241 
/* Message routing values (3-bit field, see CDNS_PCIE_NORMAL_MSG_ROUTING). */
enum cdns_pcie_msg_routing {
	MSG_ROUTING_TO_RC,	/* Route to Root Complex */
	MSG_ROUTING_BY_ADDR,	/* Use Address Routing */
	MSG_ROUTING_BY_ID,	/* Use ID Routing */
	MSG_ROUTING_BCAST,	/* Route as Broadcast Message from Root Complex */
	MSG_ROUTING_LOCAL,	/* Local message; terminate at receiver (INTx messages) */
	MSG_ROUTING_GATHER,	/* Gather & route to Root Complex (PME_TO_Ack message) */
};
261 
262 struct cdns_pcie_ops {
263 	int	(*start_link)(struct cdns_pcie *pcie);
264 	void	(*stop_link)(struct cdns_pcie *pcie);
265 	bool	(*link_up)(struct cdns_pcie *pcie);
266 	u64     (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr);
267 };
268 
269 /**
270  * struct cdns_pcie - private data for Cadence PCIe controller drivers
271  * @reg_base: IO mapped register base
272  * @mem_res: start/end offsets in the physical system memory to map PCI accesses
273  * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint.
274  * @bus: In Root Complex mode, the bus number
275  * @ops: Platform specific ops to control various inputs from Cadence PCIe
276  *       wrapper
277  */
278 struct cdns_pcie {
279 	void __iomem		*reg_base;
280 	struct resource		*mem_res;
281 	struct device		*dev;
282 	bool			is_rc;
283 	int			phy_count;
284 	struct phy		**phy;
285 	struct device_link	**link;
286 	const struct cdns_pcie_ops *ops;
287 };
288 
289 /**
290  * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
291  * @pcie: Cadence PCIe controller
292  * @dev: pointer to PCIe device
293  * @cfg_res: start/end offsets in the physical system memory to map PCI
294  *           configuration space accesses
295  * @cfg_base: IO mapped window to access the PCI configuration space of a
296  *            single function at a time
297  * @vendor_id: PCI vendor ID
298  * @device_id: PCI device ID
299  * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and	RP_NO_BAR if it's free or
300  *                available
301  * @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
302  * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
303  */
304 struct cdns_pcie_rc {
305 	struct cdns_pcie	pcie;
306 	struct resource		*cfg_res;
307 	void __iomem		*cfg_base;
308 	u32			vendor_id;
309 	u32			device_id;
310 	bool			avail_ib_bar[CDNS_PCIE_RP_MAX_IB];
311 	unsigned int		quirk_retrain_flag:1;
312 	unsigned int		quirk_detect_quiet_flag:1;
313 };
314 
315 /**
316  * struct cdns_pcie_epf - Structure to hold info about endpoint function
317  * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers
318  */
319 struct cdns_pcie_epf {
320 	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];
321 };
322 
323 /**
324  * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver
325  * @pcie: Cadence PCIe controller
326  * @max_regions: maximum number of regions supported by hardware
327  * @ob_region_map: bitmask of mapped outbound regions
328  * @ob_addr: base addresses in the AXI bus where the outbound regions start
329  * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ
330  *		   dedicated outbound regions is mapped.
331  * @irq_cpu_addr: base address in the CPU space where a write access triggers
332  *		  the sending of a memory write (MSI) / normal message (legacy
333  *		  IRQ) TLP through the PCIe bus.
334  * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ
335  *		  dedicated outbound region.
336  * @irq_pci_fn: the latest PCI function that has updated the mapping of
337  *		the MSI/legacy IRQ dedicated outbound region.
338  * @irq_pending: bitmask of asserted legacy IRQs.
339  * @lock: spin lock to disable interrupts while modifying PCIe controller
340  *        registers fields (RMW) accessible by both remote RC and EP to
341  *        minimize time between read and write
342  * @epf: Structure to hold info about endpoint function
343  * @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
344  */
345 struct cdns_pcie_ep {
346 	struct cdns_pcie	pcie;
347 	u32			max_regions;
348 	unsigned long		ob_region_map;
349 	phys_addr_t		*ob_addr;
350 	phys_addr_t		irq_phys_addr;
351 	void __iomem		*irq_cpu_addr;
352 	u64			irq_pci_addr;
353 	u8			irq_pci_fn;
354 	u8			irq_pending;
355 	/* protect writing to PCI_STATUS while raising legacy interrupts */
356 	spinlock_t		lock;
357 	struct cdns_pcie_epf	*epf;
358 	unsigned int		quirk_detect_quiet_flag:1;
359 };
360 
361 
362 /* Register access */
/* Write a 32-bit value to the controller register at offset @reg. */
static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value)
{
	void __iomem *addr = pcie->reg_base + reg;

	writel(value, addr);
}
367 
/* Read the 32-bit controller register at offset @reg. */
static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg)
{
	void __iomem *addr = pcie->reg_base + reg;

	return readl(addr);
}
372 
cdns_pcie_read_sz(void __iomem * addr,int size)373 static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size)
374 {
375 	void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4);
376 	unsigned int offset = (unsigned long)addr & 0x3;
377 	u32 val = readl(aligned_addr);
378 
379 	if (!IS_ALIGNED((uintptr_t)addr, size)) {
380 		pr_warn("Address %p and size %d are not aligned\n", addr, size);
381 		return 0;
382 	}
383 
384 	if (size > 2)
385 		return val;
386 
387 	return (val >> (8 * offset)) & ((1 << (size * 8)) - 1);
388 }
389 
/*
 * Write an 8/16/32-bit @value at @addr; sub-word sizes use an aligned
 * 32-bit read-modify-write.  @addr must be naturally aligned to @size;
 * misaligned requests are dropped with a warning.
 */
static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value)
{
	void __iomem *word = PTR_ALIGN_DOWN(addr, 0x4);
	unsigned int shift = ((unsigned long)addr & 0x3) * 8;
	u32 keep, merged;

	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		pr_warn("Address %p and size %d are not aligned\n", addr, size);
		return;
	}

	if (size > 2) {
		/* Full-word access: no read-modify-write needed. */
		writel(value, addr);
		return;
	}

	/* Merge @value into the enclosing aligned 32-bit word. */
	keep = ~(((1 << (size * 8)) - 1) << shift);
	merged = (readl(word) & keep) | (value << shift);
	writel(merged, word);
}
412 
413 /* Root Port register access */
/* Byte write into the Root Port PCI configuration space. */
static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie,
				       u32 reg, u8 value)
{
	cdns_pcie_write_sz(pcie->reg_base + CDNS_PCIE_RP_BASE + reg,
			   0x1, value);
}
421 
/* 16-bit write into the Root Port PCI configuration space. */
static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie,
				       u32 reg, u16 value)
{
	cdns_pcie_write_sz(pcie->reg_base + CDNS_PCIE_RP_BASE + reg,
			   0x2, value);
}
429 
cdns_pcie_rp_readw(struct cdns_pcie * pcie,u32 reg)430 static inline u16 cdns_pcie_rp_readw(struct cdns_pcie *pcie, u32 reg)
431 {
432 	void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg;
433 
434 	return cdns_pcie_read_sz(addr, 0x2);
435 }
436 
437 /* Endpoint Function register access */
/* Byte write into the configuration space of endpoint function @fn. */
static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u8 value)
{
	void __iomem *base = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn);

	cdns_pcie_write_sz(base + reg, 0x1, value);
}
445 
/* 16-bit write into the configuration space of endpoint function @fn. */
static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u16 value)
{
	void __iomem *base = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn);

	cdns_pcie_write_sz(base + reg, 0x2, value);
}
453 
/* 32-bit write into the configuration space of endpoint function @fn. */
static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn,
					  u32 reg, u32 value)
{
	void __iomem *base = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn);

	writel(value, base + reg);
}
459 
cdns_pcie_ep_fn_readw(struct cdns_pcie * pcie,u8 fn,u32 reg)460 static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg)
461 {
462 	void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg;
463 
464 	return cdns_pcie_read_sz(addr, 0x2);
465 }
466 
/* 32-bit read from the configuration space of endpoint function @fn. */
static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg)
{
	void __iomem *base = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn);

	return readl(base + reg);
}
471 
cdns_pcie_start_link(struct cdns_pcie * pcie)472 static inline int cdns_pcie_start_link(struct cdns_pcie *pcie)
473 {
474 	if (pcie->ops->start_link)
475 		return pcie->ops->start_link(pcie);
476 
477 	return 0;
478 }
479 
cdns_pcie_stop_link(struct cdns_pcie * pcie)480 static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie)
481 {
482 	if (pcie->ops->stop_link)
483 		pcie->ops->stop_link(pcie);
484 }
485 
cdns_pcie_link_up(struct cdns_pcie * pcie)486 static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie)
487 {
488 	if (pcie->ops->link_up)
489 		return pcie->ops->link_up(pcie);
490 
491 	return true;
492 }
493 
494 #ifdef CONFIG_PCIE_CADENCE_HOST
495 int cdns_pcie_host_setup(struct cdns_pcie_rc *rc);
496 void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
497 			       int where);
498 #else
/* Stub used when host (Root Complex) support is not compiled in. */
static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	return 0;
}
503 
cdns_pci_map_bus(struct pci_bus * bus,unsigned int devfn,int where)504 static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
505 					     int where)
506 {
507 	return NULL;
508 }
509 #endif
510 
511 #ifdef CONFIG_PCIE_CADENCE_EP
512 int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep);
513 #else
/* Stub used when endpoint support is not compiled in. */
static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep)
{
	return 0;
}
518 #endif
519 
520 void cdns_pcie_detect_quiet_min_delay_set(struct cdns_pcie *pcie);
521 
522 void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn,
523 				   u32 r, bool is_io,
524 				   u64 cpu_addr, u64 pci_addr, size_t size);
525 
526 void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie,
527 						  u8 busnr, u8 fn,
528 						  u32 r, u64 cpu_addr);
529 
530 void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r);
531 void cdns_pcie_disable_phy(struct cdns_pcie *pcie);
532 int cdns_pcie_enable_phy(struct cdns_pcie *pcie);
533 int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie);
534 extern const struct dev_pm_ops cdns_pcie_pm_ops;
535 
536 #endif /* _PCIE_CADENCE_H */
537