// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017 Cadence
// Cadence PCIe host controller driver.
// Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/list_sort.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>

#include "pcie-cadence.h"

#define LINK_RETRAIN_TIMEOUT HZ

static u64 bar_max_size[] = {
	[RP_BAR0] = _ULL(128 * SZ_2G),
	[RP_BAR1] = SZ_2G,
	[RP_NO_BAR] = _BITULL(63),
};

static u8 bar_aperture_mask[] = {
	[RP_BAR0] = 0x1F,
	[RP_BAR1] = 0xF,
};

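/*
 * Map a configuration space access to a CPU address.  Accesses to the root
 * bus target the root port registers directly; accesses to downstream buses
 * go through outbound AXI region 0, which is re-programmed here for the
 * target bus/devfn before returning a pointer into the "cfg" window.
 */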
void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
			       int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (pci_is_root_bus(bus)) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		return pcie->reg_base + (where & 0xfff);
	}
	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == bridge->busnr + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	return rc->cfg_base + (where & 0xfff);
}

static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};

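/*
 * Poll the Link Training bit of the Link Status register until it clears,
 * giving up after LINK_RETRAIN_TIMEOUT.
 */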
static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie)
{
	u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	unsigned long end_jiffies;
	u16 lnk_stat;

	/* Wait for link training to complete. Exit after timeout. */
	end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT;
	do {
		lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
		if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
			break;
		usleep_range(0, 1000);
	} while (time_before(jiffies, end_jiffies));

	if (!(lnk_stat & PCI_EXP_LNKSTA_LT))
		return 0;

	return -ETIMEDOUT;
}

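/* Poll for link-up, giving up after LINK_WAIT_MAX_RETRIES attempts. */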
static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie)
{
	struct device *dev = pcie->dev;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
		if (cdns_pcie_link_up(pcie)) {
			dev_info(dev, "Link up\n");
			return 0;
		}
		usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
	}

	return -ETIMEDOUT;
}

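/*
 * Work around the Gen2 training defect: if the root port supports more than
 * 2.5 GT/s but the link trained at 2.5 GT/s, set the Retrain Link bit, wait
 * for training to complete and then for the link to come back up.
 */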
static int cdns_pcie_retrain(struct cdns_pcie *pcie)
{
	u32 lnk_cap_sls, pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET;
	u16 lnk_stat, lnk_ctl;
	int ret = 0;

	/*
	 * Set the retrain bit if the current speed is 2.5 GB/s but the
	 * PCIe root port supports more than 2.5 GB/s.
	 */

	lnk_cap_sls = cdns_pcie_readl(pcie, (CDNS_PCIE_RP_BASE + pcie_cap_off +
					     PCI_EXP_LNKCAP));
	if ((lnk_cap_sls & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
		return ret;

	lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA);
	if ((lnk_stat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
		lnk_ctl = cdns_pcie_rp_readw(pcie,
					     pcie_cap_off + PCI_EXP_LNKCTL);
		lnk_ctl |= PCI_EXP_LNKCTL_RL;
		cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL,
				    lnk_ctl);

		ret = cdns_pcie_host_training_complete(pcie);
		if (ret)
			return ret;

		ret = cdns_pcie_host_wait_for_link(pcie);
	}
	return ret;
}

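/* Wait for the link and, when the retrain quirk flag is set, retrain it. */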
static int cdns_pcie_host_start_link(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	int ret;

	ret = cdns_pcie_host_wait_for_link(pcie);

	/*
	 * Retrain link for Gen2 training defect
	 * if quirk flag is set.
	 */
	if (!ret && rc->quirk_retrain_flag)
		ret = cdns_pcie_retrain(pcie);

	return ret;
}

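/*
 * Program the root complex BAR configuration register and the root port
 * vendor/device IDs and class code.
 */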
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;
	u32 id;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	if (rc->vendor_id != 0xffff) {
		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}

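/*
 * Claim the given root port BAR for inbound translation: program its address
 * and aperture registers and, unless RP_NO_BAR is used, update its control
 * bits (32/64-bit, prefetchable) in the root complex BAR configuration
 * register.
 */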
static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc,
					enum cdns_pcie_rp_bar bar,
					u64 cpu_addr, u64 size,
					unsigned long flags)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 addr0, addr1, aperture, value;

	if (!rc->avail_ib_bar[bar])
		return -EBUSY;

	rc->avail_ib_bar[bar] = false;

	aperture = ilog2(size);
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1);

	if (bar == RP_NO_BAR)
		return 0;

	value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG);
	value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) |
		   LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2));
	if (size + cpu_addr >= SZ_4G) {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar);
	} else {
		if (!(flags & IORESOURCE_PREFETCH))
			value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar);
		value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar);
	}

	value |= LM_RC_BAR_CFG_APERTURE(bar, aperture);
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	return 0;
}

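/* Return the smallest free root port BAR able to hold "size" bytes. */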
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size <= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] < bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

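/* Return the largest free root port BAR no bigger than "size" bytes. */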
static enum cdns_pcie_rp_bar
cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size)
{
	enum cdns_pcie_rp_bar bar, sel_bar;

	sel_bar = RP_BAR_UNDEFINED;
	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) {
		if (!rc->avail_ib_bar[bar])
			continue;

		if (size >= bar_max_size[bar]) {
			if (sel_bar == RP_BAR_UNDEFINED) {
				sel_bar = bar;
				continue;
			}

			if (bar_max_size[bar] > bar_max_size[sel_bar])
				sel_bar = bar;
		}
	}

	return sel_bar;
}

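/*
 * Map one dma-ranges entry through the root port BARs: use a single BAR if
 * one is large enough, otherwise split the range across the largest BARs
 * that fit until it is fully covered.
 */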
static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc,
				     struct resource_entry *entry)
{
	u64 cpu_addr, pci_addr, size, winsize;
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	enum cdns_pcie_rp_bar bar;
	unsigned long flags;
	int ret;

	cpu_addr = entry->res->start;
	pci_addr = entry->res->start - entry->offset;
	flags = entry->res->flags;
	size = resource_size(entry->res);

	if (entry->offset) {
		dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n",
			pci_addr, cpu_addr);
		return -EINVAL;
	}

	while (size > 0) {
		/*
		 * Try to find the smallest BAR whose size is greater than
		 * or equal to the remaining resource_entry size. This will
		 * fail if the size of each of the available BARs is less
		 * than the remaining resource_entry size.
		 * If such a BAR is found, the inbound ATU is configured for
		 * it and we are done.
		 */
		bar = cdns_pcie_host_find_min_bar(rc, size);
		if (bar != RP_BAR_UNDEFINED) {
			ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr,
							   size, flags);
			if (ret)
				dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		/*
		 * If control reaches here, the remaining resource_entry
		 * size cannot fit in a single BAR. So find the largest BAR
		 * whose size is less than or equal to the remaining
		 * resource_entry size and split the resource entry so that
		 * part of it fits inside that BAR. The remainder is handled
		 * in the next iteration of the loop.
		 * If no such BAR is found, there is no way to fit this
		 * resource_entry, so error out.
		 */
		bar = cdns_pcie_host_find_max_bar(rc, size);
		if (bar == RP_BAR_UNDEFINED) {
			dev_err(dev, "No free BAR to map cpu_addr %llx\n",
				cpu_addr);
			return -EINVAL;
		}

		winsize = bar_max_size[bar];
		ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize,
						   flags);
		if (ret) {
			dev_err(dev, "IB BAR: %d config failed\n", bar);
			return ret;
		}

		size -= winsize;
		cpu_addr += winsize;
	}

	return 0;
}

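/* list_sort() comparator: order dma-ranges entries by decreasing size. */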
static int cdns_pcie_host_dma_ranges_cmp(void *priv, const struct list_head *a,
					 const struct list_head *b)
{
	struct resource_entry *entry1, *entry2;

	entry1 = container_of(a, struct resource_entry, node);
	entry2 = container_of(b, struct resource_entry, node);

	return resource_size(entry2->res) - resource_size(entry1->res);
}

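/*
 * Set up inbound address translation.  Without a dma-ranges property, open a
 * single window of 2^"cdns,no-bar-match-nbits" bytes (default 2^32) on
 * RP_NO_BAR; otherwise map each dma-ranges entry, largest first.
 */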
static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 no_bar_nbits = 32;
	int err;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	if (list_empty(&bridge->dma_ranges)) {
		of_property_read_u32(np, "cdns,no-bar-match-nbits",
				     &no_bar_nbits);
		err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0,
						   (u64)1 << no_bar_nbits, 0);
		if (err)
			dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR);
		return err;
	}

	list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp);

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		err = cdns_pcie_host_bar_config(rc, entry);
		if (err) {
			dev_err(dev, "Failed to configure IB using dma-ranges\n");
			return err;
		}
	}

	return 0;
}

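/*
 * Set up outbound address translation: region 0 is reserved for
 * configuration space accesses, one region is programmed per bridge window
 * (MEM or IO), and the inbound dma-ranges mappings are set up last.
 */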
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rc);
	struct resource *cfg_res = rc->cfg_res;
	struct resource_entry *entry;
	u64 cpu_addr = cfg_res->start;
	u32 addr0, addr1, desc1;
	int r, busnr = 0;

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (entry)
		busnr = entry->res->start;

	/*
	 * Reserve region 0 for PCI configuration space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	if (pcie->ops->cpu_addr_fixup)
		cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr);

	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	r = 1;
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;
		u64 pci_addr = res->start - entry->offset;

		if (resource_type(res) == IORESOURCE_IO)
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      true,
						      pci_pio_to_address(res->start),
						      pci_addr,
						      resource_size(res));
		else
			cdns_pcie_set_outbound_region(pcie, busnr, 0, r,
						      false,
						      res->start,
						      pci_addr,
						      resource_size(res));

		r++;
	}

	return cdns_pcie_host_map_dma_ranges(rc);
}

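/* Initialize the root port and the address translation registers. */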
static int cdns_pcie_host_init(struct device *dev,
			       struct cdns_pcie_rc *rc)
{
	int err;

	err = cdns_pcie_host_init_root_port(rc);
	if (err)
		return err;

	return cdns_pcie_host_init_address_translation(rc);
}

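/*
 * Host bridge setup entry point called by the platform glue drivers: map the
 * "reg" and "cfg" resources, bring up the link, initialize the root port and
 * address translation, then register the bridge with the PCI core.
 */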
int cdns_pcie_host_setup(struct cdns_pcie_rc *rc)
{
	struct device *dev = rc->pcie.dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	enum cdns_pcie_rp_bar bar;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;

	bridge = pci_host_bridge_from_priv(rc);
	if (!bridge)
		return -ENOMEM;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	rc->vendor_id = 0xffff;
	of_property_read_u32(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u32(np, "device-id", &rc->device_id);

	pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg");
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base))
		return PTR_ERR(rc->cfg_base);
	rc->cfg_res = res;

	if (rc->quirk_detect_quiet_flag)
		cdns_pcie_detect_quiet_min_delay_set(&rc->pcie);

	ret = cdns_pcie_start_link(pcie);
	if (ret) {
		dev_err(dev, "Failed to start link\n");
		return ret;
	}

	ret = cdns_pcie_host_start_link(rc);
	if (ret)
		dev_dbg(dev, "PCIe link never came up\n");

	for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++)
		rc->avail_ib_bar[bar] = true;

	ret = cdns_pcie_host_init(dev, rc);
	if (ret)
		return ret;

	if (!bridge->ops)
		bridge->ops = &cdns_pcie_host_ops;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_init;

	return 0;

 err_init:
	pm_runtime_put_sync(dev);

	return ret;
}