• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2017 Cadence
3 // Cadence PCIe host controller driver.
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5 
6 #include <linux/kernel.h>
7 #include <linux/of_address.h>
8 #include <linux/of_pci.h>
9 #include <linux/platform_device.h>
10 #include <linux/pm_runtime.h>
11 
12 #include "pcie-cadence.h"
13 
/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @dev: pointer to PCIe device (the host bridge platform device's dev)
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @bus_range: first/last buses behind the PCIe host controller
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @max_regions: maximum number of regions supported by the hardware
 * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
 *                translation (nbits sets into the "no BAR match" register)
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 */
struct cdns_pcie_rc {
	struct cdns_pcie	pcie;
	struct device		*dev;
	struct resource		*cfg_res;
	struct resource		*bus_range;
	void __iomem		*cfg_base;
	u32			max_regions;
	u32			no_bar_nbits;
	u16			vendor_id;
	u16			device_id;
};
40 
/*
 * Return the IO-mapped address at which a config access for (bus, devfn,
 * where) can be performed, or NULL if the target is unreachable.
 *
 * Root bus accesses go straight to the local root-port registers; accesses
 * to downstream buses are routed through outbound AXI region 0, whose
 * PCI_ADDR0/DESC0 registers are reprogrammed here on every call (the
 * remaining region-0 registers are set up once at init time).
 */
static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
				      int where)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
	struct cdns_pcie *pcie = &rc->pcie;
	unsigned int busn = bus->number;
	u32 addr0, desc0;

	if (busn == rc->bus_range->start) {
		/*
		 * Only the root port (devfn == 0) is connected to this bus.
		 * All other PCI devices are behind some bridge hence on another
		 * bus.
		 */
		if (devfn)
			return NULL;

		/* Root-port config space is mapped at the controller base. */
		return pcie->reg_base + (where & 0xfff);
	}
	/* Check that the link is up */
	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
		return NULL;
	/* Clear AXI link-down status */
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);

	/* Update Output registers for AXI region 0. */
	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);

	/* Configuration Type 0 or Type 1 access. */
	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
	/*
	 * The bus number was already set once for all in desc1 by
	 * cdns_pcie_host_init_address_translation().
	 */
	if (busn == rc->bus_range->start + 1)
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
	else
		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);

	/* 4 KB config window per function; 'where' indexes into it. */
	return rc->cfg_base + (where & 0xfff);
}
88 
/*
 * Generic config accessors; cdns_pci_map_bus() selects (and, for downstream
 * buses, reprograms) the window each read/write goes through.
 */
static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};
94 
/* Device-tree compatible strings this driver binds against. */
static const struct of_device_id cdns_pcie_host_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-host" },

	{ },
};
100 
/*
 * Program the root-port side of the controller: disable the RC BARs,
 * enable the type-1 prefetchable-memory and IO base/limit registers, and
 * set the root port's IDs and class code.
 *
 * Always returns 0 (kept int for symmetry with the other init helpers).
 */
static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	u32 value, ctrl;
	u32 id;

	/*
	 * Set the root complex BAR configuration register:
	 * - disable both BAR0 and BAR1.
	 * - enable Prefetchable Memory Base and Limit registers in type 1
	 *   config space (64 bits).
	 * - enable IO Base and Limit registers in type 1 config
	 *   space (32 bits).
	 */
	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);

	/* Set root port configuration space */
	/* 0xffff means "not specified in DT" (see probe); skip the write. */
	if (rc->vendor_id != 0xffff) {
		/* Vendor ID doubles as the subsystem vendor ID here. */
		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
	}

	if (rc->device_id != 0xffff)
		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);

	/* Advertise the root port as a PCI-to-PCI bridge, revision 0. */
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);

	return 0;
}
140 
/*
 * Program the address-translation (AT) unit:
 *  - outbound region 0 is reserved for config accesses (its CPU-side
 *    window and DESC1 bus number are fixed here; PCI_ADDR0/DESC0 are
 *    rewritten per access by cdns_pci_map_bus()),
 *  - one outbound region per MEM/IO range from the DT "ranges" property,
 *  - the "no BAR match" inbound region for MSI and DMA.
 *
 * Returns 0 on success or a negative errno from the DT range parser.
 */
static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
{
	struct cdns_pcie *pcie = &rc->pcie;
	struct resource *cfg_res = rc->cfg_res;
	struct resource *mem_res = pcie->mem_res;
	struct resource *bus_range = rc->bus_range;
	struct device *dev = rc->dev;
	struct device_node *np = dev->of_node;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 addr0, addr1, desc1;
	u64 cpu_addr;
	int r, err;

	/*
	 * Reserve region 0 for PCI configure space accesses:
	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
	 * cdns_pci_map_bus(), other region registers are set here once for all.
	 */
	addr1 = 0; /* Should be programmed to zero. */
	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);

	/* CPU-side window: "cfg" resource, expressed relative to "mem". */
	cpu_addr = cfg_res->start - mem_res->start;
	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
	addr1 = upper_32_bits(cpu_addr);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);

	err = of_pci_range_parser_init(&parser, np);
	if (err)
		return err;

	/* Regions 1..max_regions-1 map the DT "ranges"; extras are dropped. */
	r = 1;
	for_each_of_pci_range(&parser, &range) {
		bool is_io;

		if (r >= rc->max_regions)
			break;

		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			is_io = false;
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			is_io = true;
		else
			continue;

		cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
					      range.cpu_addr,
					      range.pci_addr,
					      range.size);
		r++;
	}

	/*
	 * Set Root Port no BAR match Inbound Translation registers:
	 * needed for MSI and DMA.
	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
	 * inbound translation registers.
	 */
	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
	addr1 = 0;
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);

	return 0;
}
210 
cdns_pcie_host_init(struct device * dev,struct list_head * resources,struct cdns_pcie_rc * rc)211 static int cdns_pcie_host_init(struct device *dev,
212 			       struct list_head *resources,
213 			       struct cdns_pcie_rc *rc)
214 {
215 	struct resource *bus_range = NULL;
216 	int err;
217 
218 	/* Parse our PCI ranges and request their resources */
219 	err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
220 	if (err)
221 		return err;
222 
223 	rc->bus_range = bus_range;
224 	rc->pcie.bus = bus_range->start;
225 
226 	err = cdns_pcie_host_init_root_port(rc);
227 	if (err)
228 		goto err_out;
229 
230 	err = cdns_pcie_host_init_address_translation(rc);
231 	if (err)
232 		goto err_out;
233 
234 	return 0;
235 
236  err_out:
237 	pci_free_resource_list(resources);
238 	return err;
239 }
240 
/*
 * Probe: allocate the host bridge, read the optional DT tuning properties,
 * map the "reg"/"cfg"/"mem" resources, bring up the PHYs and runtime PM,
 * initialize the controller and register the PCI host bridge.
 *
 * Error unwinding runs in reverse acquisition order via the labels at the
 * bottom; resources obtained with devm_* are released by the driver core.
 */
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct list_head resources;
	struct cdns_pcie_rc *rc;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;
	int phy_count;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
	if (!bridge)
		return -ENOMEM;

	rc = pci_host_bridge_priv(bridge);
	rc->dev = dev;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	/* DT overrides are optional; the defaults below apply otherwise. */
	rc->max_regions = 32;
	of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);

	rc->no_bar_nbits = 32;
	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);

	/* 0xffff = "unset" sentinel, honored by init_root_port(). */
	rc->vendor_id = 0xffff;
	of_property_read_u16(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u16(np, "device-id", &rc->device_id);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base)) {
		dev_err(dev, "missing \"cfg\"\n");
		return PTR_ERR(rc->cfg_base);
	}
	rc->cfg_res = res;

	/* "mem" is only recorded, not mapped: it anchors the AT windows. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}
	platform_set_drvdata(pdev, pcie);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	ret = cdns_pcie_host_init(dev, &resources, rc);
	if (ret)
		goto err_init;

	list_splice_init(&resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->busnr = pcie->bus;
	bridge->ops = &cdns_pcie_host_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_host_probe;

	return 0;

	/*
	 * NOTE(review): by this point list_splice_init() has emptied
	 * 'resources', so this free is a no-op; the spliced windows are
	 * presumably released by the devm host-bridge cleanup — confirm.
	 */
 err_host_probe:
	pci_free_resource_list(&resources);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
	/* Drop the device links created by cdns_pcie_init_phy(). */
	phy_count = pcie->phy_count;
	while (phy_count--)
		device_link_del(pcie->link[phy_count]);

	return ret;
}
343 
cdns_pcie_shutdown(struct platform_device * pdev)344 static void cdns_pcie_shutdown(struct platform_device *pdev)
345 {
346 	struct device *dev = &pdev->dev;
347 	struct cdns_pcie *pcie = dev_get_drvdata(dev);
348 	int ret;
349 
350 	ret = pm_runtime_put_sync(dev);
351 	if (ret < 0)
352 		dev_dbg(dev, "pm_runtime_put_sync failed\n");
353 
354 	pm_runtime_disable(dev);
355 	cdns_pcie_disable_phy(pcie);
356 }
357 
/* Built-in platform driver: no remove callback, shutdown only. */
static struct platform_driver cdns_pcie_host_driver = {
	.driver = {
		.name = "cdns-pcie-host",
		.of_match_table = cdns_pcie_host_of_match,
		.pm	= &cdns_pcie_pm_ops,
	},
	.probe = cdns_pcie_host_probe,
	.shutdown = cdns_pcie_shutdown,
};
builtin_platform_driver(cdns_pcie_host_driver);
368