1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2017 Cadence
3 // Cadence PCIe host controller driver.
4 // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com>
5 
6 #include <linux/kernel.h>
7 #include <linux/of_address.h>
8 #include <linux/of_pci.h>
9 #include <linux/platform_device.h>
10 #include <linux/pm_runtime.h>
11 
12 #include "pcie-cadence.h"
13 
/**
 * struct cdns_pcie_rc - private data for this PCIe Root Complex driver
 * @pcie: Cadence PCIe controller
 * @dev: pointer to PCIe device
 * @cfg_res: start/end offsets in the physical system memory to map PCI
 *           configuration space accesses
 * @bus_range: first/last buses behind the PCIe host controller
 * @cfg_base: IO mapped window to access the PCI configuration space of a
 *            single function at a time
 * @max_regions: maximum number of regions supported by the hardware
 * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address
 *                translation (nbits sets into the "no BAR match" register)
 * @vendor_id: PCI vendor ID
 * @device_id: PCI device ID
 */
struct cdns_pcie_rc {
	struct cdns_pcie	pcie;
	struct device		*dev;
	struct resource		*cfg_res;
	struct resource		*bus_range;
	void __iomem		*cfg_base;
	u32			max_regions;
	u32			no_bar_nbits;
	u16			vendor_id;
	u16			device_id;
};
40 
cdns_pci_map_bus(struct pci_bus * bus,unsigned int devfn,int where)41 static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
42 				      int where)
43 {
44 	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
45 	struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge);
46 	struct cdns_pcie *pcie = &rc->pcie;
47 	unsigned int busn = bus->number;
48 	u32 addr0, desc0;
49 
50 	if (busn == rc->bus_range->start) {
51 		/*
52 		 * Only the root port (devfn == 0) is connected to this bus.
53 		 * All other PCI devices are behind some bridge hence on another
54 		 * bus.
55 		 */
56 		if (devfn)
57 			return NULL;
58 
59 		return pcie->reg_base + (where & 0xfff);
60 	}
61 	/* Check that the link is up */
62 	if (!(cdns_pcie_readl(pcie, CDNS_PCIE_LM_BASE) & 0x1))
63 		return NULL;
64 	/* Clear AXI link-down status */
65 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_LINKDOWN, 0x0);
66 
67 	/* Update Output registers for AXI region 0. */
68 	addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) |
69 		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) |
70 		CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn);
71 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0);
72 
73 	/* Configuration Type 0 or Type 1 access. */
74 	desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID |
75 		CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0);
76 	/*
77 	 * The bus number was already set once for all in desc1 by
78 	 * cdns_pcie_host_init_address_translation().
79 	 */
80 	if (busn == rc->bus_range->start + 1)
81 		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0;
82 	else
83 		desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1;
84 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0);
85 
86 	return rc->cfg_base + (where & 0xfff);
87 }
88 
/* Config accessors: generic read/write routed through cdns_pci_map_bus(). */
static struct pci_ops cdns_pcie_host_ops = {
	.map_bus	= cdns_pci_map_bus,
	.read		= pci_generic_config_read,
	.write		= pci_generic_config_write,
};
94 
/* Matched against the "compatible" property of the host controller DT node. */
static const struct of_device_id cdns_pcie_host_of_match[] = {
	{ .compatible = "cdns,cdns-pcie-host" },

	{ },
};
100 
cdns_pcie_host_init_root_port(struct cdns_pcie_rc * rc)101 static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc)
102 {
103 	struct cdns_pcie *pcie = &rc->pcie;
104 	u32 value, ctrl;
105 	u32 id;
106 
107 	/*
108 	 * Set the root complex BAR configuration register:
109 	 * - disable both BAR0 and BAR1.
110 	 * - enable Prefetchable Memory Base and Limit registers in type 1
111 	 *   config space (64 bits).
112 	 * - enable IO Base and Limit registers in type 1 config
113 	 *   space (32 bits).
114 	 */
115 	ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED;
116 	value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) |
117 		CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) |
118 		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE |
119 		CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS |
120 		CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE |
121 		CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS;
122 	cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value);
123 
124 	/* Set root port configuration space */
125 	if (rc->vendor_id != 0xffff) {
126 		id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) |
127 			CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id);
128 		cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id);
129 	}
130 
131 	if (rc->device_id != 0xffff)
132 		cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id);
133 
134 	cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0);
135 	cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0);
136 	cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI);
137 
138 	return 0;
139 }
140 
cdns_pcie_host_init_address_translation(struct cdns_pcie_rc * rc)141 static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
142 {
143 	struct cdns_pcie *pcie = &rc->pcie;
144 	struct resource *cfg_res = rc->cfg_res;
145 	struct resource *mem_res = pcie->mem_res;
146 	struct resource *bus_range = rc->bus_range;
147 	struct device *dev = rc->dev;
148 	struct device_node *np = dev->of_node;
149 	struct of_pci_range_parser parser;
150 	struct of_pci_range range;
151 	u32 addr0, addr1, desc1;
152 	u64 cpu_addr;
153 	int r, err;
154 
155 	/*
156 	 * Reserve region 0 for PCI configure space accesses:
157 	 * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by
158 	 * cdns_pci_map_bus(), other region registers are set here once for all.
159 	 */
160 	addr1 = 0; /* Should be programmed to zero. */
161 	desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start);
162 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1);
163 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1);
164 
165 	cpu_addr = cfg_res->start - mem_res->start;
166 	addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) |
167 		(lower_32_bits(cpu_addr) & GENMASK(31, 8));
168 	addr1 = upper_32_bits(cpu_addr);
169 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0);
170 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1);
171 
172 	err = of_pci_range_parser_init(&parser, np);
173 	if (err)
174 		return err;
175 
176 	r = 1;
177 	for_each_of_pci_range(&parser, &range) {
178 		bool is_io;
179 
180 		if (r >= rc->max_regions)
181 			break;
182 
183 		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
184 			is_io = false;
185 		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
186 			is_io = true;
187 		else
188 			continue;
189 
190 		cdns_pcie_set_outbound_region(pcie, 0, r, is_io,
191 					      range.cpu_addr,
192 					      range.pci_addr,
193 					      range.size);
194 		r++;
195 	}
196 
197 	/*
198 	 * Set Root Port no BAR match Inbound Translation registers:
199 	 * needed for MSI and DMA.
200 	 * Root Port BAR0 and BAR1 are disabled, hence no need to set their
201 	 * inbound translation registers.
202 	 */
203 	addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits);
204 	addr1 = 0;
205 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0);
206 	cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1);
207 
208 	return 0;
209 }
210 
cdns_pcie_host_init(struct device * dev,struct list_head * resources,struct cdns_pcie_rc * rc)211 static int cdns_pcie_host_init(struct device *dev,
212 			       struct list_head *resources,
213 			       struct cdns_pcie_rc *rc)
214 {
215 	struct resource *bus_range = NULL;
216 	int err;
217 
218 	/* Parse our PCI ranges and request their resources */
219 	err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range);
220 	if (err)
221 		return err;
222 
223 	rc->bus_range = bus_range;
224 	rc->pcie.bus = bus_range->start;
225 
226 	err = cdns_pcie_host_init_root_port(rc);
227 	if (err)
228 		goto err_out;
229 
230 	err = cdns_pcie_host_init_address_translation(rc);
231 	if (err)
232 		goto err_out;
233 
234 	return 0;
235 
236  err_out:
237 	pci_free_resource_list(resources);
238 	return err;
239 }
240 
/*
 * Probe the host controller: allocate the bridge, read the optional DT
 * tuning properties, map the register/config windows, bring up the PHYs
 * and runtime PM, then initialize and register the PCI host bridge.
 *
 * Error paths unwind in strict reverse order of acquisition via the
 * goto labels at the bottom.
 */
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
	const char *type;
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct pci_host_bridge *bridge;
	struct list_head resources;
	struct cdns_pcie_rc *rc;
	struct cdns_pcie *pcie;
	struct resource *res;
	int ret;
	int phy_count;

	/* rc lives in the bridge's private area; freed with the bridge. */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
	if (!bridge)
		return -ENOMEM;

	rc = pci_host_bridge_priv(bridge);
	rc->dev = dev;

	pcie = &rc->pcie;
	pcie->is_rc = true;

	/* DT properties are optional; the defaults below apply otherwise. */
	rc->max_regions = 32;
	of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions);

	rc->no_bar_nbits = 32;
	of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits);

	/* 0xffff means "not provided" and keeps the hardware default IDs. */
	rc->vendor_id = 0xffff;
	of_property_read_u16(np, "vendor-id", &rc->vendor_id);

	rc->device_id = 0xffff;
	of_property_read_u16(np, "device-id", &rc->device_id);

	/* A host controller node must be flagged device_type = "pci". */
	type = of_get_property(np, "device_type", NULL);
	if (!type || strcmp(type, "pci")) {
		dev_err(dev, "invalid \"device_type\" %s\n", type);
		return -EINVAL;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
	pcie->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(pcie->reg_base)) {
		dev_err(dev, "missing \"reg\"\n");
		return PTR_ERR(pcie->reg_base);
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	rc->cfg_base = devm_pci_remap_cfg_resource(dev, res);
	if (IS_ERR(rc->cfg_base)) {
		dev_err(dev, "missing \"cfg\"\n");
		return PTR_ERR(rc->cfg_base);
	}
	rc->cfg_res = res;

	/* "mem" is only used as a base offset; it is not ioremapped. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
	if (!res) {
		dev_err(dev, "missing \"mem\"\n");
		return -EINVAL;
	}
	pcie->mem_res = res;

	ret = cdns_pcie_init_phy(dev, pcie);
	if (ret) {
		dev_err(dev, "failed to init phy\n");
		return ret;
	}
	platform_set_drvdata(pdev, pcie);

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed\n");
		goto err_get_sync;
	}

	/* On failure this frees the resource list itself. */
	ret = cdns_pcie_host_init(dev, &resources, rc);
	if (ret)
		goto err_init;

	list_splice_init(&resources, &bridge->windows);
	bridge->dev.parent = dev;
	bridge->busnr = pcie->bus;
	bridge->ops = &cdns_pcie_host_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_host_probe(bridge);
	if (ret < 0)
		goto err_host_probe;

	return 0;

 err_host_probe:
	pci_free_resource_list(&resources);

 err_init:
	pm_runtime_put_sync(dev);

 err_get_sync:
	pm_runtime_disable(dev);
	cdns_pcie_disable_phy(pcie);
	/* Drop the device links created by cdns_pcie_init_phy(). */
	phy_count = pcie->phy_count;
	while (phy_count--)
		device_link_del(pcie->link[phy_count]);

	return ret;
}
350 
cdns_pcie_shutdown(struct platform_device * pdev)351 static void cdns_pcie_shutdown(struct platform_device *pdev)
352 {
353 	struct device *dev = &pdev->dev;
354 	struct cdns_pcie *pcie = dev_get_drvdata(dev);
355 	int ret;
356 
357 	ret = pm_runtime_put_sync(dev);
358 	if (ret < 0)
359 		dev_dbg(dev, "pm_runtime_put_sync failed\n");
360 
361 	pm_runtime_disable(dev);
362 	cdns_pcie_disable_phy(pcie);
363 }
364 
/* Built-in only (no module unload), hence no .remove callback. */
static struct platform_driver cdns_pcie_host_driver = {
	.driver = {
		.name = "cdns-pcie-host",
		.of_match_table = cdns_pcie_host_of_match,
		.pm	= &cdns_pcie_pm_ops,
	},
	.probe = cdns_pcie_host_probe,
	.shutdown = cdns_pcie_shutdown,
};
builtin_platform_driver(cdns_pcie_host_driver);
375