// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"

static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}

static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}

static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}

static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	dev->dev.archdata.dma_offset = PAGE_OFFSET;

	/*
	 * Allocate a context to do cxl things with.  If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_ops->afu_check_and_enable(afu) == 0);
}

static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}

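/*
 * Report the minimum window alignment (1), i.e. the vPHB imposes no
 * extra alignment constraints of its own.
 */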
static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}

static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here? */
}

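/*
 * Devices behind the vPHB are backed by AFU configuration records;
 * the record number is recovered from the virtual bus number and
 * devfn assigned to the device.
 */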
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
	return (bus << 8) + devfn;
}

static inline struct cxl_afu *pci_bus_to_afu(struct pci_bus *bus)
{
	struct pci_controller *phb = bus ? pci_bus_to_host(bus) : NULL;

	return phb ? phb->private_data : NULL;
}

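/*
 * configured_state doubles as a reader count for config space
 * accesses; a negative value indicates the AFU is no longer
 * configured, so new accesses are refused.
 */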
static void cxl_afu_configured_put(struct cxl_afu *afu)
{
	atomic_dec_if_positive(&afu->configured_state);
}

static bool cxl_afu_configured_get(struct cxl_afu *afu)
{
	return atomic_inc_unless_negative(&afu->configured_state);
}

static inline int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				       struct cxl_afu *afu, int *_record)
{
	int record;

	record = cxl_pcie_cfg_record(bus->number, devfn);
	if (record > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;

	*_record = record;
	return 0;
}

static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	int rc, record;
	struct cxl_afu *afu;
	u8 val8;
	u16 val16;
	u32 val32;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_read8(afu, record, offset, &val8);
		*val = val8;
		break;
	case 2:
		rc = cxl_ops->afu_cr_read16(afu, record, offset, &val16);
		*val = val16;
		break;
	case 4:
		rc = cxl_ops->afu_cr_read32(afu, record, offset, &val32);
		*val = val32;
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_DEVICE_NOT_FOUND : 0;
}

static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	int rc, record;
	struct cxl_afu *afu;

	afu = pci_bus_to_afu(bus);
	/* Grab a reader lock on afu. */
	if (afu == NULL || !cxl_afu_configured_get(afu))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = cxl_pcie_config_info(bus, devfn, afu, &record);
	if (rc)
		goto out;

	switch (len) {
	case 1:
		rc = cxl_ops->afu_cr_write8(afu, record, offset, val & 0xff);
		break;
	case 2:
		rc = cxl_ops->afu_cr_write16(afu, record, offset, val & 0xffff);
		break;
	case 4:
		rc = cxl_ops->afu_cr_write32(afu, record, offset, val);
		break;
	default:
		WARN_ON(1);
	}

out:
	cxl_afu_configured_put(afu);
	return rc ? PCIBIOS_SET_FAILED : 0;
}

static struct pci_ops cxl_pcie_pci_ops =
{
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};


static struct pci_controller_ops cxl_pci_controller_ops =
{
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
};

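/*
 * Create a virtual PHB for the AFU so that its configuration records
 * appear as PCI devices that generic kernel drivers can bind to.
 */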
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_controller *phb;
	struct device_node *vphb_dn;
	struct device *parent;

	/*
	 * If there are no AFU configuration records we won't have anything to
	 * expose under the vPHB, so skip creating one, returning success since
	 * this is still a valid case. This will also opt us out of EEH
	 * handling since we won't have anything special to do if there are no
	 * kernel drivers attached to the vPHB, and EEH handling is not yet
	 * supported in the peer model.
	 */
	if (!afu->crs_num)
		return 0;

	/*
	 * The parent device is the adapter, so reuse the adapter's device
	 * node. We don't seem to care what device node is used for the vPHB,
	 * but tools such as lsvpd walk up the device parents looking for a
	 * valid location code, so we might as well show devices attached to
	 * the adapter as being located on that adapter.
	 */
	parent = afu->adapter->dev.parent;
	vphb_dn = parent->of_node;

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(vphb_dn);
	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = parent;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = NULL;
	phb->cfg_data = NULL;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	if (phb->bus == NULL)
		return -ENXIO;

	/* Set release hook on root bus */
	pci_set_host_bridge_release(to_pci_host_bridge(phb->bus->bridge),
				    pcibios_free_controller_deferred,
				    (void *) phb);

	/*
	 * Claim resources. This might need some rework as well depending on
	 * whether we are doing probe-only or not, like assigning unassigned
	 * resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}

void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	/*
	 * We don't free phb here - that's handled by
	 * pcibios_free_controller_deferred()
	 */
}

bool cxl_pci_is_vphb_device(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (phb->ops == &cxl_pcie_pci_ops);
}

struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);

unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);