/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci.h>
#include <misc/cxl.h>
#include "cxl.h"

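/* dma_set_mask hook for devices on the virtual PHB; only a full 64-bit mask is accepted. */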
static int cxl_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
{
	if (dma_mask < DMA_BIT_MASK(64)) {
		pr_info("%s only 64bit DMA supported on CXL", __func__);
		return -EIO;
	}

	*(pdev->dev.dma_mask) = dma_mask;
	return 0;
}

static int cxl_pci_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;
}

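/* MSI is not supported on the virtual PHB: setup always fails and teardown is a no-op. */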
static int cxl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	return -ENODEV;
}

static void cxl_teardown_msi_irqs(struct pci_dev *pdev)
{
	/*
	 * MSI should never be set, but we still need to provide this
	 * callback.
	 */
}

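/*
 * Enable-device hook: refuse if the adapter link is down, otherwise set up
 * direct DMA ops and allocate a default CXL context for the device before
 * enabling the AFU.
 */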
static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	struct cxl_context *ctx;

	phb = pci_bus_to_host(dev->bus);
	afu = (struct cxl_afu *)phb->private_data;

	if (!cxl_adapter_link_ok(afu->adapter)) {
		dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
		return false;
	}

	set_dma_ops(&dev->dev, &dma_direct_ops);
	set_dma_offset(&dev->dev, PAGE_OFFSET);

	/*
	 * Allocate a context to do cxl things with.  If we eventually do real
	 * DMA ops, we'll need a default context to attach them to.
	 */
	ctx = cxl_dev_context_init(dev);
	if (IS_ERR(ctx))
		return false;
	dev->dev.archdata.cxl_ctx = ctx;

	return (cxl_afu_check_and_enable(afu) == 0);
}

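/* Drop the default context allocated in cxl_pci_enable_device_hook(), unless it has already been started. */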
static void cxl_pci_disable_device(struct pci_dev *dev)
{
	struct cxl_context *ctx = cxl_get_context(dev);

	if (ctx) {
		if (ctx->status == STARTED) {
			dev_err(&dev->dev, "Default context started\n");
			return;
		}
		dev->dev.archdata.cxl_ctx = NULL;
		cxl_release_context(ctx);
	}
}

static resource_size_t cxl_pci_window_alignment(struct pci_bus *bus,
						unsigned long type)
{
	return 1;
}

static void cxl_pci_reset_secondary_bus(struct pci_dev *dev)
{
	/* Should we do an AFU reset here ? */
}

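/* Each (bus, devfn) pair maps to an AFU configuration record number. */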
static int cxl_pcie_cfg_record(u8 bus, u8 devfn)
{
	return (bus << 8) + devfn;
}

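/*
 * Compute the MMIO address of a config space register: cfg_addr is the base
 * of record 0 and cfg_data holds the per-record length (both set up in
 * cxl_pci_vphb_add()).
 */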
static unsigned long cxl_pcie_cfg_addr(struct pci_controller *phb,
				       u8 bus, u8 devfn, int offset)
{
	int record = cxl_pcie_cfg_record(bus, devfn);

	return (unsigned long)phb->cfg_addr + ((unsigned long)phb->cfg_data * record) + offset;
}

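/*
 * Validate the access against the AFU's configuration records and compute
 * the 32-bit aligned MMIO address plus the shift/mask needed to emulate
 * 1-, 2- and 4-byte accesses.
 */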
static int cxl_pcie_config_info(struct pci_bus *bus, unsigned int devfn,
				int offset, int len,
				volatile void __iomem **ioaddr,
				u32 *mask, int *shift)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;
	unsigned long addr;

	phb = pci_bus_to_host(bus);
	if (phb == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	afu = (struct cxl_afu *)phb->private_data;

	if (cxl_pcie_cfg_record(bus->number, devfn) > afu->crs_num)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= (unsigned long)phb->cfg_data)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = cxl_pcie_cfg_addr(phb, bus->number, devfn, offset);

	*ioaddr = (void *)(addr & ~0x3ULL);
	*shift = ((addr & 0x3) * 8);
	switch (len) {
	case 1:
		*mask = 0xff;
		break;
	case 2:
		*mask = 0xffff;
		break;
	default:
		*mask = 0xffffffff;
		break;
	}
	return 0;
}

static inline bool cxl_config_link_ok(struct pci_bus *bus)
{
	struct pci_controller *phb;
	struct cxl_afu *afu;

	/* Config space IO is based on phb->cfg_addr, which is based on
	 * afu_desc_mmio. This isn't safe to read/write when the link
	 * goes down, as EEH tears down MMIO space.
	 *
	 * Check if the link is OK before proceeding.
	 */

	phb = pci_bus_to_host(bus);
	if (phb == NULL)
		return false;
	afu = (struct cxl_afu *)phb->private_data;
	return cxl_adapter_link_ok(afu->adapter);
}

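/* Config read op: the underlying MMIO only supports 32-bit reads, so smaller widths are extracted via the shift and mask from cxl_pcie_config_info(). */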
static int cxl_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	volatile void __iomem *ioaddr;
	int shift, rc;
	u32 mask;

	rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
				  &mask, &shift);
	if (rc)
		return rc;

	if (!cxl_config_link_ok(bus))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Can only read 32 bits */
	*val = (in_le32(ioaddr) >> shift) & mask;
	return PCIBIOS_SUCCESSFUL;
}

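/* Config write op: sub-word writes are implemented as a 32-bit read-modify-write. */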
static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	volatile void __iomem *ioaddr;
	u32 v, mask;
	int shift, rc;

	rc = cxl_pcie_config_info(bus, devfn, offset, len, &ioaddr,
				  &mask, &shift);
	if (rc)
		return rc;

	if (!cxl_config_link_ok(bus))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Can only write 32 bits so do read-modify-write */
	mask <<= shift;
	val <<= shift;

	v = (in_le32(ioaddr) & ~mask) | (val & mask);

	out_le32(ioaddr, v);
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops cxl_pcie_pci_ops = {
	.read = cxl_pcie_read_config,
	.write = cxl_pcie_write_config,
};

static struct pci_controller_ops cxl_pci_controller_ops = {
	.probe_mode = cxl_pci_probe_mode,
	.enable_device_hook = cxl_pci_enable_device_hook,
	.disable_device = cxl_pci_disable_device,
	.release_device = cxl_pci_disable_device,
	.window_alignment = cxl_pci_window_alignment,
	.reset_secondary_bus = cxl_pci_reset_secondary_bus,
	.setup_msi_irqs = cxl_setup_msi_irqs,
	.teardown_msi_irqs = cxl_teardown_msi_irqs,
	.dma_set_mask = cxl_dma_set_mask,
};

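/*
 * Create a virtual PHB for the AFU: allocate a pci_controller, point its
 * config accessors at the AFU descriptor MMIO space, then scan and add the
 * resulting bus.
 */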
int cxl_pci_vphb_add(struct cxl_afu *afu)
{
	struct pci_dev *phys_dev;
	struct pci_controller *phb, *phys_phb;

	phys_dev = to_pci_dev(afu->adapter->dev.parent);
	phys_phb = pci_bus_to_host(phys_dev->bus);

	/* Alloc and setup PHB data structure */
	phb = pcibios_alloc_controller(phys_phb->dn);

	if (!phb)
		return -ENODEV;

	/* Setup parent in sysfs */
	phb->parent = &phys_dev->dev;

	/* Setup the PHB using arch provided callback */
	phb->ops = &cxl_pcie_pci_ops;
	phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
	phb->cfg_data = (void *)(u64)afu->crs_len;
	phb->private_data = afu;
	phb->controller_ops = cxl_pci_controller_ops;

	/* Scan the bus */
	pcibios_scan_phb(phb);
	if (phb->bus == NULL)
		return -ENXIO;

	/* Claim resources. This might need some rework as well depending
	 * on whether we are doing probe-only or not, like assigning
	 * unassigned resources etc...
	 */
	pcibios_claim_one_bus(phb->bus);

	/* Add probed PCI devices to the device model */
	pci_bus_add_devices(phb->bus);

	afu->phb = phb;

	return 0;
}

void cxl_pci_vphb_reconfigure(struct cxl_afu *afu)
{
	/* When we are reconfigured, the AFU's MMIO space is unmapped
	 * and remapped. We need to reflect this in the PHB's view of
	 * the world.
	 */
	afu->phb->cfg_addr = afu->afu_desc_mmio + afu->crs_offset;
}

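/* Undo cxl_pci_vphb_add(): remove the root bus and free the controller. */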
void cxl_pci_vphb_remove(struct cxl_afu *afu)
{
	struct pci_controller *phb;

	/* If there is no configuration record we won't have one of these */
	if (!afu || !afu->phb)
		return;

	phb = afu->phb;
	afu->phb = NULL;

	pci_remove_root_bus(phb->bus);
	pcibios_free_controller(phb);
}

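/* Exported helper: look up the AFU behind a device on the virtual PHB. */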
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev)
{
	struct pci_controller *phb;

	phb = pci_bus_to_host(dev->bus);

	return (struct cxl_afu *)phb->private_data;
}
EXPORT_SYMBOL_GPL(cxl_pci_to_afu);

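/* Exported helper: return the configuration record number for a device on the virtual PHB. */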
unsigned int cxl_pci_to_cfg_record(struct pci_dev *dev)
{
	return cxl_pcie_cfg_record(dev->bus->number, dev->devfn);
}
EXPORT_SYMBOL_GPL(cxl_pci_to_cfg_record);