// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/jump_label.h>
#include <linux/pci.h>
#include <linux/printk.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#include "pci_bus.h"
#include "pci_iov.h"

/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);

static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
static DEFINE_SPINLOCK(zpci_domain_lock);

#define ZPCI_IOMAP_ENTRIES						\
	min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2),	\
	    ZPCI_IOMAP_MAX_ENTRIES)

unsigned int s390_pci_no_rid;

static DEFINE_SPINLOCK(zpci_iomap_lock);
static unsigned long *zpci_iomap_bitmap;
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

DEFINE_STATIC_KEY_FALSE(have_mio);

static struct kmem_cache *zdev_fmb_cache;

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	spin_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			zpci_zdev_get(zdev);
			break;
		}
	}
	spin_unlock(&zpci_list_lock);
	return zdev;
}

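/*
 * Usage sketch (illustrative, not part of this file): get_zdev_by_fid()
 * takes a reference on the returned zpci_dev via zpci_zdev_get(), so a
 * caller is expected to drop it with zpci_zdev_put() once done:
 *
 *	struct zpci_dev *zdev = get_zdev_by_fid(fid);
 *
 *	if (zdev) {
 *		// ... use zdev ...
 *		zpci_zdev_put(zdev);
 *	}
 */
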
void zpci_remove_reserved_devices(void)
{
	struct zpci_dev *tmp, *zdev;
	enum zpci_state state;
	LIST_HEAD(remove);

	spin_lock(&zpci_list_lock);
	list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
		if (zdev->state == ZPCI_FN_STATE_STANDBY &&
		    !clp_get_state(zdev->fid, &state) &&
		    state == ZPCI_FN_STATE_RESERVED)
			list_move_tail(&zdev->entry, &remove);
	}
	spin_unlock(&zpci_list_lock);

	list_for_each_entry_safe(zdev, tmp, &remove, entry)
		zpci_device_reserved(zdev);
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_bus *) bus->sysdata)->domain_nr;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	WARN_ON_ONCE(iota & 0x3fff);
	fib.pba = base;
	fib.pal = limit;
	fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "reg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
	struct zpci_fib fib = {0};
	u8 cc, status;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc)
		zpci_dbg(3, "unreg ioat fid:%x, cc:%d, status:%d\n", zdev->fid, cc, status);
	return cc;
}

/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
		return -EINVAL;

	zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	WARN_ON((u64) zdev->fmb & 0xf);

	/* reset software counters */
	atomic64_set(&zdev->allocated_pages, 0);
	atomic64_set(&zdev->mapped_pages, 0);
	atomic64_set(&zdev->unmapped_pages, 0);

	fib.fmb_addr = virt_to_phys(zdev->fmb);
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
	struct zpci_fib fib = {0};
	u8 cc, status;

	if (!zdev->fmb)
		return -EINVAL;

	/* Function measurement is disabled if fmb address is zero */
	cc = zpci_mod_fc(req, &fib, &status);
	if (cc == 3) /* Function already gone. */
		cc = 0;

	if (!cc) {
		kmem_cache_free(zdev_fmb_cache, zdev->fmb);
		zdev->fmb = NULL;
	}
	return cc ? -EIO : 0;
}

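/*
 * The function measurement block (FMB) handed to the platform above must
 * be 16-byte aligned, which the WARN_ON in zpci_fmb_enable_device()
 * checks and which zdev_fmb_cache (created in zpci_mem_init() with
 * __alignof__(struct zpci_fmb)) provides. Disabling measurement reuses
 * the same Modify PCI Function Controls request with a zeroed FIB,
 * i.e. an FMB address of zero.
 */
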
static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = __zpci_load(&data, req, offset);
	if (!rc) {
		data = le64_to_cpu((__force __le64) data);
		data >>= (8 - len) * 8;
		*val = (u32) data;
	} else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data <<= (8 - len) * 8;
	data = (__force u64) cpu_to_le64(data);
	rc = __zpci_store(data, req, offset);
	return rc;
}

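/*
 * Worked example for the shift arithmetic above (a sketch, not from this
 * file): for a 2-byte config read (len == 2), the little-endian value
 * returned by __zpci_load() ends up in the most significant bytes of the
 * u64 after le64_to_cpu(), so shifting right by (8 - 2) * 8 = 48 bits
 * moves it into the low 16 bits of *val. zpci_cfg_store() is the mirror
 * image: shift left by 48, byte-swap, then issue the store.
 */
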
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
{
	unsigned long offset, vaddr;
	struct vm_struct *area;
	phys_addr_t last_addr;

	last_addr = addr + size - 1;
	if (!size || last_addr < addr)
		return NULL;

	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *) addr;

	offset = addr & ~PAGE_MASK;
	addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
		free_vm_area(area);
		return NULL;
	}
	return (void __iomem *) ((unsigned long) area->addr + offset);
}

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
{
	return __ioremap(addr, size, __pgprot(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, PAGE_KERNEL);
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
{
	return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
}
EXPORT_SYMBOL(ioremap_wt);

void iounmap(volatile void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);

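/*
 * Without MIO, __ioremap() returns the address unchanged: in that mode
 * the "addresses" handed to drivers are synthetic iomap cookies (see
 * ZPCI_ADDR() below) that the PCI load/store instruction wrappers decode
 * again, so no page tables are involved and iounmap() is a no-op. With
 * MIO the address is a real (special) address and gets a regular vmap
 * mapping, which iounmap() tears down via vunmap().
 */
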
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}

static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
					 unsigned long offset,
					 unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);

void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);

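/*
 * Driver-side usage sketch (illustrative; not part of this file). The
 * interface is the generic pci_iomap() API, so a driver does not care
 * which variant it gets:
 *
 *	void __iomem *base = pci_iomap(pdev, 0, 0);
 *
 *	if (!base)
 *		return -ENOMEM;
 *	writel(0x1, base + SOME_REG);	// SOME_REG is hypothetical
 *	pci_iounmap(pdev, base);
 */
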
static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
					    unsigned long offset, unsigned long max)
{
	unsigned long barsize = pci_resource_len(pdev, bar);
	struct zpci_dev *zdev = to_zpci(pdev);
	void __iomem *iova;

	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
	return iova ? iova + offset : iova;
}

void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);

void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);

static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx = ZPCI_IDX(addr);

	spin_lock(&zpci_iomap_lock);
	/* Detect underrun */
	WARN_ON(!zpci_iomap_start[idx].count);
	if (!--zpci_iomap_start[idx].count) {
		zpci_iomap_start[idx].fh = 0;
		zpci_iomap_start[idx].bar = 0;
	}
	spin_unlock(&zpci_iomap_lock);
}

static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
{
	iounmap(addr);
}

void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	if (static_branch_likely(&have_mio))
		pci_iounmap_mio(pdev, addr);
	else
		pci_iounmap_fh(pdev, addr);
}
EXPORT_SYMBOL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);

	return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

static void zpci_map_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;

		if (zpci_use_mio(zdev))
			pdev->resource[i].start =
				(resource_size_t __force) zdev->bars[i].mio_wt;
		else
			pdev->resource[i].start = (resource_size_t __force)
				pci_iomap_range_fh(pdev, i, 0, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
	}

	zpci_iov_map_resources(pdev);
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	resource_size_t len;
	int i;

	if (zpci_use_mio(zdev))
		return;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap_fh(pdev, (void __iomem __force *)
			       pdev->resource[i].start);
	}
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	unsigned long entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
	if (entry == ZPCI_IOMAP_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap_bitmap);
	spin_unlock(&zpci_iomap_lock);
}

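/*
 * The iomap entries allocated above back the synthetic cookies used in
 * the non-MIO case: ZPCI_ADDR(entry) encodes the table index into the
 * "address" returned by pci_iomap_range_fh(), and ZPCI_IDX() recovers it
 * in pci_iounmap_fh(). Each entry records the function handle and BAR so
 * the PCI load/store wrappers can rebuild the instruction arguments from
 * a plain void __iomem pointer.
 */
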
static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
				    unsigned long size, unsigned long flags)
{
	struct resource *r;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->name = zdev->res_name;

	if (request_resource(&iomem_resource, r)) {
		kfree(r);
		return NULL;
	}
	return r;
}

int zpci_setup_bus_resources(struct zpci_dev *zdev)
{
	unsigned long addr, size, flags;
	struct resource *res;
	int i, entry;

	snprintf(zdev->res_name, sizeof(zdev->res_name),
		 "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		if (zpci_use_mio(zdev))
			addr = (unsigned long) zdev->bars[i].mio_wt;
		else
			addr = ZPCI_ADDR(entry);
		size = 1UL << zdev->bars[i].size;

		res = __alloc_res(zdev, addr, size, flags);
		if (!res) {
			zpci_free_iomap(zdev, entry);
			return -ENOMEM;
		}
		zdev->bars[i].res = res;
	}
	zdev->has_resources = 1;

	return 0;
}

static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
{
	struct resource *res;
	int i;

	pci_lock_rescan_remove();
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = zdev->bars[i].res;
		if (!res)
			continue;

		release_resource(res);
		pci_bus_remove_resource(zdev->zbus->bus, res);
		zpci_free_iomap(zdev, zdev->bars[i].map_idx);
		zdev->bars[i].res = NULL;
		kfree(res);
	}
	zdev->has_resources = 0;
	pci_unlock_rescan_remove();
}

int pcibios_add_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	struct resource *res;
	int i;

	/* The pdev has a reference to the zdev via its bus */
	zpci_zdev_get(zdev);
	if (pdev->is_physfn)
		pdev->no_vf_scan = 1;

	pdev->dev.groups = zpci_attr_groups;
	pdev->dev.dma_ops = &s390_pci_dma_ops;
	zpci_map_resources(pdev);

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		res = &pdev->resource[i];
		if (res->parent || !res->flags)
			continue;
		pci_claim_resource(pdev, i);
	}

	return 0;
}

void pcibios_release_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_unmap_resources(pdev);
	zpci_zdev_put(zdev);
}

int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_debug_init_device(zdev, dev_name(&pdev->dev));
	zpci_fmb_enable_device(zdev);

	return pci_enable_resources(pdev, mask);
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	zpci_fmb_disable_device(zdev);
	zpci_debug_exit_device(zdev);
}

static int __zpci_register_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	if (test_bit(domain, zpci_domain)) {
		spin_unlock(&zpci_domain_lock);
		pr_err("Domain %04x is already assigned\n", domain);
		return -EEXIST;
	}
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

static int __zpci_alloc_domain(void)
{
	int domain;

	spin_lock(&zpci_domain_lock);
	/*
	 * We can always auto allocate domains below ZPCI_NR_DEVICES.
	 * There is either a free domain or we have reached the maximum in
	 * which case we would have bailed earlier.
	 */
	domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	set_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return domain;
}

int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}

void zpci_free_domain(int domain)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

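/*
 * Note (an interpretation, not stated in this file): when UID checking
 * is active, the caller-supplied domain, typically the firmware-provided
 * UID, is registered verbatim via __zpci_register_domain(); automatic
 * bitmap allocation is only the fallback when no UID is available, as
 * the pr_warn above documents.
 */
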
int zpci_enable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc = 0;

	if (clp_enable_fh(zdev, &fh, ZPCI_NR_DMA_SPACES))
		rc = -EIO;
	else
		zdev->fh = fh;
	return rc;
}

int zpci_disable_device(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int cc, rc = 0;

	cc = clp_disable_fh(zdev, &fh);
	if (!cc) {
		zdev->fh = fh;
	} else if (cc == CLP_RC_SETPCIFN_ALRDY) {
		pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
			zdev->fid);
		/* Function is already disabled - update handle */
		rc = clp_refresh_fh(zdev->fid, &fh);
		if (!rc) {
			zdev->fh = fh;
			rc = -EINVAL;
		}
	} else {
		rc = -EIO;
	}
	return rc;
}

/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation, either Standby or Configured
 *
 * Creates a new zpci device and adds it to its (possibly newly created)
 * zbus as well as to zpci_list.
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	kref_init(&zdev->kref);
	mutex_init(&zdev->lock);

	rc = zpci_init_iommu(zdev);
	if (rc)
		goto error;

	rc = zpci_bus_device_register(zdev, &pci_root_ops);
	if (rc)
		goto error_destroy_iommu;

	spin_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	spin_unlock(&zpci_list_lock);

	return zdev;

error_destroy_iommu:
	zpci_destroy_iommu(zdev);
error:
	zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}

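/*
 * Caller-side sketch (illustrative): since zpci_create_device() returns
 * an ERR_PTR() on failure rather than NULL, callers check it with the
 * IS_ERR()/PTR_ERR() helpers:
 *
 *	zdev = zpci_create_device(fid, fh, ZPCI_FN_STATE_STANDBY);
 *	if (IS_ERR(zdev))
 *		return PTR_ERR(zdev);
 */
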
bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
		state != ZPCI_FN_STATE_STANDBY;
}

/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If the PCI device is
 * parked because we cannot yet create a PCI bus, as function 0 has not been
 * seen, it is ignored but will be scanned once function 0 appears.
 * If any failure occurs, the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	int rc;

	zdev->fh = fh;
	/* the PCI function will be scanned once function 0 appears */
	if (!zdev->zbus->bus)
		return 0;

	/* For function 0 on a multi-function bus scan the whole bus, as we
	 * might have to pick up existing functions waiting for it to allow
	 * creating the PCI bus
	 */
	if (zdev->devfn == 0 && zdev->zbus->multifunction)
		rc = zpci_bus_scan_bus(zdev->zbus);
	else
		rc = zpci_bus_scan_device(zdev);

	return rc;
}

/**
 * zpci_deconfigure_device() - Deconfigure a zpci_dev
 * @zdev: The zpci_dev to deconfigure
 *
 * Deconfigure a zPCI function that is currently configured and possibly known
 * to the common code PCI subsystem.
 * If any failure occurs the device is left as is.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_deconfigure_device(struct zpci_dev *zdev)
{
	int rc;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table) {
		rc = zpci_dma_exit_device(zdev);
		if (rc)
			return rc;
	}
	if (zdev_enabled(zdev)) {
		rc = zpci_disable_device(zdev);
		if (rc)
			return rc;
	}

	rc = sclp_pci_deconfigure(zdev->fid);
	zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
	if (rc)
		return rc;
	zdev->state = ZPCI_FN_STATE_STANDBY;

	return 0;
}

/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 * After a call to this function the zpci_dev can no longer be found via
 * get_zdev_by_fid(), but may still be accessible via existing references,
 * although it will no longer be functional.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	if (zdev->has_hp_slot)
		zpci_exit_slot(zdev);
	/*
	 * Remove device from zpci_list as it is going away. This also
	 * makes sure we ignore subsequent zPCI events for this device.
	 */
	spin_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	spin_unlock(&zpci_list_lock);
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	zpci_zdev_put(zdev);
}

void zpci_release_device(struct kref *kref)
{
	struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
	int ret;

	if (zdev->zbus->bus)
		zpci_bus_remove_device(zdev, false);

	if (zdev->dma_table)
		zpci_dma_exit_device(zdev);
	if (zdev_enabled(zdev))
		zpci_disable_device(zdev);

	switch (zdev->state) {
	case ZPCI_FN_STATE_CONFIGURED:
		ret = sclp_pci_deconfigure(zdev->fid);
		zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
		fallthrough;
	case ZPCI_FN_STATE_STANDBY:
		if (zdev->has_hp_slot)
			zpci_exit_slot(zdev);
		spin_lock(&zpci_list_lock);
		list_del(&zdev->entry);
		spin_unlock(&zpci_list_lock);
		zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
		fallthrough;
	case ZPCI_FN_STATE_RESERVED:
		if (zdev->has_resources)
			zpci_cleanup_bus_resources(zdev);
		zpci_bus_device_unregister(zdev);
		zpci_destroy_iommu(zdev);
		fallthrough;
	default:
		break;
	}
	zpci_dbg(3, "rem fid:%x\n", zdev->fid);
	kfree(zdev);
}

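/*
 * The switch above is a cascading teardown keyed off how far the device
 * got in its lifecycle: a Configured function is first deconfigured via
 * SCLP, then (falling through) treated like a Standby function and
 * removed from the hotplug slot and zpci_list, and finally (falling
 * through again) treated like a Reserved function, whose bus resources,
 * bus registration and IOMMU are torn down before the zdev is freed.
 */
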
int zpci_report_error(struct pci_dev *pdev,
		      struct zpci_report_error_header *report)
{
	struct zpci_dev *zdev = to_zpci(pdev);

	return sclp_pci_report(report, zdev->fh, zdev->fid);
}
EXPORT_SYMBOL(zpci_report_error);

static int zpci_mem_init(void)
{
	BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
		     __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
					   __alignof__(struct zpci_fmb), 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
				   sizeof(*zpci_iomap_start), GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;

	zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
				    sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
	if (!zpci_iomap_bitmap)
		goto error_iomap_bitmap;

	if (static_branch_likely(&have_mio))
		clp_setup_writeback_mio();

	return 0;
error_iomap_bitmap:
	kfree(zpci_iomap_start);
error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_bitmap);
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_fmb_cache);
}

static unsigned int s390_pci_probe __initdata = 1;
unsigned int s390_pci_force_floating __initdata;
static unsigned int s390_pci_initialized;

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		s390_pci_probe = 0;
		return NULL;
	}
	if (!strcmp(str, "nomio")) {
		S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
		return NULL;
	}
	if (!strcmp(str, "force_floating")) {
		s390_pci_force_floating = 1;
		return NULL;
	}
	if (!strcmp(str, "norid")) {
		s390_pci_no_rid = 1;
		return NULL;
	}
	return str;
}

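/*
 * The strings handled above arrive via the generic "pci=" kernel command
 * line parameter, e.g. booting with
 *
 *	pci=nomio
 *
 * clears MACHINE_FLAG_PCI_MIO so the legacy function-handle based access
 * path is used instead of MIO instructions. Unrecognized options are
 * handed back unchanged for generic handling by the PCI core.
 */
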
bool zpci_is_enabled(void)
{
	return s390_pci_initialized;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!s390_pci_probe)
		return 0;

	if (!test_facility(69) || !test_facility(71)) {
		pr_info("PCI is not supported because CPU facilities 69 or 71 are not available\n");
		return 0;
	}

	if (MACHINE_HAS_PCI_MIO) {
		static_branch_enable(&have_mio);
		ctl_set_bit(2, 5);
	}

	rc = zpci_debug_init();
	if (rc)
		goto out;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_scan_pci_devices();
	if (rc)
		goto out_find;
	zpci_bus_scan_busses();

	s390_pci_initialized = 1;
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
out:
	return rc;
}
subsys_initcall_sync(pci_base_init);