/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/aio.h>
#include <linux/jiffies.h>
#include <asm/pgtable.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <rdma/ib.h>

#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"

#undef pr_fmt
#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt

static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
static ssize_t qib_aio_write(struct kiocb *, const struct iovec *,
			     unsigned long, loff_t);
static unsigned int qib_poll(struct file *, struct poll_table_struct *);
static int qib_mmapf(struct file *, struct vm_area_struct *);

static const struct file_operations qib_file_ops = {
	.owner = THIS_MODULE,
	.write = qib_write,
	.aio_write = qib_aio_write,
	.open = qib_open,
	.release = qib_close,
	.poll = qib_poll,
	.mmap = qib_mmapf,
	.llseek = noop_llseek,
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
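
/*
 * The physical address computed here is handed back to user space by
 * qib_get_base_info() and later compared against vm_pgoff << PAGE_SHIFT
 * in mmap_kvaddr(); that is how an mmap() request for a shared-context
 * buffer is matched back to the vmalloc'ed memory it refers to.
 */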

static int qib_get_base_info(struct file *fp, void __user *ubase,
			     size_t ubase_size)
{
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	int ret = 0;
	struct qib_base_info *kinfo = NULL;
	struct qib_devdata *dd = rcd->dd;
	struct qib_pportdata *ppd = rcd->ppd;
	unsigned subctxt_cnt;
	int shared, master;
	size_t sz;

	subctxt_cnt = rcd->subctxt_cnt;
	if (!subctxt_cnt) {
		shared = 0;
		master = 0;
		subctxt_cnt = 1;
	} else {
		shared = 1;
		master = !subctxt_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If context sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->f_get_base_info(rcd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize;
	kinfo->spi_tidegrcnt = rcd->rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = rcd->rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		rcd->rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt;
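	/*
	 * e.g. with rcvtidcnt == 512 and subctxt_cnt == 3, each slave
	 * gets 170 TIDs and the master gets 170 + 2 == 172, so all
	 * 512 are accounted for.
	 */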
	/*
	 * for this use, may be cfgctxts summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nctxts = dd->cfgctxts;
	/* unit (chip/board) our context is on */
	kinfo->spi_unit = dd->unit;
	kinfo->spi_port = ppd->port;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per context, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 * Normally only one of rcvhdr_tailaddr or rhf_offset is useful
	 * since the chips with non-zero rhf_offset don't normally
	 * enable tail register updates to host memory, but for testing,
	 * both can be enabled and used.
	 */
	kinfo->spi_rcvhdr_base = (u64) rcd->rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) rcd->rcvhdrqtailaddr_phys;
	kinfo->spi_rhf_offset = dd->rhf_offset;
	kinfo->spi_rcv_egrbufs = (u64) rcd->rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys;
	/* setup per-unit (not port) status area for user programs */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(char *) ppd->statusp -
		(char *) dd->pioavailregs_dma;
	kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!shared) {
		kinfo->spi_piocnt = rcd->piocnt;
		kinfo->spi_piobufbase = (u64) rcd->piobufs;
		kinfo->spi_sendbuf_status = cvt_kvaddr(rcd->user_event_mask);
	} else if (master) {
		kinfo->spi_piocnt = (rcd->piocnt / subctxt_cnt) +
				    (rcd->piocnt % subctxt_cnt);
		/* Master's PIO buffers are after all the slave's */
		kinfo->spi_piobufbase = (u64) rcd->piobufs +
			dd->palign *
			(rcd->piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subctxt_fp(fp) - 1;

		kinfo->spi_piocnt = rcd->piocnt / subctxt_cnt;
		kinfo->spi_piobufbase = (u64) rcd->piobufs +
			dd->palign * kinfo->spi_piocnt * slave;
	}
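	/*
	 * e.g. piocnt == 66 with subctxt_cnt == 4: slaves 1..3 each get
	 * 16 buffers starting at offsets 0, 16 and 32 (in units of
	 * palign), and the master gets the remaining 18 starting at
	 * offset 48, directly after the last slave's range.
	 */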

	if (shared) {
		kinfo->spi_sendbuf_status =
			cvt_kvaddr(&rcd->user_event_mask[subctxt_fp(fp)]);
		/* only spi_subctxt_* fields should be set in this block! */
		kinfo->spi_subctxt_uregbase = cvt_kvaddr(rcd->subctxt_uregbase);

		kinfo->spi_subctxt_rcvegrbuf =
			cvt_kvaddr(rcd->subctxt_rcvegrbuf);
		kinfo->spi_subctxt_rcvhdr_base =
			cvt_kvaddr(rcd->subctxt_rcvhdr_base);
	}

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.  Can't use piobufbase directly, because it has
	 * both 2K and 4K buffer base values.
	 */
	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) /
		dd->palign;
	kinfo->spi_pioalign = dd->palign;
	kinfo->spi_qpair = QIB_KD_QP;
	/*
	 * user mode PIO buffers are always 2KB, even when 4KB can
	 * be received, and sent via the kernel; this is ibmaxlen
	 * for 2K MTU.
	 */
	kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = ppd->ibmaxlen; /* maxlen, not ibmtu */
	kinfo->spi_ctxt = rcd->ctxt;
	kinfo->spi_subctxt = subctxt_fp(fp);
	kinfo->spi_sw_version = QIB_KERN_SWVERSION;
	kinfo->spi_sw_version |= 1U << 31; /* QLogic-built, not kernel.org */
	kinfo->spi_hw_version = dd->revision;

	if (master)
		kinfo->spi_runtime_flags |= QIB_RUNTIME_MASTER;

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;
bail:
	kfree(kinfo);
	return ret;
}

/**
 * qib_tid_update - update a context TID
 * @rcd: the context
 * @fp: the qib device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.   To reduce search time, we
 * keep a cursor for each context, walking the shadow tid array to find
 * one that's not in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
			  const struct qib_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, ctxttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct qib_devdata *dd = rcd->dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
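	/*
	 * tidmap has 8 * BITS_PER_LONG bits (512 on 64-bit kernels), one
	 * bit per possible TID; the scans below cap the usable range to
	 * tidcnt.
	 */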
	struct page **pagep = NULL;
	unsigned subctxt = subctxt_fp(fp);

	if (!dd->pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ret = -EFAULT;
		goto done;
	}
	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt) {
		tidcnt = dd->rcvtidcnt;
		tid = rcd->tidcursor;
		tidoff = 0;
	} else if (!subctxt) {
		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			 (dd->rcvtidcnt % rcd->subctxt_cnt);
		tidoff = dd->rcvtidcnt - tidcnt;
		ctxttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		tidoff = tidcnt * (subctxt - 1);
		ctxttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in tid_pg_list */
		qib_devinfo(dd->pcidev,
			"Process tried to allocate %u TIDs, only trying max (%u)\n",
			cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = (struct page **) rcd->tid_pg_list;
	tidlist = (u16 *) &pagep[dd->rcvtidcnt];
	pagep += tidoff;
	tidlist += tidoff;

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) +
				   dd->rcvtidbase +
				   ctxttid * sizeof(*tidbase));
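	/*
	 * Each context owns rcvtidcnt consecutive entries in the chip's
	 * TID array, starting at ctxt * rcvtidcnt; tidbase points at the
	 * first entry belonging to this context (or subcontext slice).
	 */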

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ret = -EFAULT;
		goto done;
	}
	ret = qib_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		/*
		 * if (ret == -EBUSY)
		 * We can't continue because the pagep array won't be
		 * initialized. This should never happen,
		 * unless perhaps the user has mpin'ed the pages
		 * themselves.
		 */
		qib_devinfo(dd->pcidev,
			 "Failed to lock addr %p, %u pages: "
			 "errno %d\n", (void *) vaddr, cnt, -ret);
		goto done;
	}
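	/*
	 * For each pinned page: scan the shadow array from the cursor for
	 * a free TID, record the page and its DMA mapping in the shadow
	 * arrays, program the chip entry, and note the TID in tidmap so
	 * a partial failure can be unwound below.
	 */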
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->pageshadow[ctxttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * Oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			i--;    /* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		tidlist[i] = tid + tidoff;
		/* we "know" system pages and TID pages are same size */
		dd->pageshadow[ctxttid + tid] = pagep[i];
		dd->physshadow[ctxttid + tid] =
			qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
				     PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->physshadow[ctxttid + tid];
		/* PERFORMANCE: below should almost certainly be cached */
		dd->f_put_tid(dd, &tidbase[tid],
				  RCVHQ_RCV_TYPE_EXPECTED, physaddr);
		/*
		 * don't check this tid in qib_ctxtshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
cleanup:
		/* jump here if copy out of updated info failed... */
		/* same code that's in qib_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->pageshadow[ctxttid + tid]) {
				dma_addr_t phys;

				phys = dd->physshadow[ctxttid + tid];
				dd->physshadow[ctxttid + tid] = dd->tidinvalid;
				/* PERFORMANCE: below should almost certainly
				 * be cached
				 */
				dd->f_put_tid(dd, &tidbase[tid],
					      RCVHQ_RCV_TYPE_EXPECTED,
					      dd->tidinvalid);
				pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
				dd->pageshadow[ctxttid + tid] = NULL;
			}
		}
		qib_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with qib_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!rcd->subctxt_cnt)
			rcd->tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	return ret;
}

/**
 * qib_tid_free - free a context TID
 * @rcd: the context
 * @subctxt: the subcontext
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this context
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
			const struct qib_tid_info *ti)
{
	int ret = 0;
	u32 tid, ctxttid, cnt, limit, tidcnt;
	struct qib_devdata *dd = rcd->dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	ctxttid = rcd->ctxt * dd->rcvtidcnt;
	if (!rcd->subctxt_cnt)
		tidcnt = dd->rcvtidcnt;
	else if (!subctxt) {
		tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) +
			 (dd->rcvtidcnt % rcd->subctxt_cnt);
		ctxttid += dd->rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt;
		ctxttid += tidcnt * (subctxt - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) +
				   dd->rcvtidbase +
				   ctxttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->pageshadow[ctxttid + tid]) {
			struct page *p;
			dma_addr_t phys;

			p = dd->pageshadow[ctxttid + tid];
			dd->pageshadow[ctxttid + tid] = NULL;
			phys = dd->physshadow[ctxttid + tid];
			dd->physshadow[ctxttid + tid] = dd->tidinvalid;
			/* PERFORMANCE: below should almost certainly be
			 * cached
			 */
			dd->f_put_tid(dd, &tidbase[tid],
				      RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid);
			pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
			qib_release_user_pages(&p, 1);
		}
	}
done:
	return ret;
}

/**
 * qib_set_part_key - set a partition key
 * @rcd: the context
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple contexts may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single qlogic_ib register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time. I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int qib_set_part_key(struct qib_ctxtdata *rcd, u16 key)
{
	struct qib_pportdata *ppd = rcd->ppd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (QIB_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	if (!lkey) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it.
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i] && pidx == -1)
			pidx = i;
		if (rcd->pkeys[i] == key) {
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i]) {
			any++;
			continue;
		}
		if (ppd->pkeys[i] == key) {
			atomic_t *pkrefs = &ppd->pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				rcd->pkeys[pidx] = key;
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				any++;
			}
		}
		if ((ppd->pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) {
		if (!ppd->pkeys[i] &&
		    atomic_inc_return(&ppd->pkeyrefs[i]) == 1) {
			rcd->pkeys[pidx] = key;
			ppd->pkeys[i] = key;
			(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
			ret = 0;
			goto bail;
		}
	}
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * qib_manage_rcvq - manage a context's receive queue
 * @rcd: the context
 * @subctxt: the subcontext
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the context, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int qib_manage_rcvq(struct qib_ctxtdata *rcd, unsigned subctxt,
			   int start_stop)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned int rcvctrl_op;

	if (subctxt)
		goto bail;
	/* atomically clear receive enable ctxt. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.
		 */
		if (rcd->rcvhdrtail_kvaddr)
			qib_clear_rcvhdrtail(rcd);
		rcvctrl_op = QIB_RCVCTRL_CTXT_ENB;
	} else
		rcvctrl_op = QIB_RCVCTRL_CTXT_DIS;
	dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt);
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

static void qib_clean_part_key(struct qib_ctxtdata *rcd,
			       struct qib_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;
	struct qib_pportdata *ppd = rcd->ppd;

	/* for debugging only */
	oldpkey = (u64) ppd->pkeys[0] |
		((u64) ppd->pkeys[1] << 16) |
		((u64) ppd->pkeys[2] << 32) |
		((u64) ppd->pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) {
		if (!rcd->pkeys[i])
			continue;
		for (j = 0; j < ARRAY_SIZE(ppd->pkeys); j++) {
			/* check for match independent of the global bit */
			if ((ppd->pkeys[j] & 0x7fff) !=
			    (rcd->pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&ppd->pkeyrefs[j])) {
				ppd->pkeys[j] = 0;
				pchanged++;
			}
			break;
		}
		rcd->pkeys[i] = 0;
	}
	if (pchanged)
		(void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0);
}

/* common code for the mappings on dma_alloc_coherent mem */
static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
			unsigned len, void *kvaddr, u32 write_ok, char *what)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		qib_devinfo(dd->pcidev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	/*
	 * shared context user code requires rcvhdrq mapped r/w, others
	 * only allowed readonly mapping.
	 */
	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		qib_devinfo(dd->pcidev,
			"%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
			what, rcd->ctxt, pfn, len, ret);
bail:
	return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	unsigned long sz;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their ctxt
	 * in the chip.
	 */
	sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
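	/*
	 * Header-suppression capable chips appear to have a second page
	 * of user registers, so a two-page mapping is allowed for them.
	 */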
	if ((vma->vm_end - vma->vm_start) > sz) {
		qib_devinfo(dd->pcidev,
			"FAIL mmap userreg: reqlen %lx > PAGE\n",
			vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct qib_devdata *dd,
			struct qib_ctxtdata *rcd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible; unfortunately, x86 doesn't allow
	 * for this in hardware, but we still prevent users from asking
	 * for it.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
		qib_devinfo(dd->pcidev,
			"FAIL mmap piobufs: reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->physaddr + piobufs;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for when
	 * not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	if (qib_wc_pat)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = rcd->rcvegrbuf_size;
	total_size = rcd->rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		qib_devinfo(dd->pcidev,
			"FAIL on egr bufs: reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		qib_devinfo(dd->pcidev,
			"Can't map eager buffers as writable (flags=%lx)\n",
			vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}
	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < rcd->rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(rcd->rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/*
 * qib_file_vma_fault - handle a VMA page fault.
 */
static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);
	vmf->page = page;

	return 0;
}

static struct vm_operations_struct qib_file_vm_ops = {
	.fault = qib_file_vma_fault,
};

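/*
 * Return convention for mmap_kvaddr(): 1 means the request matched one
 * of the kernel virtual (vmalloc) buffers and was handled here; 0 means
 * it is not a kvaddr mapping and the caller should try the physical and
 * chip addresses; < 0 is an error.  The buffer's kernel pfn is stored in
 * vm_pgoff, which is what qib_file_vma_fault() resolves back to a page
 * via vmalloc_to_page() above.
 */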
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct qib_ctxtdata *rcd, unsigned subctxt)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned subctxt_cnt;
	unsigned long len;
	void *addr;
	size_t size;
	int ret = 0;

	subctxt_cnt = rcd->subctxt_cnt;
	size = rcd->rcvegrbuf_chunks * rcd->rcvegrbuf_size;

	/*
	 * Each process has all the subctxt uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase)) {
		addr = rcd->subctxt_uregbase;
		size = PAGE_SIZE * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base)) {
		addr = rcd->subctxt_rcvhdr_base;
		size = rcd->rcvhdrq_size * subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf)) {
		addr = rcd->subctxt_rcvegrbuf;
		size *= subctxt_cnt;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_uregbase +
					PAGE_SIZE * subctxt)) {
		addr = rcd->subctxt_uregbase + PAGE_SIZE * subctxt;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvhdr_base +
					rcd->rcvhdrq_size * subctxt)) {
		addr = rcd->subctxt_rcvhdr_base +
			rcd->rcvhdrq_size * subctxt;
		size = rcd->rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(&rcd->user_event_mask[subctxt])) {
		addr = rcd->user_event_mask;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(rcd->subctxt_rcvegrbuf +
					size * subctxt)) {
		addr = rcd->subctxt_rcvegrbuf + size * subctxt;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			qib_devinfo(dd->pcidev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else
		goto bail;
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &qib_file_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	ret = 1;

bail:
	return ret;
}

/**
 * qib_mmapf - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-context user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int qib_mmapf(struct file *fp, struct vm_area_struct *vma)
{
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret, match = 1;

	rcd = ctxt_fp(fp);
	if (!rcd || !(vma->vm_flags & VM_SHARED)) {
		ret = -EINVAL;
		goto bail;
	}
	dd = rcd->dd;

	/*
	 * This is the qib_do_user_init() code, mapping the shared buffers
	 * and per-context user registers into the user process. The address
	 * referred to by vm_pgoff is the file offset passed via mmap().
	 * For shared contexts, this is the kernel vmalloc() address of the
	 * pages to share with the master.
	 * For non-shared or master ctxts, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr)  {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, rcd, subctxt_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->uregbase + dd->ureg_align * rcd->ctxt;
	if (!rcd->subctxt_cnt) {
		/* ctxt is not shared */
		piocnt = rcd->piocnt;
		piobufs = rcd->piobufs;
	} else if (!subctxt_fp(fp)) {
		/* caller is the master */
		piocnt = (rcd->piocnt / rcd->subctxt_cnt) +
			 (rcd->piocnt % rcd->subctxt_cnt);
		piobufs = rcd->piobufs +
			dd->palign * (rcd->piocnt - piocnt);
	} else {
		unsigned slave = subctxt_fp(fp) - 1;

		/* caller is a slave */
		piocnt = rcd->piocnt / rcd->subctxt_cnt;
		piobufs = rcd->piobufs + dd->palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt);
	else if (pgaddr == dd->pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
				   (void *) dd->pioavailregs_dma, 0,
				   "pioavail registers");
	else if (pgaddr == rcd->rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, rcd);
	else if (pgaddr == (u64) rcd->rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; multiple pages, contiguous
		 * from an i/o perspective.  Shared contexts need
		 * to map r/w, so we allow writing.
		 */
		ret = qib_mmap_mem(vma, rcd, rcd->rcvhdrq_size,
				   rcd->rcvhdrq, 1, "rcvhdrq");
	else if (pgaddr == (u64) rcd->rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = qib_mmap_mem(vma, rcd, PAGE_SIZE,
				   rcd->rcvhdrtail_kvaddr, 0,
				   "rcvhdrq tail");
	else
		match = 0;
	if (!match)
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		qib_devinfo(dd->pcidev,
			 "mmap Failure %d: off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}

static unsigned int qib_poll_urgent(struct qib_ctxtdata *rcd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned pollflag;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (rcd->urgent != rcd->urgent_poll) {
		pollflag = POLLIN | POLLRDNORM;
		rcd->urgent_poll = rcd->urgent;
	} else {
		pollflag = 0;
		set_bit(QIB_CTXT_WAITING_URG, &rcd->flag);
	}
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static unsigned int qib_poll_next(struct qib_ctxtdata *rcd,
				  struct file *fp,
				  struct poll_table_struct *pt)
{
	struct qib_devdata *dd = rcd->dd;
	unsigned pollflag;

	poll_wait(fp, &rcd->wait, pt);

	spin_lock_irq(&dd->uctxt_lock);
	if (dd->f_hdrqempty(rcd)) {
		set_bit(QIB_CTXT_WAITING_RCV, &rcd->flag);
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt);
		pollflag = 0;
	} else
		pollflag = POLLIN | POLLRDNORM;
	spin_unlock_irq(&dd->uctxt_lock);

	return pollflag;
}

static unsigned int qib_poll(struct file *fp, struct poll_table_struct *pt)
{
	struct qib_ctxtdata *rcd;
	unsigned pollflag;

	rcd = ctxt_fp(fp);
	if (!rcd)
		pollflag = POLLERR;
	else if (rcd->poll_type == QIB_POLL_TYPE_URGENT)
		pollflag = qib_poll_urgent(rcd, fp, pt);
	else if (rcd->poll_type == QIB_POLL_TYPE_ANYRCV)
		pollflag = qib_poll_next(rcd, fp, pt);
	else /* invalid */
		pollflag = POLLERR;

	return pollflag;
}

static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
{
	struct qib_filedata *fd = fp->private_data;
	const unsigned int weight = cpumask_weight(&current->cpus_allowed);
	const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
	int local_cpu;

	/*
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it on the local NUMA node.
	 */
	if ((weight >= qib_cpulist_count) &&
		(cpumask_weight(local_mask) <= qib_cpulist_count)) {
		for_each_cpu(local_cpu, local_mask)
			if (!test_and_set_bit(local_cpu, qib_cpulist)) {
				fd->rec_cpu_num = local_cpu;
				return;
			}
	}

	/*
	 * If process has NOT already set its affinity, select and
	 * reserve a processor for it, as a rendezvous for all
	 * users of the driver.  If they don't actually later
	 * set affinity to this cpu, or set it to some other cpu,
	 * it just means that sooner or later we don't recommend
	 * a cpu, and let the scheduler do its best.
	 */
	if (weight >= qib_cpulist_count) {
		int cpu;
		cpu = find_first_zero_bit(qib_cpulist,
					  qib_cpulist_count);
		if (cpu == qib_cpulist_count)
			qib_dev_err(dd,
			"no cpus avail for affinity PID %u\n",
			current->pid);
		else {
			__set_bit(cpu, qib_cpulist);
			fd->rec_cpu_num = cpu;
		}
	}
}

/*
 * Check that userland and driver are compatible for subcontexts.
 */
static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (QIB_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (QIB_USER_SWMAJOR == 1) {
		switch (QIB_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subctxt implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor <= QIB_USER_SWMINOR;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}
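
/*
 * e.g. a 1.3 library on a 1.4 (or later) driver is accepted by the
 * default case above (3 <= 4), while a 1.4 library on a 1.3 driver is
 * rejected, since a 1.3 driver is only compatible with 1.3 userland.
 */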

static int init_subctxts(struct qib_devdata *dd,
			 struct qib_ctxtdata *rcd,
			 const struct qib_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subctxts;
	size_t size;

	/*
	 * If the user is requesting zero subctxts,
	 * skip the subctxt allocation.
	 */
	if (uinfo->spu_subctxt_cnt <= 0)
		goto bail;
	num_subctxts = uinfo->spu_subctxt_cnt;

	/* Check for subctxt compatibility */
	if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
		uinfo->spu_userversion & 0xffff)) {
		qib_devinfo(dd->pcidev,
			 "Mismatched user version (%d.%d) and driver "
			 "version (%d.%d) while context sharing. Ensure "
			 "that driver and library are from the same "
			 "release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
		goto bail;
	}
	if (num_subctxts > QLOGIC_IB_MAX_SUBCTXT) {
		ret = -EINVAL;
		goto bail;
	}

	rcd->subctxt_uregbase = vmalloc_user(PAGE_SIZE * num_subctxts);
	if (!rcd->subctxt_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: rcd->rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subctxts;
	rcd->subctxt_rcvhdr_base = vmalloc_user(size);
	if (!rcd->subctxt_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	rcd->subctxt_rcvegrbuf = vmalloc_user(rcd->rcvegrbuf_chunks *
					      rcd->rcvegrbuf_size *
					      num_subctxts);
	if (!rcd->subctxt_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	rcd->subctxt_cnt = uinfo->spu_subctxt_cnt;
	rcd->subctxt_id = uinfo->spu_subctxt_id;
	rcd->active_slaves = 1;
	rcd->redirect_seq_cnt = 1;
	set_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
	goto bail;

bail_rhdr:
	vfree(rcd->subctxt_rcvhdr_base);
bail_ureg:
	vfree(rcd->subctxt_uregbase);
	rcd->subctxt_uregbase = NULL;
bail:
	return ret;
}

static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
		      struct file *fp, const struct qib_user_info *uinfo)
{
	struct qib_filedata *fd = fp->private_data;
	struct qib_devdata *dd = ppd->dd;
	struct qib_ctxtdata *rcd;
	void *ptmp = NULL;
	int ret;
	int numa_id;

	assign_ctxt_affinity(fp, dd);

	numa_id = qib_numa_aware ? ((fd->rec_cpu_num != -1) ?
		cpu_to_node(fd->rec_cpu_num) :
		numa_node_id()) : dd->assigned_node_id;

	rcd = qib_create_ctxtdata(ppd, ctxt, numa_id);

	/*
	 * Allocate memory for use in qib_tid_update() at open to
	 * reduce cost of expected send setup per message segment
	 */
	if (rcd)
		ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) +
			       dd->rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);

	if (!rcd || !ptmp) {
		qib_dev_err(dd,
			"Unable to allocate ctxtdata memory, failing open\n");
		ret = -ENOMEM;
		goto bailerr;
	}
	rcd->userversion = uinfo->spu_userversion;
	ret = init_subctxts(dd, rcd, uinfo);
	if (ret)
		goto bailerr;
	rcd->tid_pg_list = ptmp;
	rcd->pid = current->pid;
	init_waitqueue_head(&dd->rcd[ctxt]->wait);
	strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
	ctxt_fp(fp) = rcd;
	qib_stats.sps_ctxts++;
	dd->freectxts--;
	ret = 0;
	goto bail;

bailerr:
	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	dd->rcd[ctxt] = NULL;
	kfree(rcd);
	kfree(ptmp);
bail:
	return ret;
}

static inline int usable(struct qib_pportdata *ppd)
{
	struct qib_devdata *dd = ppd->dd;

	return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid &&
		(ppd->lflags & QIBL_LINKACTIVE);
}

/*
 * Select a context on the given device, either using a requested port
 * or the port based on the context number.
 */
static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
			    const struct qib_user_info *uinfo)
{
	struct qib_pportdata *ppd = NULL;
	int ret, ctxt;

	if (port) {
		if (!usable(dd->pport + port - 1)) {
			ret = -ENETDOWN;
			goto done;
		} else
			ppd = dd->pport + port - 1;
	}
	for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt];
	     ctxt++)
		;
	if (ctxt == dd->cfgctxts) {
		ret = -EBUSY;
		goto done;
	}
	if (!ppd) {
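		/*
		 * No port was requested: derive a preferred port from the
		 * context number, so contexts round-robin across the
		 * device's ports, then fall back to any usable port.
		 */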
		u32 pidx = ctxt % dd->num_pports;
		if (usable(dd->pport + pidx))
			ppd = dd->pport + pidx;
		else {
			for (pidx = 0; pidx < dd->num_pports && !ppd;
			     pidx++)
				if (usable(dd->pport + pidx))
					ppd = dd->pport + pidx;
		}
	}
	ret = ppd ? setup_ctxt(ppd, ctxt, fp, uinfo) : -ENETDOWN;
done:
	return ret;
}

static int find_free_ctxt(int unit, struct file *fp,
			  const struct qib_user_info *uinfo)
{
	struct qib_devdata *dd = qib_lookup(unit);
	int ret;

	if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports))
		ret = -ENODEV;
	else
		ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo);

	return ret;
}

static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
		      unsigned alg)
{
	struct qib_devdata *udd = NULL;
	int ret = 0, devmax, npresent, nup, ndev, dusable = 0, i;
	u32 port = uinfo->spu_port, ctxt;

	devmax = qib_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (nup == 0) {
		ret = -ENETDOWN;
		goto done;
	}

	if (alg == QIB_PORT_ALG_ACROSS) {
		unsigned inuse = ~0U;
		/* find device (with ACTIVE ports) with fewest ctxts in use */
		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);
			unsigned cused = 0, cfree = 0, pusable = 0;
			if (!dd)
				continue;
			if (port && port <= dd->num_pports &&
			    usable(dd->pport + port - 1))
				pusable = 1;
			else
				for (i = 0; i < dd->num_pports; i++)
					if (usable(dd->pport + i))
						pusable++;
			if (!pusable)
				continue;
			for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
			     ctxt++)
				if (dd->rcd[ctxt])
					cused++;
				else
					cfree++;
			if (cfree && cused < inuse) {
				udd = dd;
				inuse = cused;
			}
		}
		if (udd) {
			ret = choose_port_ctxt(fp, udd, port, uinfo);
			goto done;
		}
	} else {
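		/*
		 * Other allocation policies: walk the units in order and
		 * take the first one that can provide a context.
		 */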
		for (ndev = 0; ndev < devmax; ndev++) {
			struct qib_devdata *dd = qib_lookup(ndev);
			if (dd) {
				ret = choose_port_ctxt(fp, dd, port, uinfo);
				if (!ret)
					goto done;
				if (ret == -EBUSY)
					dusable++;
			}
		}
	}
	ret = dusable ? -EBUSY : -ENETDOWN;

done:
	return ret;
}

static int find_shared_ctxt(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = qib_count_units(NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);

		/* device portion of usable() */
		if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase))
			continue;
		for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) {
			struct qib_ctxtdata *rcd = dd->rcd[i];

			/* Skip ctxts which are not yet open */
			if (!rcd || !rcd->cnt)
				continue;
			/* Skip ctxt if it doesn't match the requested one */
			if (rcd->subctxt_id != uinfo->spu_subctxt_id)
				continue;
			/* Verify the sharing process matches the master */
			if (rcd->subctxt_cnt != uinfo->spu_subctxt_cnt ||
			    rcd->userversion != uinfo->spu_userversion ||
			    rcd->cnt >= rcd->subctxt_cnt) {
				ret = -EINVAL;
				goto done;
			}
			ctxt_fp(fp) = rcd;
			subctxt_fp(fp) = rcd->cnt++;
			rcd->subpid[subctxt_fp(fp)] = current->pid;
			tidcursor_fp(fp) = 0;
			rcd->active_slaves |= 1 << subctxt_fp(fp);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int qib_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in qib_assign_ctxt() */
	fp->private_data = kzalloc(sizeof(struct qib_filedata), GFP_KERNEL);
	if (fp->private_data) /* no cpu affinity by default */
		((struct qib_filedata *)fp->private_data)->rec_cpu_num = -1;
	return fp->private_data ? 0 : -ENOMEM;
}

static int find_hca(unsigned int cpu, int *unit)
{
	int ret = 0, devmax, npresent, nup, ndev;

	*unit = -1;

	devmax = qib_count_units(&npresent, &nup);
	if (!npresent) {
		ret = -ENXIO;
		goto done;
	}
	if (!nup) {
		ret = -ENETDOWN;
		goto done;
	}
	for (ndev = 0; ndev < devmax; ndev++) {
		struct qib_devdata *dd = qib_lookup(ndev);
		if (dd) {
			if (pcibus_to_node(dd->pcidev->bus) < 0) {
				ret = -EINVAL;
				goto done;
			}
			if (cpu_to_node(cpu) ==
				pcibus_to_node(dd->pcidev->bus)) {
				*unit = ndev;
				goto done;
			}
		}
	}
done:
	return ret;
}

static int do_qib_user_sdma_queue_create(struct file *fp)
{
	struct qib_filedata *fd = fp->private_data;
	struct qib_ctxtdata *rcd = fd->rcd;
	struct qib_devdata *dd = rcd->dd;

	if (dd->flags & QIB_HAS_SEND_DMA) {

		fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
						    dd->unit,
						    rcd->ctxt,
						    fd->subctxt);
		if (!fd->pq)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Get ctxt early, so can set affinity prior to memory allocation.
 */
static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor, alg = QIB_PORT_ALG_ACROSS;

	/* Check to be sure we haven't already initialized this file */
	if (ctxt_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != QIB_USER_SWMAJOR) {
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;

	if (swminor >= 11 && uinfo->spu_port_alg < QIB_PORT_ALG_COUNT)
		alg = uinfo->spu_port_alg;

	mutex_lock(&qib_mutex);

	if (qib_compatible_subctxts(swmajor, swminor) &&
	    uinfo->spu_subctxt_cnt) {
		ret = find_shared_ctxt(fp, uinfo);
		if (ret > 0) {
			ret = do_qib_user_sdma_queue_create(fp);
			if (!ret)
				assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd);
			goto done_ok;
		}
	}

	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;
	if (i_minor)
		ret = find_free_ctxt(i_minor - 1, fp, uinfo);
	else {
		int unit;
		const unsigned int cpu = cpumask_first(&current->cpus_allowed);
		const unsigned int weight =
			cpumask_weight(&current->cpus_allowed);

		if (weight == 1 && !test_bit(cpu, qib_cpulist))
			if (!find_hca(cpu, &unit) && unit >= 0)
				if (!find_free_ctxt(unit, fp, uinfo)) {
					ret = 0;
					goto done_chk_sdma;
				}
		ret = get_a_ctxt(fp, uinfo, alg);
	}

done_chk_sdma:
	if (!ret)
		ret = do_qib_user_sdma_queue_create(fp);
done_ok:
	mutex_unlock(&qib_mutex);

done:
	return ret;
}


static int qib_do_user_init(struct file *fp,
			    const struct qib_user_info *uinfo)
{
	int ret;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_devdata *dd;
	unsigned uctxt;

	/* Subctxts don't need to initialize anything since master did it. */
	if (subctxt_fp(fp)) {
		ret = wait_event_interruptible(rcd->wait,
			!test_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag));
		goto bail;
	}

	dd = rcd->dd;

	/* some ctxts may get extra buffers, calculate that here */
	uctxt = rcd->ctxt - dd->first_user_ctxt;
	if (uctxt < dd->ctxts_extrabuf) {
		rcd->piocnt = dd->pbufsctxt + 1;
		rcd->pio_base = rcd->piocnt * uctxt;
	} else {
		rcd->piocnt = dd->pbufsctxt;
		rcd->pio_base = rcd->piocnt * uctxt +
			dd->ctxts_extrabuf;
	}
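	/*
	 * i.e. the first ctxts_extrabuf user contexts get pbufsctxt + 1
	 * buffers each and the rest get pbufsctxt, so every context's
	 * pio_base accounts for the extra buffers handed out before it.
	 */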

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.  Can't use piobufbase directly, because it has
	 * both 2K and 4K buffer base values.  So check and handle.
	 */
	if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) {
		if (rcd->pio_base >= dd->piobcnt2k) {
			qib_dev_err(dd,
				    "%u:ctxt%u: no 2KB buffers available\n",
				    dd->unit, rcd->ctxt);
			ret = -ENOBUFS;
			goto bail;
		}
		rcd->piocnt = dd->piobcnt2k - rcd->pio_base;
		qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n",
			    rcd->ctxt, rcd->piocnt);
	}

	rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign;
	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
			       TXCHK_CHG_TYPE_USER, rcd);
	/*
	 * try to ensure that processes start up with consistent avail update
	 * for their own range, at least.   If system very quiet, it might
	 * have the in-memory copy out of date at startup for this range of
	 * buffers, when a context gets re-used.  Do after the chg_pioavail
	 * and before the rest of setup, so it's "almost certain" the dma
	 * will have occurred (can't 100% guarantee, but should be many
	 * decimals of 9s, with this ordering), given how much else happens
	 * after this.
	 */
	dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If rcd->ctxt > chip-supported,
	 * we need to do extra stuff here, handling overflow
	 * through ctxt 0, someday.
	 */
1733 	ret = qib_create_rcvhdrq(dd, rcd);
1734 	if (!ret)
1735 		ret = qib_setup_eagerbufs(rcd);
1736 	if (ret)
1737 		goto bail_pio;
1738 
1739 	rcd->tidcursor = 0; /* start at beginning after open */
1740 
1741 	/* initialize poll variables... */
1742 	rcd->urgent = 0;
1743 	rcd->urgent_poll = 0;
1744 
1745 	/*
1746 	 * Now enable the ctxt for receive.
1747 	 * For chips that are set to DMA the tail register to memory
1748 	 * when they change (and when the update bit transitions from
1749 	 * 0 to 1.  So for those chips, we turn it off and then back on.
1750 	 * This will (very briefly) affect any other open ctxts, but the
1751 	 * duration is very short, and therefore isn't an issue.  We
1752 	 * explicitly set the in-memory tail copy to 0 beforehand, so we
1753 	 * don't have to wait to be sure the DMA update has happened
1754 	 * (chip resets head/tail to 0 on transition to enable).
1755 	 */
1756 	if (rcd->rcvhdrtail_kvaddr)
1757 		qib_clear_rcvhdrtail(rcd);
1758 
1759 	dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB,
1760 		      rcd->ctxt);
1761 
1762 	/* Notify any waiting slaves */
1763 	if (rcd->subctxt_cnt) {
1764 		clear_bit(QIB_CTXT_MASTER_UNINIT, &rcd->flag);
1765 		wake_up(&rcd->wait);
1766 	}
1767 	return 0;
1768 
1769 bail_pio:
1770 	qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt,
1771 			       TXCHK_CHG_TYPE_KERN, rcd);
1772 bail:
1773 	return ret;
1774 }

/**
 * unlock_expected_tids - unlock any expected TID entries a context still had in use
 * @rcd: ctxt
 *
 * We don't actually update the chip here, because the caller does a
 * bulk update afterwards, using f_clear_tids.
 */
static void unlock_expected_tids(struct qib_ctxtdata *rcd)
{
	struct qib_devdata *dd = rcd->dd;
	int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt;
	int i, maxtid = ctxt_tidbase + dd->rcvtidcnt;

	for (i = ctxt_tidbase; i < maxtid; i++) {
		struct page *p = dd->pageshadow[i];
		dma_addr_t phys;

		if (!p)
			continue;

		phys = dd->physshadow[i];
		dd->physshadow[i] = dd->tidinvalid;
		dd->pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, phys, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
		qib_release_user_pages(&p, 1);
	}
}
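
/*
 * Illustrative note on the indexing above (hypothetical count): the
 * page/phys shadow arrays are per chip, not per context, so with
 * dd->rcvtidcnt == 512 expected TIDs per context, ctxt 3 owns shadow
 * entries 3 * 512 .. 4 * 512 - 1, exactly the range the loop walks.
 */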

static int qib_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct qib_filedata *fd;
	struct qib_ctxtdata *rcd;
	struct qib_devdata *dd;
	unsigned long flags;
	unsigned ctxt;
	pid_t pid;

	mutex_lock(&qib_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	rcd = fd->rcd;
	if (!rcd) {
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	dd = rcd->dd;

	/* ensure all pio buffer writes in progress are flushed */
	qib_flush_wc();

	/* drain user sdma queue */
	if (fd->pq) {
		qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
		qib_user_sdma_queue_destroy(fd->pq);
	}

	if (fd->rec_cpu_num != -1)
		__clear_bit(fd->rec_cpu_num, qib_cpulist);

	if (--rcd->cnt) {
		/*
		 * XXX If the master closes the context before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		rcd->active_slaves &= ~(1 << fd->subctxt);
		rcd->subpid[fd->subctxt] = 0;
		mutex_unlock(&qib_mutex);
		goto bail;
	}

	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->uctxt_lock, flags);
	ctxt = rcd->ctxt;
	dd->rcd[ctxt] = NULL;
	pid = rcd->pid;
	rcd->pid = 0;
	spin_unlock_irqrestore(&dd->uctxt_lock, flags);

	if (rcd->rcvwait_to || rcd->piowait_to ||
	    rcd->rcvnowait || rcd->pionowait) {
		rcd->rcvwait_to = 0;
		rcd->piowait_to = 0;
		rcd->rcvnowait = 0;
		rcd->pionowait = 0;
	}
	if (rcd->flag)
		rcd->flag = 0;

	if (dd->kregbase) {
		/* atomically clear receive enable ctxt and intr avail. */
		dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS |
				  QIB_RCVCTRL_INTRAVAIL_DIS, ctxt);

		/* clean up the pkeys for this ctxt user */
		qib_clean_part_key(rcd, dd);
		qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt);
		qib_chg_pioavailkernel(dd, rcd->pio_base,
				       rcd->piocnt, TXCHK_CHG_TYPE_KERN, NULL);

		dd->f_clear_tids(dd, rcd);

		if (dd->pageshadow)
			unlock_expected_tids(rcd);
		qib_stats.sps_ctxts--;
		dd->freectxts++;
	}

	mutex_unlock(&qib_mutex);
	qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}

static int qib_ctxt_info(struct file *fp, struct qib_ctxt_info __user *uinfo)
{
	struct qib_ctxt_info info;
	int ret;
	size_t sz;
	struct qib_ctxtdata *rcd = ctxt_fp(fp);
	struct qib_filedata *fd;

	fd = fp->private_data;

	info.num_active = qib_count_active_units();
	info.unit = rcd->dd->unit;
	info.port = rcd->ppd->port;
	info.ctxt = rcd->ctxt;
	info.subctxt = subctxt_fp(fp);
	/* Number of user ctxts available for this device. */
	info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt;
	info.num_subctxts = rcd->subctxt_cnt;
	info.rec_cpu = fd->rec_cpu_num;
	sz = sizeof(info);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
				 u32 __user *inflightp)
{
	const u32 val = qib_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

static int qib_sdma_get_complete(struct qib_pportdata *ppd,
				 struct qib_user_sdma_queue *pq,
				 u32 __user *completep)
{
	u32 val;
	int err;

	if (!pq)
		return -EINVAL;

	err = qib_user_sdma_make_progress(ppd, pq);
	if (err < 0)
		return err;

	val = qib_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

static int disarm_req_delay(struct qib_ctxtdata *rcd)
{
	int ret = 0;

	if (!usable(rcd->ppd)) {
		int i;
		/*
		 * If the link is down, or the port is otherwise not
		 * usable, delay the caller for up to 30 seconds (300
		 * polls at 100ms) so we don't thrash trying to get the
		 * chip back to ACTIVE, and set a flag so the caller
		 * makes the request again.
		 */
		if (rcd->user_event_mask) {
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
				&rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(_QIB_EVENT_DISARM_BUFS_BIT,
					&rcd->user_event_mask[i]);
		}
		for (i = 0; !usable(rcd->ppd) && i < 300; i++)
			msleep(100);
		ret = -ENETDOWN;
	}
	return ret;
}

/*
 * Find the first user context in use, and set the specified bit in its
 * event mask (for all of its subcontexts, if shared); returns 1 if such
 * a context was found, 0 otherwise.
 * See also find_ctxt() for a similar use, that is specific to send buffers.
 */
int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
{
	struct qib_ctxtdata *rcd;
	unsigned ctxt;
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
	for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
	     ctxt++) {
		rcd = ppd->dd->rcd[ctxt];
		if (!rcd)
			continue;
		if (rcd->user_event_mask) {
			int i;
			/*
			 * subctxt_cnt is 0 if not shared, so do base
			 * separately, first, then remaining subctxt, if any
			 */
			set_bit(evtbit, &rcd->user_event_mask[0]);
			for (i = 1; i < rcd->subctxt_cnt; i++)
				set_bit(evtbit, &rcd->user_event_mask[i]);
		}
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);

	return ret;
}

/*
 * Clear the event notifier events for this context.
 * For the DISARM_BUFS case, we also take action (this obsoletes
 * the older QIB_CMD_DISARM_BUFS, but we keep it for backwards
 * compatibility).
 * Other bits don't currently require actions, just atomically clear.
 * The user process then performs actions appropriate to the bit having
 * been set, if desired, and checks again in the future.
 */
static int qib_user_event_ack(struct qib_ctxtdata *rcd, int subctxt,
			      unsigned long events)
{
	int ret = 0, i;

	for (i = 0; i <= _QIB_MAX_EVENT_BIT; i++) {
		if (!test_bit(i, &events))
			continue;
		if (i == _QIB_EVENT_DISARM_BUFS_BIT) {
			(void)qib_disarm_piobufs_ifneeded(rcd);
			ret = disarm_req_delay(rcd);
		} else
			clear_bit(i, &rcd->user_event_mask[subctxt]);
	}
	return ret;
}
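
/*
 * Example (illustrative): acking just the disarm event means passing
 * events == 1UL << _QIB_EVENT_DISARM_BUFS_BIT, which takes the disarm
 * action above; any other set bit is simply cleared from this
 * subcontext's slot in user_event_mask.
 */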

static ssize_t qib_write(struct file *fp, const char __user *data,
			 size_t count, loff_t *off)
{
	const struct qib_cmd __user *ucmd;
	struct qib_ctxtdata *rcd;
	const void __user *src;
	size_t consumed, copy = 0;
	struct qib_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
		return -EACCES;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct qib_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
	case QIB_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;

	case QIB_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;

	case QIB_CMD_CTXT_INFO:
		copy = sizeof(cmd.cmd.ctxt_info);
		dest = &cmd.cmd.ctxt_info;
		src = &ucmd->cmd.ctxt_info;
		break;

	case QIB_CMD_TID_UPDATE:
	case QIB_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;

	case QIB_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;

	case QIB_CMD_DISARM_BUFS:
	case QIB_CMD_PIOAVAILUPD: /* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;

	case QIB_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;

	case QIB_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;

	case QIB_CMD_ACK_EVENT:
		copy = sizeof(cmd.cmd.event_mask);
		dest = &cmd.cmd.event_mask;
		src = &ucmd->cmd.event_mask;
		break;

	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}
		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}
		consumed += copy;
	}

	rcd = ctxt_fp(fp);
	if (!rcd && cmd.type != QIB_CMD_ASSIGN_CTXT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case QIB_CMD_ASSIGN_CTXT:
		ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;

	case QIB_CMD_USER_INIT:
		ret = qib_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = qib_get_base_info(fp, (void __user *) (unsigned long)
					cmd.cmd.user_info.spu_base_info,
					cmd.cmd.user_info.spu_base_info_size);
		break;

	case QIB_CMD_RECV_CTRL:
		ret = qib_manage_rcvq(rcd, subctxt_fp(fp), cmd.cmd.recv_ctrl);
		break;

	case QIB_CMD_CTXT_INFO:
		ret = qib_ctxt_info(fp, (struct qib_ctxt_info __user *)
				    (unsigned long) cmd.cmd.ctxt_info);
		break;

	case QIB_CMD_TID_UPDATE:
		ret = qib_tid_update(rcd, fp, &cmd.cmd.tid_info);
		break;

	case QIB_CMD_TID_FREE:
		ret = qib_tid_free(rcd, subctxt_fp(fp), &cmd.cmd.tid_info);
		break;

	case QIB_CMD_SET_PART_KEY:
		ret = qib_set_part_key(rcd, cmd.cmd.part_key);
		break;

	case QIB_CMD_DISARM_BUFS:
		(void)qib_disarm_piobufs_ifneeded(rcd);
		ret = disarm_req_delay(rcd);
		break;

	case QIB_CMD_PIOAVAILUPD:
		qib_force_pio_avail_update(rcd->dd);
		break;

	case QIB_CMD_POLL_TYPE:
		rcd->poll_type = cmd.cmd.poll_type;
		break;

	case QIB_CMD_ARMLAUNCH_CTRL:
		rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl);
		break;

	case QIB_CMD_SDMA_INFLIGHT:
		ret = qib_sdma_get_inflight(user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_inflight);
		break;

	case QIB_CMD_SDMA_COMPLETE:
		ret = qib_sdma_get_complete(rcd->ppd,
					    user_sdma_queue_fp(fp),
					    (u32 __user *) (unsigned long)
					    cmd.cmd.sdma_complete);
		break;

	case QIB_CMD_ACK_EVENT:
		ret = qib_user_event_ack(rcd, subctxt_fp(fp),
					 cmd.cmd.event_mask);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
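
/*
 * Sketch of the write()-based command ABI handled above, as seen from
 * user space (illustrative only; not part of the driver, and error
 * handling is omitted).  Each command is a struct qib_cmd whose leading
 * 'type' field selects which member of the payload union is consumed:
 *
 *	struct qib_cmd cmd;
 *	int fd = open("/dev/ipath", O_RDWR);	(wildcard unit node)
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.type = QIB_CMD_ASSIGN_CTXT;
 *	cmd.cmd.user_info.spu_userversion = QIB_USER_SWVERSION;
 *	write(fd, &cmd, sizeof(cmd.type) + sizeof(cmd.cmd.user_info));
 *
 * On success, write() returns the number of bytes consumed (the type
 * plus whatever payload it copied), which may be less than the count
 * passed in; QIB_CMD_DISARM_BUFS and QIB_CMD_PIOAVAILUPD carry no
 * payload at all.
 */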

static ssize_t qib_aio_write(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long dim, loff_t off)
{
	struct qib_filedata *fp = iocb->ki_filp->private_data;
	struct qib_ctxtdata *rcd = ctxt_fp(iocb->ki_filp);
	struct qib_user_sdma_queue *pq = fp->pq;

	if (!dim || !pq)
		return -EINVAL;

	return qib_user_sdma_writev(rcd, pq, iov, dim);
}
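
/*
 * Note (illustrative): user-level SDMA sends arrive here via writev()
 * or aio on the same fd used for commands; the iovec array describes
 * the packet headers and payloads that qib_user_sdma_writev() queues,
 * and completion is later observed through the QIB_CMD_SDMA_INFLIGHT /
 * QIB_CMD_SDMA_COMPLETE counters above.
 */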

static struct class *qib_class;
static dev_t qib_dev;

int qib_cdev_init(int minor, const char *name,
		  const struct file_operations *fops,
		  struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(MAJOR(qib_dev), minor);
	struct cdev *cdev;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		pr_err("Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		pr_err("Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(qib_class, NULL, dev, NULL, "%s", name);
	if (!IS_ERR(device))
		goto done;
	ret = PTR_ERR(device);
	device = NULL;
	pr_err("Could not create device for minor %d, %s (err %d)\n",
	       minor, name, -ret);
err_cdev:
	cdev_del(cdev);
	cdev = NULL;
done:
	*cdevp = cdev;
	*devp = device;
	return ret;
}

void qib_cdev_cleanup(struct cdev **cdevp, struct device **devp)
{
	struct device *device = *devp;

	if (device) {
		device_unregister(device);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

static struct cdev *wildcard_cdev;
static struct device *wildcard_device;

int __init qib_dev_init(void)
{
	int ret;

	ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
	if (ret < 0) {
		pr_err("Could not allocate chrdev region (err %d)\n", -ret);
		goto done;
	}

	qib_class = class_create(THIS_MODULE, "ipath");
	if (IS_ERR(qib_class)) {
		ret = PTR_ERR(qib_class);
		pr_err("Could not create device class (err %d)\n", -ret);
		unregister_chrdev_region(qib_dev, QIB_NMINORS);
		/* don't leave an ERR_PTR for qib_dev_cleanup() to destroy */
		qib_class = NULL;
	}

done:
	return ret;
}

void qib_dev_cleanup(void)
{
	if (qib_class) {
		class_destroy(qib_class);
		qib_class = NULL;
	}

	unregister_chrdev_region(qib_dev, QIB_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);

static void qib_user_remove(struct qib_devdata *dd)
{
	if (atomic_dec_return(&user_count) == 0)
		qib_cdev_cleanup(&wildcard_cdev, &wildcard_device);

	qib_cdev_cleanup(&dd->user_cdev, &dd->user_device);
}

static int qib_user_add(struct qib_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = qib_cdev_init(0, "ipath", &qib_file_ops,
				    &wildcard_cdev, &wildcard_device);
		if (ret)
			goto done;
	}

	snprintf(name, sizeof(name), "ipath%d", dd->unit);
	ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops,
			    &dd->user_cdev, &dd->user_device);
	if (ret)
		qib_user_remove(dd);
done:
	return ret;
}
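
/*
 * Resulting /dev layout from the calls above: minor 0 is the wildcard
 * node, /dev/ipath, created once when the first unit is added; each
 * unit N then gets its own /dev/ipathN at minor N + 1 (so, for
 * example, unit 0 appears as /dev/ipath0 on minor 1).
 */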

/*
 * Create per-unit files in /dev
 */
int qib_device_create(struct qib_devdata *dd)
{
	int r, ret;

	r = qib_user_add(dd);
	ret = qib_diag_add(dd);
	if (r && !ret)
		ret = r;
	return ret;
}

/*
 * Remove per-unit files in /dev
 * void, since the core kernel returns no errors for this
 */
void qib_device_remove(struct qib_devdata *dd)
{
	qib_user_remove(dd);
	qib_diag_remove(dd);
}