1 /*
2  * Copyright (c) 2012 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
4  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #include <linux/err.h>
36 #include <linux/vmalloc.h>
37 #include <linux/jhash.h>
38 
39 #include "qib.h"
40 
41 #define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
42 #define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
43 
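/*
 * QPNs are tracked in an array of single-page bitmaps.  A QPN splits into
 * a map index (qpn / BITS_PER_PAGE) and a bit offset within that page
 * (qpn & BITS_PER_PAGE_MASK); mk_qpn() below recombines the two.  For
 * example, with 4 KiB pages each qpn_map page covers 4096 * 8 = 32768 QPNs.
 */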
44 static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
45 			      struct qpn_map *map, unsigned off)
46 {
47 	return (map - qpt->map) * BITS_PER_PAGE + off;
48 }
49 
50 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
51 					struct qpn_map *map, unsigned off,
52 					unsigned n)
53 {
54 	if (qpt->mask) {
55 		off++;
56 		if (((off & qpt->mask) >> 1) >= n)
57 			off = (off | qpt->mask) + 2;
58 	} else
59 		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
60 	return off;
61 }
62 
63 /*
64  * Convert the AETH credit code into the number of credits.
65  */
66 static u32 credit_table[31] = {
67 	0,                      /* 0 */
68 	1,                      /* 1 */
69 	2,                      /* 2 */
70 	3,                      /* 3 */
71 	4,                      /* 4 */
72 	6,                      /* 5 */
73 	8,                      /* 6 */
74 	12,                     /* 7 */
75 	16,                     /* 8 */
76 	24,                     /* 9 */
77 	32,                     /* A */
78 	48,                     /* B */
79 	64,                     /* C */
80 	96,                     /* D */
81 	128,                    /* E */
82 	192,                    /* F */
83 	256,                    /* 10 */
84 	384,                    /* 11 */
85 	512,                    /* 12 */
86 	768,                    /* 13 */
87 	1024,                   /* 14 */
88 	1536,                   /* 15 */
89 	2048,                   /* 16 */
90 	3072,                   /* 17 */
91 	4096,                   /* 18 */
92 	6144,                   /* 19 */
93 	8192,                   /* 1A */
94 	12288,                  /* 1B */
95 	16384,                  /* 1C */
96 	24576,                  /* 1D */
97 	32768                   /* 1E */
98 };
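/*
 * The table is indexed by the 5-bit AETH credit code.  qib_compute_aeth()
 * below does the reverse mapping with a binary search that rounds down,
 * e.g. 100 available RWQEs encode as code 0xD (96 credits), and
 * qib_get_credit() uses the table to turn a received code back into a
 * credit limit.
 */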
99 
100 static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
101 {
102 	unsigned long page = get_zeroed_page(GFP_KERNEL);
103 
104 	/*
105 	 * Free the page if someone raced with us installing it.
106 	 */
107 
108 	spin_lock(&qpt->lock);
109 	if (map->page)
110 		free_page(page);
111 	else
112 		map->page = (void *)page;
113 	spin_unlock(&qpt->lock);
114 }
115 
116 /*
117  * Allocate the next available QPN or
118  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
119  */
120 static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
121 		     enum ib_qp_type type, u8 port)
122 {
123 	u32 i, offset, max_scan, qpn;
124 	struct qpn_map *map;
125 	u32 ret;
126 
127 	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
128 		unsigned n;
129 
130 		ret = type == IB_QPT_GSI;
131 		n = 1 << (ret + 2 * (port - 1));
132 		spin_lock(&qpt->lock);
133 		if (qpt->flags & n)
134 			ret = -EINVAL;
135 		else
136 			qpt->flags |= n;
137 		spin_unlock(&qpt->lock);
138 		goto bail;
139 	}
140 
141 	qpn = qpt->last + 2;
142 	if (qpn >= QPN_MAX)
143 		qpn = 2;
144 	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
145 		qpn = (qpn | qpt->mask) + 2;
146 	offset = qpn & BITS_PER_PAGE_MASK;
147 	map = &qpt->map[qpn / BITS_PER_PAGE];
148 	max_scan = qpt->nmaps - !offset;
149 	for (i = 0;;) {
150 		if (unlikely(!map->page)) {
151 			get_map_page(qpt, map);
152 			if (unlikely(!map->page))
153 				break;
154 		}
155 		do {
156 			if (!test_and_set_bit(offset, map->page)) {
157 				qpt->last = qpn;
158 				ret = qpn;
159 				goto bail;
160 			}
161 			offset = find_next_offset(qpt, map, offset,
162 				dd->n_krcv_queues);
163 			qpn = mk_qpn(qpt, map, offset);
164 			/*
165 			 * This test differs from alloc_pidmap().
166 			 * If find_next_offset() does find a zero
167 			 * bit, we don't need to check for QPN
168 			 * wrapping around past our starting QPN.
169 			 * We just need to be sure we don't loop
170 			 * forever.
171 			 */
172 		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
173 		/*
174 		 * In order to keep the number of pages allocated to a
175 		 * minimum, we scan all the existing pages before increasing
176 		 * the size of the bitmap table.
177 		 */
178 		if (++i > max_scan) {
179 			if (qpt->nmaps == QPNMAP_ENTRIES)
180 				break;
181 			map = &qpt->map[qpt->nmaps++];
182 			offset = 0;
183 		} else if (map < &qpt->map[qpt->nmaps]) {
184 			++map;
185 			offset = 0;
186 		} else {
187 			map = &qpt->map[0];
188 			offset = 2;
189 		}
190 		qpn = mk_qpn(qpt, map, offset);
191 	}
192 
193 	ret = -ENOMEM;
194 
195 bail:
196 	return ret;
197 }
198 
199 static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
200 {
201 	struct qpn_map *map;
202 
203 	map = qpt->map + qpn / BITS_PER_PAGE;
204 	if (map->page)
205 		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
206 }
207 
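/*
 * Hash a QPN into a bucket index of the QP table.  dev->qp_rnd is a
 * per-device seed set at init time; masking with (qp_table_size - 1)
 * assumes the table size is a power of two.
 */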
208 static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
209 {
210 	return jhash_1word(qpn, dev->qp_rnd) &
211 		(dev->qp_table_size - 1);
212 }
213 
214 
215 /*
216  * Put the QP into the hash table.
217  * The hash table holds a reference to the QP.
218  */
219 static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
220 {
221 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
222 	unsigned long flags;
223 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
224 
225 	spin_lock_irqsave(&dev->qpt_lock, flags);
226 	atomic_inc(&qp->refcount);
227 
228 	if (qp->ibqp.qp_num == 0)
229 		rcu_assign_pointer(ibp->qp0, qp);
230 	else if (qp->ibqp.qp_num == 1)
231 		rcu_assign_pointer(ibp->qp1, qp);
232 	else {
233 		qp->next = dev->qp_table[n];
234 		rcu_assign_pointer(dev->qp_table[n], qp);
235 	}
236 
237 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
238 	synchronize_rcu();
239 }
240 
241 /*
242  * Remove the QP from the table so it can't be found asynchronously by
243  * the receive interrupt routine.
244  */
245 static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
246 {
247 	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
248 	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
249 	unsigned long flags;
250 
251 	spin_lock_irqsave(&dev->qpt_lock, flags);
252 
253 	if (rcu_dereference_protected(ibp->qp0,
254 			lockdep_is_held(&dev->qpt_lock)) == qp) {
255 		atomic_dec(&qp->refcount);
256 		rcu_assign_pointer(ibp->qp0, NULL);
257 	} else if (rcu_dereference_protected(ibp->qp1,
258 			lockdep_is_held(&dev->qpt_lock)) == qp) {
259 		atomic_dec(&qp->refcount);
260 		rcu_assign_pointer(ibp->qp1, NULL);
261 	} else {
262 		struct qib_qp *q;
263 		struct qib_qp __rcu **qpp;
264 
265 		qpp = &dev->qp_table[n];
266 		for (; (q = rcu_dereference_protected(*qpp,
267 				lockdep_is_held(&dev->qpt_lock))) != NULL;
268 				qpp = &q->next)
269 			if (q == qp) {
270 				atomic_dec(&qp->refcount);
271 				rcu_assign_pointer(*qpp,
272 					rcu_dereference_protected(qp->next,
273 					 lockdep_is_held(&dev->qpt_lock)));
274 				break;
275 			}
276 	}
277 
278 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
279 	synchronize_rcu();
280 }
281 
282 /**
283  * qib_free_all_qps - check for QPs still in use
284  * @dd: the qlogic_ib device whose QP table to empty
285  *
286  * There should not be any QPs still in use.
287  * Empty the QP table and return the number of QPs found in use.
288  */
289 unsigned qib_free_all_qps(struct qib_devdata *dd)
290 {
291 	struct qib_ibdev *dev = &dd->verbs_dev;
292 	unsigned long flags;
293 	struct qib_qp *qp;
294 	unsigned n, qp_inuse = 0;
295 
296 	for (n = 0; n < dd->num_pports; n++) {
297 		struct qib_ibport *ibp = &dd->pport[n].ibport_data;
298 
299 		if (!qib_mcast_tree_empty(ibp))
300 			qp_inuse++;
301 		rcu_read_lock();
302 		if (rcu_dereference(ibp->qp0))
303 			qp_inuse++;
304 		if (rcu_dereference(ibp->qp1))
305 			qp_inuse++;
306 		rcu_read_unlock();
307 	}
308 
309 	spin_lock_irqsave(&dev->qpt_lock, flags);
310 	for (n = 0; n < dev->qp_table_size; n++) {
311 		qp = rcu_dereference_protected(dev->qp_table[n],
312 			lockdep_is_held(&dev->qpt_lock));
313 		rcu_assign_pointer(dev->qp_table[n], NULL);
314 
315 		for (; qp; qp = rcu_dereference_protected(qp->next,
316 					lockdep_is_held(&dev->qpt_lock)))
317 			qp_inuse++;
318 	}
319 	spin_unlock_irqrestore(&dev->qpt_lock, flags);
320 	synchronize_rcu();
321 
322 	return qp_inuse;
323 }
324 
325 /**
326  * qib_lookup_qpn - return the QP with the given QPN
327  * @ibp: the IB port to look up the QPN on
328  * @qpn: the QP number to look up
329  *
330  * The caller is responsible for decrementing the QP reference count
331  * when done.
332  */
333 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
334 {
335 	struct qib_qp *qp = NULL;
336 
337 	if (unlikely(qpn <= 1)) {
338 		rcu_read_lock();
339 		if (qpn == 0)
340 			qp = rcu_dereference(ibp->qp0);
341 		else
342 			qp = rcu_dereference(ibp->qp1);
343 	} else {
344 		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
345 		unsigned n = qpn_hash(dev, qpn);
346 
347 		rcu_read_lock();
348 		for (qp = rcu_dereference(dev->qp_table[n]); qp;
349 			qp = rcu_dereference(qp->next))
350 			if (qp->ibqp.qp_num == qpn)
351 				break;
352 	}
353 	if (qp)
354 		if (unlikely(!atomic_inc_not_zero(&qp->refcount)))
355 			qp = NULL;
356 
357 	rcu_read_unlock();
358 	return qp;
359 }
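/*
 * A minimal sketch of the usual caller pattern (the receive path does
 * essentially this): look the QP up, use it, then drop the reference taken
 * by qib_lookup_qpn() and wake anyone waiting for the QP to go idle.
 *
 *	qp = qib_lookup_qpn(ibp, qpn);
 *	if (qp) {
 *		... process the packet on qp ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */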
360 
361 /**
362  * qib_reset_qp - initialize the QP state to the reset state
363  * @qp: the QP to reset
364  * @type: the QP type
365  */
366 static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
367 {
368 	qp->remote_qpn = 0;
369 	qp->qkey = 0;
370 	qp->qp_access_flags = 0;
371 	atomic_set(&qp->s_dma_busy, 0);
372 	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
373 	qp->s_hdrwords = 0;
374 	qp->s_wqe = NULL;
375 	qp->s_draining = 0;
376 	qp->s_next_psn = 0;
377 	qp->s_last_psn = 0;
378 	qp->s_sending_psn = 0;
379 	qp->s_sending_hpsn = 0;
380 	qp->s_psn = 0;
381 	qp->r_psn = 0;
382 	qp->r_msn = 0;
383 	if (type == IB_QPT_RC) {
384 		qp->s_state = IB_OPCODE_RC_SEND_LAST;
385 		qp->r_state = IB_OPCODE_RC_SEND_LAST;
386 	} else {
387 		qp->s_state = IB_OPCODE_UC_SEND_LAST;
388 		qp->r_state = IB_OPCODE_UC_SEND_LAST;
389 	}
390 	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
391 	qp->r_nak_state = 0;
392 	qp->r_aflags = 0;
393 	qp->r_flags = 0;
394 	qp->s_head = 0;
395 	qp->s_tail = 0;
396 	qp->s_cur = 0;
397 	qp->s_acked = 0;
398 	qp->s_last = 0;
399 	qp->s_ssn = 1;
400 	qp->s_lsn = 0;
401 	qp->s_mig_state = IB_MIG_MIGRATED;
402 	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
403 	qp->r_head_ack_queue = 0;
404 	qp->s_tail_ack_queue = 0;
405 	qp->s_num_rd_atomic = 0;
406 	if (qp->r_rq.wq) {
407 		qp->r_rq.wq->head = 0;
408 		qp->r_rq.wq->tail = 0;
409 	}
410 	qp->r_sge.num_sge = 0;
411 }
412 
413 static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
414 {
415 	unsigned n;
416 
417 	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
418 		qib_put_ss(&qp->s_rdma_read_sge);
419 
420 	qib_put_ss(&qp->r_sge);
421 
422 	if (clr_sends) {
423 		while (qp->s_last != qp->s_head) {
424 			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
425 			unsigned i;
426 
427 			for (i = 0; i < wqe->wr.num_sge; i++) {
428 				struct qib_sge *sge = &wqe->sg_list[i];
429 
430 				qib_put_mr(sge->mr);
431 			}
432 			if (qp->ibqp.qp_type == IB_QPT_UD ||
433 			    qp->ibqp.qp_type == IB_QPT_SMI ||
434 			    qp->ibqp.qp_type == IB_QPT_GSI)
435 				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
436 			if (++qp->s_last >= qp->s_size)
437 				qp->s_last = 0;
438 		}
439 		if (qp->s_rdma_mr) {
440 			qib_put_mr(qp->s_rdma_mr);
441 			qp->s_rdma_mr = NULL;
442 		}
443 	}
444 
445 	if (qp->ibqp.qp_type != IB_QPT_RC)
446 		return;
447 
448 	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
449 		struct qib_ack_entry *e = &qp->s_ack_queue[n];
450 
451 		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
452 		    e->rdma_sge.mr) {
453 			qib_put_mr(e->rdma_sge.mr);
454 			e->rdma_sge.mr = NULL;
455 		}
456 	}
457 }
458 
459 /**
460  * qib_error_qp - put a QP into the error state
461  * @qp: the QP to put into the error state
462  * @err: the receive completion error to signal if a RWQE is active
463  *
464  * Flushes both send and receive work queues.
465  * Returns true if last WQE event should be generated.
466  * The QP r_lock and s_lock should be held and interrupts disabled.
467  * If we are already in error state, just return.
468  */
469 int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
470 {
471 	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
472 	struct ib_wc wc;
473 	int ret = 0;
474 
475 	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
476 		goto bail;
477 
478 	qp->state = IB_QPS_ERR;
479 
480 	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
481 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
482 		del_timer(&qp->s_timer);
483 	}
484 
485 	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
486 		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;
487 
488 	spin_lock(&dev->pending_lock);
489 	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
490 		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
491 		list_del_init(&qp->iowait);
492 	}
493 	spin_unlock(&dev->pending_lock);
494 
495 	if (!(qp->s_flags & QIB_S_BUSY)) {
496 		qp->s_hdrwords = 0;
497 		if (qp->s_rdma_mr) {
498 			qib_put_mr(qp->s_rdma_mr);
499 			qp->s_rdma_mr = NULL;
500 		}
501 		if (qp->s_tx) {
502 			qib_put_txreq(qp->s_tx);
503 			qp->s_tx = NULL;
504 		}
505 	}
506 
507 	/* Schedule the sending tasklet to drain the send work queue. */
508 	if (qp->s_last != qp->s_head)
509 		qib_schedule_send(qp);
510 
511 	clear_mr_refs(qp, 0);
512 
513 	memset(&wc, 0, sizeof(wc));
514 	wc.qp = &qp->ibqp;
515 	wc.opcode = IB_WC_RECV;
516 
517 	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
518 		wc.wr_id = qp->r_wr_id;
519 		wc.status = err;
520 		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
521 	}
522 	wc.status = IB_WC_WR_FLUSH_ERR;
523 
524 	if (qp->r_rq.wq) {
525 		struct qib_rwq *wq;
526 		u32 head;
527 		u32 tail;
528 
529 		spin_lock(&qp->r_rq.lock);
530 
531 		/* sanity check pointers before trusting them */
532 		wq = qp->r_rq.wq;
533 		head = wq->head;
534 		if (head >= qp->r_rq.size)
535 			head = 0;
536 		tail = wq->tail;
537 		if (tail >= qp->r_rq.size)
538 			tail = 0;
539 		while (tail != head) {
540 			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
541 			if (++tail >= qp->r_rq.size)
542 				tail = 0;
543 			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
544 		}
545 		wq->tail = tail;
546 
547 		spin_unlock(&qp->r_rq.lock);
548 	} else if (qp->ibqp.event_handler)
549 		ret = 1;
550 
551 bail:
552 	return ret;
553 }
554 
555 /**
556  * qib_modify_qp - modify the attributes of a queue pair
557  * @ibqp: the queue pair whose attributes we're modifying
558  * @attr: the new attributes
559  * @attr_mask: the mask of attributes to modify
560  * @udata: user data for libibverbs.so
561  *
562  * Returns 0 on success, otherwise returns an errno.
563  */
564 int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
565 		  int attr_mask, struct ib_udata *udata)
566 {
567 	struct qib_ibdev *dev = to_idev(ibqp->device);
568 	struct qib_qp *qp = to_iqp(ibqp);
569 	enum ib_qp_state cur_state, new_state;
570 	struct ib_event ev;
571 	int lastwqe = 0;
572 	int mig = 0;
573 	int ret;
574 	u32 pmtu = 0; /* for gcc warning only */
575 
576 	spin_lock_irq(&qp->r_lock);
577 	spin_lock(&qp->s_lock);
578 
579 	cur_state = attr_mask & IB_QP_CUR_STATE ?
580 		attr->cur_qp_state : qp->state;
581 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
582 
583 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
584 				attr_mask))
585 		goto inval;
586 
587 	if (attr_mask & IB_QP_AV) {
588 		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
589 			goto inval;
590 		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
591 			goto inval;
592 	}
593 
594 	if (attr_mask & IB_QP_ALT_PATH) {
595 		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
596 			goto inval;
597 		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
598 			goto inval;
599 		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
600 			goto inval;
601 	}
602 
603 	if (attr_mask & IB_QP_PKEY_INDEX)
604 		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
605 			goto inval;
606 
607 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
608 		if (attr->min_rnr_timer > 31)
609 			goto inval;
610 
611 	if (attr_mask & IB_QP_PORT)
612 		if (qp->ibqp.qp_type == IB_QPT_SMI ||
613 		    qp->ibqp.qp_type == IB_QPT_GSI ||
614 		    attr->port_num == 0 ||
615 		    attr->port_num > ibqp->device->phys_port_cnt)
616 			goto inval;
617 
618 	if (attr_mask & IB_QP_DEST_QPN)
619 		if (attr->dest_qp_num > QIB_QPN_MASK)
620 			goto inval;
621 
622 	if (attr_mask & IB_QP_RETRY_CNT)
623 		if (attr->retry_cnt > 7)
624 			goto inval;
625 
626 	if (attr_mask & IB_QP_RNR_RETRY)
627 		if (attr->rnr_retry > 7)
628 			goto inval;
629 
630 	/*
631 	 * Don't allow invalid path_mtu values.  OK to set greater
632 	 * than the active mtu (or even the max_cap, if we have tuned
633 	 * that to a small mtu).  We'll set qp->path_mtu to the lesser
634 	 * of the requested attribute mtu and the active mtu,
635 	 * for packetizing messages.
636 	 * Note that the QP port has to be set in INIT and MTU in RTR.
637 	 */
638 	if (attr_mask & IB_QP_PATH_MTU) {
639 		struct qib_devdata *dd = dd_from_dev(dev);
640 		int mtu, pidx = qp->port_num - 1;
641 
642 		mtu = ib_mtu_enum_to_int(attr->path_mtu);
643 		if (mtu == -1)
644 			goto inval;
645 		if (mtu > dd->pport[pidx].ibmtu) {
646 			switch (dd->pport[pidx].ibmtu) {
647 			case 4096:
648 				pmtu = IB_MTU_4096;
649 				break;
650 			case 2048:
651 				pmtu = IB_MTU_2048;
652 				break;
653 			case 1024:
654 				pmtu = IB_MTU_1024;
655 				break;
656 			case 512:
657 				pmtu = IB_MTU_512;
658 				break;
659 			case 256:
660 				pmtu = IB_MTU_256;
661 				break;
662 			default:
663 				pmtu = IB_MTU_2048;
664 			}
665 		} else
666 			pmtu = attr->path_mtu;
667 	}
668 
669 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
670 		if (attr->path_mig_state == IB_MIG_REARM) {
671 			if (qp->s_mig_state == IB_MIG_ARMED)
672 				goto inval;
673 			if (new_state != IB_QPS_RTS)
674 				goto inval;
675 		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
676 			if (qp->s_mig_state == IB_MIG_REARM)
677 				goto inval;
678 			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
679 				goto inval;
680 			if (qp->s_mig_state == IB_MIG_ARMED)
681 				mig = 1;
682 		} else
683 			goto inval;
684 	}
685 
686 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
687 		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
688 			goto inval;
689 
690 	switch (new_state) {
691 	case IB_QPS_RESET:
692 		if (qp->state != IB_QPS_RESET) {
693 			qp->state = IB_QPS_RESET;
694 			spin_lock(&dev->pending_lock);
695 			if (!list_empty(&qp->iowait))
696 				list_del_init(&qp->iowait);
697 			spin_unlock(&dev->pending_lock);
698 			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
699 			spin_unlock(&qp->s_lock);
700 			spin_unlock_irq(&qp->r_lock);
701 			/* Stop the sending work queue and retry timer */
702 			cancel_work_sync(&qp->s_work);
703 			del_timer_sync(&qp->s_timer);
704 			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
705 			if (qp->s_tx) {
706 				qib_put_txreq(qp->s_tx);
707 				qp->s_tx = NULL;
708 			}
709 			remove_qp(dev, qp);
710 			wait_event(qp->wait, !atomic_read(&qp->refcount));
711 			spin_lock_irq(&qp->r_lock);
712 			spin_lock(&qp->s_lock);
713 			clear_mr_refs(qp, 1);
714 			qib_reset_qp(qp, ibqp->qp_type);
715 		}
716 		break;
717 
718 	case IB_QPS_RTR:
719 		/* Allow event to retrigger if QP set to RTR more than once */
720 		qp->r_flags &= ~QIB_R_COMM_EST;
721 		qp->state = new_state;
722 		break;
723 
724 	case IB_QPS_SQD:
725 		qp->s_draining = qp->s_last != qp->s_cur;
726 		qp->state = new_state;
727 		break;
728 
729 	case IB_QPS_SQE:
730 		if (qp->ibqp.qp_type == IB_QPT_RC)
731 			goto inval;
732 		qp->state = new_state;
733 		break;
734 
735 	case IB_QPS_ERR:
736 		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
737 		break;
738 
739 	default:
740 		qp->state = new_state;
741 		break;
742 	}
743 
744 	if (attr_mask & IB_QP_PKEY_INDEX)
745 		qp->s_pkey_index = attr->pkey_index;
746 
747 	if (attr_mask & IB_QP_PORT)
748 		qp->port_num = attr->port_num;
749 
750 	if (attr_mask & IB_QP_DEST_QPN)
751 		qp->remote_qpn = attr->dest_qp_num;
752 
753 	if (attr_mask & IB_QP_SQ_PSN) {
754 		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
755 		qp->s_psn = qp->s_next_psn;
756 		qp->s_sending_psn = qp->s_next_psn;
757 		qp->s_last_psn = qp->s_next_psn - 1;
758 		qp->s_sending_hpsn = qp->s_last_psn;
759 	}
760 
761 	if (attr_mask & IB_QP_RQ_PSN)
762 		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;
763 
764 	if (attr_mask & IB_QP_ACCESS_FLAGS)
765 		qp->qp_access_flags = attr->qp_access_flags;
766 
767 	if (attr_mask & IB_QP_AV) {
768 		qp->remote_ah_attr = attr->ah_attr;
769 		qp->s_srate = attr->ah_attr.static_rate;
770 	}
771 
772 	if (attr_mask & IB_QP_ALT_PATH) {
773 		qp->alt_ah_attr = attr->alt_ah_attr;
774 		qp->s_alt_pkey_index = attr->alt_pkey_index;
775 	}
776 
777 	if (attr_mask & IB_QP_PATH_MIG_STATE) {
778 		qp->s_mig_state = attr->path_mig_state;
779 		if (mig) {
780 			qp->remote_ah_attr = qp->alt_ah_attr;
781 			qp->port_num = qp->alt_ah_attr.port_num;
782 			qp->s_pkey_index = qp->s_alt_pkey_index;
783 		}
784 	}
785 
786 	if (attr_mask & IB_QP_PATH_MTU) {
787 		qp->path_mtu = pmtu;
788 		qp->pmtu = ib_mtu_enum_to_int(pmtu);
789 	}
790 
791 	if (attr_mask & IB_QP_RETRY_CNT) {
792 		qp->s_retry_cnt = attr->retry_cnt;
793 		qp->s_retry = attr->retry_cnt;
794 	}
795 
796 	if (attr_mask & IB_QP_RNR_RETRY) {
797 		qp->s_rnr_retry_cnt = attr->rnr_retry;
798 		qp->s_rnr_retry = attr->rnr_retry;
799 	}
800 
801 	if (attr_mask & IB_QP_MIN_RNR_TIMER)
802 		qp->r_min_rnr_timer = attr->min_rnr_timer;
803 
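	/*
	 * The IB local ACK timeout is defined as 4.096 usec * 2^timeout,
	 * so the conversion below yields jiffies; e.g. timeout == 14 works
	 * out to roughly 67 ms per retry interval.
	 */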
804 	if (attr_mask & IB_QP_TIMEOUT) {
805 		qp->timeout = attr->timeout;
806 		qp->timeout_jiffies =
807 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
808 				1000UL);
809 	}
810 
811 	if (attr_mask & IB_QP_QKEY)
812 		qp->qkey = attr->qkey;
813 
814 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
815 		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
816 
817 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
818 		qp->s_max_rd_atomic = attr->max_rd_atomic;
819 
820 	spin_unlock(&qp->s_lock);
821 	spin_unlock_irq(&qp->r_lock);
822 
823 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
824 		insert_qp(dev, qp);
825 
826 	if (lastwqe) {
827 		ev.device = qp->ibqp.device;
828 		ev.element.qp = &qp->ibqp;
829 		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
830 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
831 	}
832 	if (mig) {
833 		ev.device = qp->ibqp.device;
834 		ev.element.qp = &qp->ibqp;
835 		ev.event = IB_EVENT_PATH_MIG;
836 		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
837 	}
838 	ret = 0;
839 	goto bail;
840 
841 inval:
842 	spin_unlock(&qp->s_lock);
843 	spin_unlock_irq(&qp->r_lock);
844 	ret = -EINVAL;
845 
846 bail:
847 	return ret;
848 }
849 
850 int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
851 		 int attr_mask, struct ib_qp_init_attr *init_attr)
852 {
853 	struct qib_qp *qp = to_iqp(ibqp);
854 
855 	attr->qp_state = qp->state;
856 	attr->cur_qp_state = attr->qp_state;
857 	attr->path_mtu = qp->path_mtu;
858 	attr->path_mig_state = qp->s_mig_state;
859 	attr->qkey = qp->qkey;
860 	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
861 	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
862 	attr->dest_qp_num = qp->remote_qpn;
863 	attr->qp_access_flags = qp->qp_access_flags;
864 	attr->cap.max_send_wr = qp->s_size - 1;
865 	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
866 	attr->cap.max_send_sge = qp->s_max_sge;
867 	attr->cap.max_recv_sge = qp->r_rq.max_sge;
868 	attr->cap.max_inline_data = 0;
869 	attr->ah_attr = qp->remote_ah_attr;
870 	attr->alt_ah_attr = qp->alt_ah_attr;
871 	attr->pkey_index = qp->s_pkey_index;
872 	attr->alt_pkey_index = qp->s_alt_pkey_index;
873 	attr->en_sqd_async_notify = 0;
874 	attr->sq_draining = qp->s_draining;
875 	attr->max_rd_atomic = qp->s_max_rd_atomic;
876 	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
877 	attr->min_rnr_timer = qp->r_min_rnr_timer;
878 	attr->port_num = qp->port_num;
879 	attr->timeout = qp->timeout;
880 	attr->retry_cnt = qp->s_retry_cnt;
881 	attr->rnr_retry = qp->s_rnr_retry_cnt;
882 	attr->alt_port_num = qp->alt_ah_attr.port_num;
883 	attr->alt_timeout = qp->alt_timeout;
884 
885 	init_attr->event_handler = qp->ibqp.event_handler;
886 	init_attr->qp_context = qp->ibqp.qp_context;
887 	init_attr->send_cq = qp->ibqp.send_cq;
888 	init_attr->recv_cq = qp->ibqp.recv_cq;
889 	init_attr->srq = qp->ibqp.srq;
890 	init_attr->cap = attr->cap;
891 	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
892 		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
893 	else
894 		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
895 	init_attr->qp_type = qp->ibqp.qp_type;
896 	init_attr->port_num = qp->port_num;
897 	return 0;
898 }
899 
900 /**
901  * qib_compute_aeth - compute the AETH (syndrome + MSN)
902  * @qp: the queue pair to compute the AETH for
903  *
904  * Returns the AETH.
905  */
906 __be32 qib_compute_aeth(struct qib_qp *qp)
907 {
908 	u32 aeth = qp->r_msn & QIB_MSN_MASK;
909 
910 	if (qp->ibqp.srq) {
911 		/*
912 		 * Shared receive queues don't generate credits.
913 		 * Set the credit field to the invalid value.
914 		 */
915 		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
916 	} else {
917 		u32 min, max, x;
918 		u32 credits;
919 		struct qib_rwq *wq = qp->r_rq.wq;
920 		u32 head;
921 		u32 tail;
922 
923 		/* sanity check pointers before trusting them */
924 		head = wq->head;
925 		if (head >= qp->r_rq.size)
926 			head = 0;
927 		tail = wq->tail;
928 		if (tail >= qp->r_rq.size)
929 			tail = 0;
930 		/*
931 		 * Compute the number of credits available (RWQEs).
932 		 * XXX Not holding the r_rq.lock here so there is a small
933 		 * chance that the pair of reads is not atomic.
934 		 */
935 		credits = head - tail;
936 		if ((int)credits < 0)
937 			credits += qp->r_rq.size;
938 		/*
939 		 * Binary search the credit table to find the code to
940 		 * use.
941 		 */
942 		min = 0;
943 		max = 31;
944 		for (;;) {
945 			x = (min + max) / 2;
946 			if (credit_table[x] == credits)
947 				break;
948 			if (credit_table[x] > credits)
949 				max = x;
950 			else if (min == x)
951 				break;
952 			else
953 				min = x;
954 		}
955 		aeth |= x << QIB_AETH_CREDIT_SHIFT;
956 	}
957 	return cpu_to_be32(aeth);
958 }
959 
960 /**
961  * qib_create_qp - create a queue pair for a device
962  * @ibpd: the protection domain whose device we create the queue pair for
963  * @init_attr: the attributes of the queue pair
964  * @udata: user data for libibverbs.so
965  *
966  * Returns the queue pair on success, otherwise returns an errno.
967  *
968  * Called by the ib_create_qp() core verbs function.
969  */
970 struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
971 			    struct ib_qp_init_attr *init_attr,
972 			    struct ib_udata *udata)
973 {
974 	struct qib_qp *qp;
975 	int err;
976 	struct qib_swqe *swq = NULL;
977 	struct qib_ibdev *dev;
978 	struct qib_devdata *dd;
979 	size_t sz;
980 	size_t sg_list_sz;
981 	struct ib_qp *ret;
982 
983 	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
984 	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
985 		ret = ERR_PTR(-EINVAL);
986 		goto bail;
987 	}
988 
989 	/* Check receive queue parameters if no SRQ is specified. */
990 	if (!init_attr->srq) {
991 		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
992 		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
993 			ret = ERR_PTR(-EINVAL);
994 			goto bail;
995 		}
996 		if (init_attr->cap.max_send_sge +
997 		    init_attr->cap.max_send_wr +
998 		    init_attr->cap.max_recv_sge +
999 		    init_attr->cap.max_recv_wr == 0) {
1000 			ret = ERR_PTR(-EINVAL);
1001 			goto bail;
1002 		}
1003 	}
1004 
1005 	switch (init_attr->qp_type) {
1006 	case IB_QPT_SMI:
1007 	case IB_QPT_GSI:
1008 		if (init_attr->port_num == 0 ||
1009 		    init_attr->port_num > ibpd->device->phys_port_cnt) {
1010 			ret = ERR_PTR(-EINVAL);
1011 			goto bail;
1012 		}
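		/* SMI/GSI intentionally fall through to the common setup below */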
1013 	case IB_QPT_UC:
1014 	case IB_QPT_RC:
1015 	case IB_QPT_UD:
1016 		sz = sizeof(struct qib_sge) *
1017 			init_attr->cap.max_send_sge +
1018 			sizeof(struct qib_swqe);
1019 		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
1020 		if (swq == NULL) {
1021 			ret = ERR_PTR(-ENOMEM);
1022 			goto bail;
1023 		}
1024 		sz = sizeof(*qp);
1025 		sg_list_sz = 0;
1026 		if (init_attr->srq) {
1027 			struct qib_srq *srq = to_isrq(init_attr->srq);
1028 
1029 			if (srq->rq.max_sge > 1)
1030 				sg_list_sz = sizeof(*qp->r_sg_list) *
1031 					(srq->rq.max_sge - 1);
1032 		} else if (init_attr->cap.max_recv_sge > 1)
1033 			sg_list_sz = sizeof(*qp->r_sg_list) *
1034 				(init_attr->cap.max_recv_sge - 1);
1035 		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
1036 		if (!qp) {
1037 			ret = ERR_PTR(-ENOMEM);
1038 			goto bail_swq;
1039 		}
1040 		RCU_INIT_POINTER(qp->next, NULL);
1041 		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
1042 		if (!qp->s_hdr) {
1043 			ret = ERR_PTR(-ENOMEM);
1044 			goto bail_qp;
1045 		}
1046 		qp->timeout_jiffies =
1047 			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
1048 				1000UL);
1049 		if (init_attr->srq)
1050 			sz = 0;
1051 		else {
1052 			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
1053 			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
1054 			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
1055 				sizeof(struct qib_rwqe);
1056 			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
1057 						   qp->r_rq.size * sz);
1058 			if (!qp->r_rq.wq) {
1059 				ret = ERR_PTR(-ENOMEM);
1060 				goto bail_qp;
1061 			}
1062 		}
1063 
1064 		/*
1065 		 * ib_create_qp() will initialize qp->ibqp
1066 		 * except for qp->ibqp.qp_num.
1067 		 */
1068 		spin_lock_init(&qp->r_lock);
1069 		spin_lock_init(&qp->s_lock);
1070 		spin_lock_init(&qp->r_rq.lock);
1071 		atomic_set(&qp->refcount, 0);
1072 		init_waitqueue_head(&qp->wait);
1073 		init_waitqueue_head(&qp->wait_dma);
1074 		init_timer(&qp->s_timer);
1075 		qp->s_timer.data = (unsigned long)qp;
1076 		INIT_WORK(&qp->s_work, qib_do_send);
1077 		INIT_LIST_HEAD(&qp->iowait);
1078 		INIT_LIST_HEAD(&qp->rspwait);
1079 		qp->state = IB_QPS_RESET;
1080 		qp->s_wq = swq;
1081 		qp->s_size = init_attr->cap.max_send_wr + 1;
1082 		qp->s_max_sge = init_attr->cap.max_send_sge;
1083 		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
1084 			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
1085 		dev = to_idev(ibpd->device);
1086 		dd = dd_from_dev(dev);
1087 		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
1088 				init_attr->port_num);
1089 		if (err < 0) {
1090 			ret = ERR_PTR(err);
1091 			vfree(qp->r_rq.wq);
1092 			goto bail_qp;
1093 		}
1094 		qp->ibqp.qp_num = err;
1095 		qp->port_num = init_attr->port_num;
1096 		qib_reset_qp(qp, init_attr->qp_type);
1097 		break;
1098 
1099 	default:
1100 		/* Don't support raw QPs */
1101 		ret = ERR_PTR(-ENOSYS);
1102 		goto bail;
1103 	}
1104 
1105 	init_attr->cap.max_inline_data = 0;
1106 
1107 	/*
1108 	 * Return the address of the RWQ as the offset to mmap.
1109 	 * See qib_mmap() for details.
1110 	 */
1111 	if (udata && udata->outlen >= sizeof(__u64)) {
1112 		if (!qp->r_rq.wq) {
1113 			__u64 offset = 0;
1114 
1115 			err = ib_copy_to_udata(udata, &offset,
1116 					       sizeof(offset));
1117 			if (err) {
1118 				ret = ERR_PTR(err);
1119 				goto bail_ip;
1120 			}
1121 		} else {
1122 			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;
1123 
1124 			qp->ip = qib_create_mmap_info(dev, s,
1125 						      ibpd->uobject->context,
1126 						      qp->r_rq.wq);
1127 			if (!qp->ip) {
1128 				ret = ERR_PTR(-ENOMEM);
1129 				goto bail_ip;
1130 			}
1131 
1132 			err = ib_copy_to_udata(udata, &(qp->ip->offset),
1133 					       sizeof(qp->ip->offset));
1134 			if (err) {
1135 				ret = ERR_PTR(err);
1136 				goto bail_ip;
1137 			}
1138 		}
1139 	}
1140 
1141 	spin_lock(&dev->n_qps_lock);
1142 	if (dev->n_qps_allocated == ib_qib_max_qps) {
1143 		spin_unlock(&dev->n_qps_lock);
1144 		ret = ERR_PTR(-ENOMEM);
1145 		goto bail_ip;
1146 	}
1147 
1148 	dev->n_qps_allocated++;
1149 	spin_unlock(&dev->n_qps_lock);
1150 
1151 	if (qp->ip) {
1152 		spin_lock_irq(&dev->pending_lock);
1153 		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
1154 		spin_unlock_irq(&dev->pending_lock);
1155 	}
1156 
1157 	ret = &qp->ibqp;
1158 	goto bail;
1159 
1160 bail_ip:
1161 	if (qp->ip)
1162 		kref_put(&qp->ip->ref, qib_release_mmap_info);
1163 	else
1164 		vfree(qp->r_rq.wq);
1165 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1166 bail_qp:
1167 	kfree(qp->s_hdr);
1168 	kfree(qp);
1169 bail_swq:
1170 	vfree(swq);
1171 bail:
1172 	return ret;
1173 }
1174 
1175 /**
1176  * qib_destroy_qp - destroy a queue pair
1177  * @ibqp: the queue pair to destroy
1178  *
1179  * Returns 0 on success.
1180  *
1181  * Note that this can be called while the QP is actively sending or
1182  * receiving!
1183  */
1184 int qib_destroy_qp(struct ib_qp *ibqp)
1185 {
1186 	struct qib_qp *qp = to_iqp(ibqp);
1187 	struct qib_ibdev *dev = to_idev(ibqp->device);
1188 
1189 	/* Make sure HW and driver activity is stopped. */
1190 	spin_lock_irq(&qp->s_lock);
1191 	if (qp->state != IB_QPS_RESET) {
1192 		qp->state = IB_QPS_RESET;
1193 		spin_lock(&dev->pending_lock);
1194 		if (!list_empty(&qp->iowait))
1195 			list_del_init(&qp->iowait);
1196 		spin_unlock(&dev->pending_lock);
1197 		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
1198 		spin_unlock_irq(&qp->s_lock);
1199 		cancel_work_sync(&qp->s_work);
1200 		del_timer_sync(&qp->s_timer);
1201 		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
1202 		if (qp->s_tx) {
1203 			qib_put_txreq(qp->s_tx);
1204 			qp->s_tx = NULL;
1205 		}
1206 		remove_qp(dev, qp);
1207 		wait_event(qp->wait, !atomic_read(&qp->refcount));
1208 		clear_mr_refs(qp, 1);
1209 	} else
1210 		spin_unlock_irq(&qp->s_lock);
1211 
1212 	/* all uses cleaned up, mark the QPN available again */
1213 	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
1214 	spin_lock(&dev->n_qps_lock);
1215 	dev->n_qps_allocated--;
1216 	spin_unlock(&dev->n_qps_lock);
1217 
1218 	if (qp->ip)
1219 		kref_put(&qp->ip->ref, qib_release_mmap_info);
1220 	else
1221 		vfree(qp->r_rq.wq);
1222 	vfree(qp->s_wq);
1223 	kfree(qp->s_hdr);
1224 	kfree(qp);
1225 	return 0;
1226 }
1227 
1228 /**
1229  * qib_init_qpn_table - initialize the QP number table for a device
1230  * @qpt: the QPN table
1231  */
1232 void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
1233 {
1234 	spin_lock_init(&qpt->lock);
1235 	qpt->last = 1;          /* start with QPN 2 */
1236 	qpt->nmaps = 1;
1237 	qpt->mask = dd->qpn_mask;
1238 }
1239 
1240 /**
1241  * qib_free_qpn_table - free the QP number table for a device
1242  * @qpt: the QPN table
1243  */
1244 void qib_free_qpn_table(struct qib_qpn_table *qpt)
1245 {
1246 	int i;
1247 
1248 	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
1249 		if (qpt->map[i].page)
1250 			free_page((unsigned long) qpt->map[i].page);
1251 }
1252 
1253 /**
1254  * qib_get_credit - handle a credit update from an incoming AETH
1255  * @qp: the QP the AETH applies to
1256  * @aeth: the Acknowledge Extended Transport Header
1257  *
1258  * The QP s_lock should be held.
1259  */
1260 void qib_get_credit(struct qib_qp *qp, u32 aeth)
1261 {
1262 	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;
1263 
1264 	/*
1265 	 * If the credit is invalid, we can send
1266 	 * as many packets as we like.  Otherwise, we have to
1267 	 * honor the credit field.
1268 	 */
1269 	if (credit == QIB_AETH_CREDIT_INVAL) {
1270 		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1271 			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
1272 			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1273 				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1274 				qib_schedule_send(qp);
1275 			}
1276 		}
1277 	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
1278 		/* Compute new LSN (i.e., MSN + credit) */
1279 		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
1280 		if (qib_cmp24(credit, qp->s_lsn) > 0) {
1281 			qp->s_lsn = credit;
1282 			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
1283 				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
1284 				qib_schedule_send(qp);
1285 			}
1286 		}
1287 	}
1288 }
1289