/*
 * Copyright(c) 2016, 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/hash.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_hdrs.h>
#include <rdma/opa_addr.h>
#include "qp.h"
#include "vt.h"
#include "trace.h"

static void rvt_rc_timeout(unsigned long arg);

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
static const u32 ib_rvt_rnr_table[32] = {
	655360,	/* 00: 655.36 */
	10,	/* 01: .01 */
	20,	/* 02: .02 */
	30,	/* 03: .03 */
	40,	/* 04: .04 */
	60,	/* 05: .06 */
	80,	/* 06: .08 */
	120,	/* 07: .12 */
	160,	/* 08: .16 */
	240,	/* 09: .24 */
	320,	/* 0A: .32 */
	480,	/* 0B: .48 */
	640,	/* 0C: .64 */
	960,	/* 0D: .96 */
	1280,	/* 0E: 1.28 */
	1920,	/* 0F: 1.92 */
	2560,	/* 10: 2.56 */
	3840,	/* 11: 3.84 */
	5120,	/* 12: 5.12 */
	7680,	/* 13: 7.68 */
	10240,	/* 14: 10.24 */
	15360,	/* 15: 15.36 */
	20480,	/* 16: 20.48 */
	30720,	/* 17: 30.72 */
	40960,	/* 18: 40.96 */
	61440,	/* 19: 61.44 */
	81920,	/* 1A: 81.92 */
	122880,	/* 1B: 122.88 */
	163840,	/* 1C: 163.84 */
	245760,	/* 1D: 245.76 */
	327680,	/* 1E: 327.68 */
	491520	/* 1F: 491.52 */
};
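
/*
 * Usage sketch (illustrative only): the 5-bit RNR timeout code carried
 * in an AETH indexes this table to recover the delay in microseconds.
 * The mask below is an assumption for illustration; callers extract
 * the code with the AETH credit shift/mask definitions.
 *
 *	u32 code = aeth_rnr_code & 0x1f;   // 5-bit timeout code
 *	u32 usec = ib_rvt_rnr_table[code]; // e.g. code 0x0E -> 1280 usec
 */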

/*
 * Note that it is OK to post send work requests in the SQE and ERR
 * states; rvt_do_send() will process them and generate error
 * completions as per IB 1.2 C10-96.
 */
const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = 0,
	[IB_QPS_INIT] = RVT_POST_RECV_OK,
	[IB_QPS_RTR] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK,
	[IB_QPS_RTS] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK |
	    RVT_PROCESS_NEXT_SEND_OK,
	[IB_QPS_SQD] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_PROCESS_SEND_OK,
	[IB_QPS_SQE] = RVT_POST_RECV_OK | RVT_PROCESS_RECV_OK |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
	[IB_QPS_ERR] = RVT_POST_RECV_OK | RVT_FLUSH_RECV |
	    RVT_POST_SEND_OK | RVT_FLUSH_SEND,
};
EXPORT_SYMBOL(ib_rvt_state_ops);
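
/*
 * Illustrative use of the table above, mirroring checks made later in
 * this file: each QP state maps to a bitmask of permitted operations,
 * so a validity check reduces to a single AND, e.g.:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;	// sends may not be posted in this state
 */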

static void get_map_page(struct rvt_qpn_table *qpt,
			 struct rvt_qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/**
 * init_qpn_table - initialize the QP number table for a device
 * @rdi: rvt device info structure
 * @qpt: the QPN table
 */
static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt)
{
	u32 offset, i;
	struct rvt_qpn_map *map;
	int ret = 0;

	if (!(rdi->dparms.qpn_res_end >= rdi->dparms.qpn_res_start))
		return -EINVAL;

	spin_lock_init(&qpt->lock);

	qpt->last = rdi->dparms.qpn_start;
	qpt->incr = rdi->dparms.qpn_inc << rdi->dparms.qos_shift;

	/*
	 * Drivers may want some QPs beyond what verbs needs; let them use
	 * our QPN table rather than keeping one of their own. Go ahead and
	 * mark the bitmaps for those here. The reserved range must be
	 * *after* the range which verbs will pick from.
	 */

	/* Figure out number of bit maps needed before reserved range */
	qpt->nmaps = rdi->dparms.qpn_res_start / RVT_BITS_PER_PAGE;

	/* This should always be zero */
	offset = rdi->dparms.qpn_res_start & RVT_BITS_PER_PAGE_MASK;

	/* Starting with the first reserved bit map */
	map = &qpt->map[qpt->nmaps];

	rvt_pr_info(rdi, "Reserving QPNs from 0x%x to 0x%x for non-verbs use\n",
		    rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end);
	for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) {
		if (!map->page) {
			get_map_page(qpt, map);
			if (!map->page) {
				ret = -ENOMEM;
				break;
			}
		}
		set_bit(offset, map->page);
		offset++;
		if (offset == RVT_BITS_PER_PAGE) {
			/* next page */
			qpt->nmaps++;
			map++;
			offset = 0;
		}
	}
	return ret;
}

/**
 * free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
static void free_qpn_table(struct rvt_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		free_page((unsigned long)qpt->map[i].page);
}
/**
 * rvt_driver_qp_init - Init driver qp resources
 * @rdi: rvt dev structure
 *
 * Return: 0 on success
 */
int rvt_driver_qp_init(struct rvt_dev_info *rdi)
{
	int i;
	int ret = -ENOMEM;

	if (!rdi->dparms.qp_table_size)
		return -EINVAL;

	/*
	 * If driver is not doing any QP allocation then make sure it is
	 * providing the necessary QP functions.
	 */
	if (!rdi->driver_f.free_all_qps ||
	    !rdi->driver_f.qp_priv_alloc ||
	    !rdi->driver_f.qp_priv_free ||
	    !rdi->driver_f.notify_qp_reset ||
	    !rdi->driver_f.notify_restart_rc)
		return -EINVAL;

	/* allocate parent object */
	rdi->qp_dev = kzalloc_node(sizeof(*rdi->qp_dev), GFP_KERNEL,
				   rdi->dparms.node);
	if (!rdi->qp_dev)
		return -ENOMEM;

	/* allocate hash table */
	rdi->qp_dev->qp_table_size = rdi->dparms.qp_table_size;
	rdi->qp_dev->qp_table_bits = ilog2(rdi->dparms.qp_table_size);
	rdi->qp_dev->qp_table =
		kmalloc_node(rdi->qp_dev->qp_table_size *
			     sizeof(*rdi->qp_dev->qp_table),
			     GFP_KERNEL, rdi->dparms.node);
	if (!rdi->qp_dev->qp_table)
		goto no_qp_table;

	for (i = 0; i < rdi->qp_dev->qp_table_size; i++)
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL);

	spin_lock_init(&rdi->qp_dev->qpt_lock);

	/* initialize qpn map */
	if (init_qpn_table(rdi, &rdi->qp_dev->qpn_table))
		goto fail_table;

	spin_lock_init(&rdi->n_qps_lock);

	return 0;

fail_table:
	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);

no_qp_table:
	kfree(rdi->qp_dev);

	return ret;
}

/**
 * rvt_free_all_qps - check for QPs still in use
 * @rdi: rvt device info structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
	unsigned long flags;
	struct rvt_qp *qp;
	unsigned n, qp_inuse = 0;
	spinlock_t *ql; /* work around too long line below */

	if (rdi->driver_f.free_all_qps)
		qp_inuse = rdi->driver_f.free_all_qps(rdi);

	qp_inuse += rvt_mcast_tree_empty(rdi);

	if (!rdi->qp_dev)
		return qp_inuse;

	ql = &rdi->qp_dev->qpt_lock;
	spin_lock_irqsave(ql, flags);
	for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
					       lockdep_is_held(ql));
		RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
							  lockdep_is_held(ql)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(ql, flags);
	synchronize_rcu();
	return qp_inuse;
}

/**
 * rvt_qp_exit - clean up qps on device exit
 * @rdi: rvt dev structure
 *
 * Check for qp leaks and free resources.
 */
void rvt_qp_exit(struct rvt_dev_info *rdi)
{
	u32 qps_inuse = rvt_free_all_qps(rdi);

	if (qps_inuse)
		rvt_pr_err(rdi, "QP memory leak! %u still in use\n",
			   qps_inuse);
	if (!rdi->qp_dev)
		return;

	kfree(rdi->qp_dev->qp_table);
	free_qpn_table(&rdi->qp_dev->qpn_table);
	kfree(rdi->qp_dev);
}

static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
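
/*
 * Worked example (assuming 4 KiB pages, so RVT_BITS_PER_PAGE is
 * 4096 * 8 = 32768): map index 2 with bit offset 5 yields
 * QPN 2 * 32768 + 5 = 65541; each bitmap page thus covers a
 * contiguous block of 32768 QPNs.
 */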

/**
 * alloc_qpn - Allocate the next available qpn or zero/one for QP type
 *	       IB_QPT_SMI/IB_QPT_GSI
 * @rdi: rvt device info structure
 * @qpt: queue pair number table pointer
 * @type: the QP type
 * @port_num: IB port number, 1 based, comes from core
 *
 * Return: The queue pair number
 */
static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
		     enum ib_qp_type type, u8 port_num)
{
	u32 i, offset, max_scan, qpn;
	struct rvt_qpn_map *map;
	u32 ret;

	if (rdi->driver_f.alloc_qpn)
		return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num);

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port_num - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + qpt->incr;
	if (qpn >= RVT_QPN_MAX)
		qpn = qpt->incr | ((qpt->last & 1) ^ 1);
	/* offset carries bit 0 */
	offset = qpn & RVT_BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset += qpt->incr;
			/*
			 * This qpn might be bogus if offset >= BITS_PER_PAGE.
			 * That is OK. It gets re-assigned below
			 */
			qpn = mk_qpn(qpt, map, offset);
		} while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			/* start at incr with current bit 0 */
			offset = qpt->incr | (offset & 1);
		} else {
			map = &qpt->map[0];
			/* wrap to first map page, invert bit 0 */
			offset = qpt->incr | ((offset & 1) ^ 1);
		}
		/* there can be no set bits in low-order QoS bits */
		WARN_ON(rdi->dparms.qos_shift > 1 &&
			offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
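
/*
 * Example of the stepping above (values are illustrative, not from a
 * real driver): with dparms.qpn_inc = 1 and dparms.qos_shift = 2,
 * init_qpn_table() computes incr = 1 << 2 = 4, so the scan visits
 * offsets 4, 8, 12, ... leaving the low-order QoS bits of every
 * candidate QPN unchanged, and bit 0 is inverted on wrap-around so
 * the odd QPNs are covered as well.
 */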

/**
 * rvt_clear_mr_refs - Drop held mr refs
 * @qp: rvt qp data structure
 * @clr_sends: if true, clear the send side as well
 */
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
	unsigned n;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
		rvt_put_ss(&qp->s_rdma_read_sge);

	rvt_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);

			rvt_put_swqe(wqe);

			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&ibah_to_rvtah(
						wqe->ud_wr.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
			smp_wmb(); /* see qp_set_savail */
		}
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	for (n = 0; qp->s_ack_queue && n < rvt_max_atomic(rdi); n++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[n];

		if (e->rdma_sge.mr) {
			rvt_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * rvt_swqe_has_lkey - return true if lkey is used by swqe
 * @wqe - the send wqe
 * @lkey - the lkey
 *
 * Test the swqe for using lkey
 */
static bool rvt_swqe_has_lkey(struct rvt_swqe *wqe, u32 lkey)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		if (rvt_mr_has_lkey(sge->mr, lkey))
			return true;
	}
	return false;
}

/**
 * rvt_qp_sends_has_lkey - return true if qp sends use lkey
 * @qp - the rvt_qp
 * @lkey - the lkey
 */
static bool rvt_qp_sends_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	u32 s_last = qp->s_last;

	while (s_last != qp->s_head) {
		struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, s_last);

		if (rvt_swqe_has_lkey(wqe, lkey))
			return true;

		if (++s_last >= qp->s_size)
			s_last = 0;
	}
	if (qp->s_rdma_mr)
		if (rvt_mr_has_lkey(qp->s_rdma_mr, lkey))
			return true;
	return false;
}

/**
 * rvt_qp_acks_has_lkey - return true if acks have lkey
 * @qp - the qp
 * @lkey - the lkey
 */
static bool rvt_qp_acks_has_lkey(struct rvt_qp *qp, u32 lkey)
{
	int i;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	for (i = 0; qp->s_ack_queue && i < rvt_max_atomic(rdi); i++) {
		struct rvt_ack_entry *e = &qp->s_ack_queue[i];

		if (rvt_mr_has_lkey(e->rdma_sge.mr, lkey))
			return true;
	}
	return false;
}

/*
 * rvt_qp_mr_clean - clean up remote ops for lkey
 * @qp - the qp
 * @lkey - the lkey that is being de-registered
 *
 * This routine checks if the lkey is being used by
 * the qp.
 *
 * If so, the qp is put into an error state to eliminate
 * any references from the qp.
 */
void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey)
{
	bool lastwqe = false;

	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		/* avoid special QPs */
		return;
	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto check_lwqe;

	if (rvt_ss_has_lkey(&qp->r_sge, lkey) ||
	    rvt_qp_sends_has_lkey(qp, lkey) ||
	    rvt_qp_acks_has_lkey(qp, lkey))
		lastwqe = rvt_error_qp(qp, IB_WC_LOC_PROT_ERR);
check_lwqe:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * rvt_remove_qp - remove qp from table
 * @rdi: rvt dev struct
 * @qp: qp to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive routine.
 */
static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (rcu_dereference_protected(rvp->qp[0],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[0], NULL);
	} else if (rcu_dereference_protected(rvp->qp[1],
			lockdep_is_held(&rdi->qp_dev->qpt_lock)) == qp) {
		RCU_INIT_POINTER(rvp->qp[1], NULL);
	} else {
		struct rvt_qp *q;
		struct rvt_qp __rcu **qpp;

		removed = 0;
		qpp = &rdi->qp_dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
			lockdep_is_held(&rdi->qp_dev->qpt_lock))) != NULL;
			qpp = &q->next) {
			if (q == qp) {
				RCU_INIT_POINTER(*qpp,
				     rcu_dereference_protected(qp->next,
				     lockdep_is_held(&rdi->qp_dev->qpt_lock)));
				removed = 1;
				trace_rvt_qpremove(qp, n);
				break;
			}
		}
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		rvt_put_qp(qp);
	}
}

/**
 * rvt_init_qp - initialize the QP state to the reset state
 * @qp: the QP to init or reinit
 * @type: the QP type
 *
 * This function is called from both rvt_create_qp() and
 * rvt_reset_qp(). The difference is that the reset path takes
 * the necessary locks to protect against concurrent access.
 */
static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_flags &= RVT_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
	atomic_set(&qp->s_reserved_used, 0);
}

/**
 * rvt_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 *
 * r_lock, s_hlock, and s_lock are required to be held by the caller
 */
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
			 enum ib_qp_type type)
	__must_hold(&qp->s_lock)
	__must_hold(&qp->s_hlock)
	__must_hold(&qp->r_lock)
{
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;

		/* Let drivers flush their waitlist */
		rdi->driver_f.flush_qp_waiters(qp);
		rvt_stop_rc_timers(qp);
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_ANY_WAIT);
		spin_unlock(&qp->s_lock);
		spin_unlock(&qp->s_hlock);
		spin_unlock_irq(&qp->r_lock);

		/* Stop the send queue and the retry timer */
		rdi->driver_f.stop_send_queue(qp);
		rvt_del_timers_sync(qp);
		/* Wait for things to stop */
		rdi->driver_f.quiesce_qp(qp);

		/* take qp out of the hash and wait for it to be unused */
		rvt_remove_qp(rdi, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));

		/* grab the lock b/c it was locked at call time */
		spin_lock_irq(&qp->r_lock);
		spin_lock(&qp->s_hlock);
		spin_lock(&qp->s_lock);

		rvt_clear_mr_refs(qp, 1);
		/*
		 * Let the driver do any tear down or re-init it needs to for
		 * a qp that has been reset
		 */
		rdi->driver_f.notify_qp_reset(qp);
	}
	rvt_init_qp(rdi, qp, type);
	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_hlock);
	lockdep_assert_held(&qp->s_lock);
}

/**
 * rvt_free_qpn - Free a qpn from the bit map
 * @qpt: QP table
 * @qpn: queue pair number to free
 */
static void rvt_free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
	struct rvt_qpn_map *map;

	map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}

/**
 * rvt_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Queue pair creation is mostly an rvt issue. However, drivers have their
 * own unique idea of what queue pair numbers mean. For instance there is a
 * reserved range for PSM.
 *
 * Return: the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct rvt_qp *qp;
	int err;
	struct rvt_swqe *swq = NULL;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret = ERR_PTR(-ENOMEM);
	struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device);
	void *priv = NULL;
	size_t sqsize;

	if (!rdi)
		return ERR_PTR(-EINVAL);

	if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge ||
	    init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr ||
	    init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge ||
		    init_attr->cap.max_recv_wr > rdi->dparms.props.max_qp_wr)
			return ERR_PTR(-EINVAL);

		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0)
			return ERR_PTR(-EINVAL);
	}
	sqsize =
		init_attr->cap.max_send_wr + 1 +
		rdi->dparms.reserved_operations;
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt)
			return ERR_PTR(-EINVAL);
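		/* fall through: SMI/GSI share the setup below */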
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct rvt_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct rvt_swqe);
		swq = vzalloc_node(sqsize * sz, rdi->dparms.node);
		if (!swq)
			return ERR_PTR(-ENOMEM);

		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct rvt_srq *srq = ibsrq_to_rvtsrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL,
				  rdi->dparms.node);
		if (!qp)
			goto bail_swq;

		RCU_INIT_POINTER(qp->next, NULL);
		if (init_attr->qp_type == IB_QPT_RC) {
			qp->s_ack_queue =
				kzalloc_node(
					sizeof(*qp->s_ack_queue) *
					 rvt_max_atomic(rdi),
					GFP_KERNEL,
					rdi->dparms.node);
			if (!qp->s_ack_queue)
				goto bail_qp;
		}
		/* initialize timers needed for rc qp */
		setup_timer(&qp->s_timer, rvt_rc_timeout, (unsigned long)qp);
		hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		qp->s_rnr_timer.function = rvt_rc_rnr_retry;

		/*
		 * Driver needs to set up its private QP structure and do any
		 * initialization that is needed.
		 */
		priv = rdi->driver_f.qp_priv_alloc(rdi, qp);
		if (IS_ERR(priv)) {
			ret = priv;
			goto bail_qp;
		}
		qp->priv = priv;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
					 1000UL);
		if (init_attr->srq) {
			sz = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct rvt_rwqe);
			if (udata)
				qp->r_rq.wq = vmalloc_user(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz);
			else
				qp->r_rq.wq = vzalloc_node(
						sizeof(struct rvt_rwq) +
						qp->r_rq.size * sz,
						rdi->dparms.node);
			if (!qp->r_rq.wq)
				goto bail_driver_priv;
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_hlock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		atomic_set(&qp->local_ops_pending, 0);
		init_waitqueue_head(&qp->wait);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = sqsize;
		qp->s_avail = init_attr->cap.max_send_wr;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = RVT_S_SIGNAL_REQ_WR;

		err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table,
				init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			goto bail_rq_wq;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		rvt_init_qp(rdi, qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_qpn;
			}
		} else {
			u32 s = sizeof(struct rvt_rwq) + qp->r_rq.size * sz;

			qp->ip = rvt_create_mmap_info(rdi, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qpn;
			}

			err = ib_copy_to_udata(udata, &qp->ip->offset,
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
		qp->pid = current->pid;
	}

	spin_lock(&rdi->n_qps_lock);
	if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
		spin_unlock(&rdi->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	rdi->n_qps_allocated++;
	/*
	 * Maintain a busy_jiffies variable that will be added to the timeout
	 * period in mod_retry_timer and add_retry_timer. This busy jiffies
	 * is scaled by the number of rc qps created for the device to reduce
	 * the number of timeouts occurring when there is a large number of
	 * qps. busy_jiffies is incremented every rc qp scaling interval.
	 * The scaling interval is selected based on extensive performance
	 * evaluation of targeted workloads.
	 */
	if (init_attr->qp_type == IB_QPT_RC) {
		rdi->n_rc_qps++;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&qp->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	ret = &qp->ibqp;

	/*
	 * We have our QP and it's good; now keep track of what types of opcodes
	 * can be processed on this QP. We do this by keeping track of what the
	 * 3 high order bits of the opcode are.
	 */
	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_UD:
		qp->allowed_ops = IB_OPCODE_UD;
		break;
	case IB_QPT_RC:
		qp->allowed_ops = IB_OPCODE_RC;
		break;
	case IB_QPT_UC:
		qp->allowed_ops = IB_OPCODE_UC;
		break;
	default:
		ret = ERR_PTR(-EINVAL);
		goto bail_ip;
	}

	return ret;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);

bail_qpn:
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

bail_rq_wq:
	if (!qp->ip)
		vfree(qp->r_rq.wq);

bail_driver_priv:
	rdi->driver_f.qp_priv_free(rdi, qp);

bail_qp:
	kfree(qp->s_ack_queue);
	kfree(qp);

bail_swq:
	vfree(swq);

	return ret;
}
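
/*
 * A hedged sketch of the user-space side of the mmap handshake above
 * (not part of this file; the "resp" structure, length, and fd names
 * are assumptions for illustration). A zero offset means there is no
 * receive queue to map (the SRQ case); otherwise the provider hands
 * the offset back to the kernel, and rvt_mmap() resolves it to
 * qp->r_rq.wq:
 *
 *	if (resp.offset)
 *		rwq = mmap(NULL, rwq_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, cmd_fd, resp.offset);
 */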

/**
 * rvt_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 *
 * Return: true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err)
{
	struct ib_wc wc;
	int ret = 0;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);

	lockdep_assert_held(&qp->r_lock);
	lockdep_assert_held(&qp->s_lock);
	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & RVT_S_ANY_WAIT_SEND)
		qp->s_flags &= ~RVT_S_ANY_WAIT_SEND;

	rdi->driver_f.notify_error_qp(qp);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (ACCESS_ONCE(qp->s_last) != qp->s_head)
		rdi->driver_f.schedule_send(qp);

	rvt_clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct rvt_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = rvt_get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler) {
		ret = 1;
	}

bail:
	return ret;
}
EXPORT_SYMBOL(rvt_error_qp);

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void rvt_insert_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];
	unsigned long flags;

	rvt_get_qp(qp);
	spin_lock_irqsave(&rdi->qp_dev->qpt_lock, flags);

	if (qp->ibqp.qp_num <= 1) {
		rcu_assign_pointer(rvp->qp[qp->ibqp.qp_num], qp);
	} else {
		u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);

		qp->next = rdi->qp_dev->qp_table[n];
		rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp);
		trace_rvt_qpinsert(qp, n);
	}

	spin_unlock_irqrestore(&rdi->qp_dev->qpt_lock, flags);
}

/**
 * rvt_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Return: 0 on success, otherwise returns an errno.
 */
int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int pmtu = 0; /* for gcc warning only */
	enum rdma_link_layer link;
	int opa_ah;

	link = rdma_port_get_link_layer(ibqp->device, qp->port_num);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
	opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, link))
		goto inval;

	if (rdi->driver_f.check_modify_qp &&
	    rdi->driver_f.check_modify_qp(qp, attr, attr_mask, udata))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (opa_ah) {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				opa_get_mcast_base(OPA_MCAST_NR))
				goto inval;
		} else {
			if (rdma_ah_get_dlid(&attr->alt_ah_attr) >=
				be16_to_cpu(IB_MULTICAST_LID_BASE))
				goto inval;
		}

		if (rvt_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= rvt_get_npkeys(rdi))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= rvt_get_npkeys(rdi))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > RVT_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values. OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu). We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		pmtu = rdi->driver_f.get_pmtu_from_attr(rdi, qp, attr);
		if (pmtu < 0)
			goto inval;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else {
			goto inval;
		}
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > rdi->dparms.max_rdma_atomic)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET)
			rvt_reset_qp(rdi, qp, ibqp->qp_type);
		break;

	case IB_QPS_RTR:
		/* Allow event to re-trigger if QP set to RTR more than once */
		qp->r_flags &= ~RVT_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & rdi->dparms.psn_modify_mask;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & rdi->dparms.psn_modify_mask;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = rdma_ah_get_static_rate(&attr->ah_attr);
		qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->pmtu = rdi->driver_f.mtu_from_qp(rdi, qp, pmtu);
		qp->log_pmtu = ilog2(qp->pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	if (rdi->driver_f.modify_qp)
		rdi->driver_f.modify_qp(qp, attr, attr_mask, udata);

	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		rvt_insert_qp(rdi, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	return 0;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	return -EINVAL;
}

/**
 * rvt_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 *
 * Return: 0 on success.
 */
int rvt_destroy_qp(struct ib_qp *ibqp)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	rvt_reset_qp(rdi, qp, ibqp->qp_type);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);

	/* qpn is now available for use again */
	rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);

	spin_lock(&rdi->n_qps_lock);
	rdi->n_qps_allocated--;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		rdi->n_rc_qps--;
		rdi->busy_jiffies = rdi->n_rc_qps / RC_QP_SCALING_INTERVAL;
	}
	spin_unlock(&rdi->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, rvt_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	rdi->driver_f.qp_priv_free(rdi, qp);
	kfree(qp->s_ack_queue);
	kfree(qp);
	return 0;
}

/**
 * rvt_query_qp - query an ibqp
 * @ibqp: IB qp to query
 * @attr: attr struct to fill in
 * @attr_mask: attr mask ignored
 * @init_attr: struct to fill in
 *
 * Return: always 0
 */
int rvt_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = rdi->driver_f.mtu_to_path_mtu(qp->pmtu);
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & rdi->dparms.psn_mask;
	attr->sq_psn = qp->s_next_psn & rdi->dparms.psn_mask;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1 -
		rdi->dparms.reserved_operations;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num =
		rdma_ah_get_port_num(&qp->alt_ah_attr);
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & RVT_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * rvt_post_recv - post a receive on a QP
 * @ibqp: the QP to post the receive on
 * @wr: the WR to post
 * @bad_wr: the first bad WR is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success otherwise errno
 */
int rvt_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		  struct ib_recv_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_rwq *wq = qp->r_rq.wq;
	unsigned long flags;
	int qp_err_flush = (ib_rvt_state_ops[qp->state] & RVT_FLUSH_RECV) &&
				!qp->ibqp.srq;

	/* Check that state is OK to post receive. */
	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
		*bad_wr = wr;
		return -EINVAL;
	}

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > qp->r_rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&qp->r_rq.lock, flags);
		next = wq->head + 1;
		if (next >= qp->r_rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}
		if (unlikely(qp_err_flush)) {
			struct ib_wc wc;

			memset(&wc, 0, sizeof(wc));
			wc.qp = &qp->ibqp;
			wc.opcode = IB_WC_RECV;
			wc.wr_id = wr->wr_id;
			wc.status = IB_WC_WR_FLUSH_ERR;
			rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, 1);
		} else {
			wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
			wqe->wr_id = wr->wr_id;
			wqe->num_sge = wr->num_sge;
			for (i = 0; i < wr->num_sge; i++)
				wqe->sg_list[i] = wr->sg_list[i];
			/*
			 * Make sure queue entry is written
			 * before the head index.
			 */
			smp_wmb();
			wq->head = next;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	}
	return 0;
}

/**
 * rvt_qp_valid_operation - validate post send wr request
 * @qp - the qp
 * @post_parms - the post send table for the driver
 * @wr - the work request
 *
 * The routine validates the operation based on the
 * validation table and returns the length of the operation
 * which can extend beyond the ib_send_wr. Operation
 * dependent flags key atomic operation validation.
 *
 * There is an exception for UD qps that validates the pd and
 * overrides the length to include the additional UD specific
 * length.
 *
 * Returns a negative error or the length of the work request
 * for building the swqe.
 */
static inline int rvt_qp_valid_operation(
	struct rvt_qp *qp,
	const struct rvt_operation_params *post_parms,
	struct ib_send_wr *wr)
{
	int len;

	if (wr->opcode >= RVT_OPERATION_MAX || !post_parms[wr->opcode].length)
		return -EINVAL;
	if (!(post_parms[wr->opcode].qpt_support & BIT(qp->ibqp.qp_type)))
		return -EINVAL;
	if ((post_parms[wr->opcode].flags & RVT_OPERATION_PRIV) &&
	    ibpd_to_rvtpd(qp->ibqp.pd)->user)
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC_SGE &&
	    (wr->num_sge == 0 ||
	     wr->sg_list[0].length < sizeof(u64) ||
	     wr->sg_list[0].addr & (sizeof(u64) - 1)))
		return -EINVAL;
	if (post_parms[wr->opcode].flags & RVT_OPERATION_ATOMIC &&
	    !qp->s_max_rd_atomic)
		return -EINVAL;
	len = post_parms[wr->opcode].length;
	/* UD specific */
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
			return -EINVAL;
		len = sizeof(struct ib_ud_wr);
	}
	return len;
}

/**
 * rvt_qp_is_avail - determine queue capacity
 * @qp - the qp
 * @rdi - the rdmavt device
 * @reserved_op - is this a reserved operation
 *
 * This assumes the s_hlock is held but the s_last
 * qp variable is uncontrolled.
 *
 * For non reserved operations, the qp->s_avail
 * may be changed.
 *
 * The return value is zero or a -ENOMEM.
 */
static inline int rvt_qp_is_avail(
	struct rvt_qp *qp,
	struct rvt_dev_info *rdi,
	bool reserved_op)
{
	u32 slast;
	u32 avail;
	u32 reserved_used;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	reserved_used = atomic_read(&qp->s_reserved_used);
	if (unlikely(reserved_op)) {
		/* see rvt_qp_wqe_unreserve() */
		smp_mb__before_atomic();
		if (reserved_used >= rdi->dparms.reserved_operations)
			return -ENOMEM;
		return 0;
	}
	/* non-reserved operations */
	if (likely(qp->s_avail))
		return 0;
	smp_read_barrier_depends(); /* see rc.c */
	slast = ACCESS_ONCE(qp->s_last);
	if (qp->s_head >= slast)
		avail = qp->s_size - (qp->s_head - slast);
	else
		avail = slast - qp->s_head;

	/* see rvt_qp_wqe_unreserve() */
	smp_mb__before_atomic();
	reserved_used = atomic_read(&qp->s_reserved_used);
	avail = avail - 1 -
		(rdi->dparms.reserved_operations - reserved_used);
	/* ensure we don't assign a negative s_avail */
	if ((s32)avail <= 0)
		return -ENOMEM;
	qp->s_avail = avail;
	if (WARN_ON(qp->s_avail >
		    (qp->s_size - 1 - rdi->dparms.reserved_operations)))
		rvt_pr_err(rdi,
			   "More avail entries than QP RB size.\nQP: %u, size: %u, avail: %u\nhead: %u, tail: %u, cur: %u, acked: %u, last: %u",
			   qp->ibqp.qp_num, qp->s_size, qp->s_avail,
			   qp->s_head, qp->s_tail, qp->s_cur,
			   qp->s_acked, qp->s_last);
	return 0;
}
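
/*
 * Worked example of the ring arithmetic above (numbers are
 * illustrative): with s_size = 16, s_head = 10, s_last = 3,
 * reserved_operations = 2 and reserved_used = 0, the raw gap is
 * 16 - (10 - 3) = 9 slots; one slot is sacrificed to distinguish
 * full from empty and two are held back for reserved operations,
 * so s_avail becomes 9 - 1 - 2 = 6.
 */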

/**
 * rvt_post_one_wr - post one RC, UC, or UD send work request
 * @qp: the QP to post on
 * @wr: the work request to send
 */
static int rvt_post_one_wr(struct rvt_qp *qp,
			   struct ib_send_wr *wr,
			   int *call_send)
{
	struct rvt_swqe *wqe;
	u32 next;
	int i;
	int j;
	int acc;
	struct rvt_lkey_table *rkt;
	struct rvt_pd *pd;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u8 log_pmtu;
	int ret;
	size_t cplen;
	bool reserved_op;
	int local_ops_delayed = 0;

	BUILD_BUG_ON(IB_QPT_MAX >= (sizeof(u32) * BITS_PER_BYTE));

	/* IB spec says that num_sge == 0 is OK. */
	if (unlikely(wr->num_sge > qp->s_max_sge))
		return -EINVAL;

	ret = rvt_qp_valid_operation(qp, rdi->post_parms, wr);
	if (ret < 0)
		return ret;
	cplen = ret;

	/*
	 * Local operations include fast register and local invalidate.
	 * Fast register needs to be processed immediately because the
	 * registered lkey may be used by following work requests and the
	 * lkey needs to be valid at the time those requests are posted.
	 * Local invalidate can be processed immediately if fencing is
	 * not required and no previous local invalidate ops are pending.
	 * Signaled local operations that have been processed immediately
	 * need to have requests with "completion only" flags set posted
	 * to the send queue in order to generate completions.
	 */
	if ((rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL)) {
		switch (wr->opcode) {
		case IB_WR_REG_MR:
			ret = rvt_fast_reg_mr(qp,
					      reg_wr(wr)->mr,
					      reg_wr(wr)->key,
					      reg_wr(wr)->access);
			if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
				return ret;
			break;
		case IB_WR_LOCAL_INV:
			if ((wr->send_flags & IB_SEND_FENCE) ||
			    atomic_read(&qp->local_ops_pending)) {
				local_ops_delayed = 1;
			} else {
				ret = rvt_invalidate_rkey(
					qp, wr->ex.invalidate_rkey);
				if (ret || !(wr->send_flags & IB_SEND_SIGNALED))
					return ret;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	reserved_op = rdi->post_parms[wr->opcode].flags &
			RVT_OPERATION_USE_RESERVE;
	/* check for avail */
	ret = rvt_qp_is_avail(qp, rdi, reserved_op);
	if (ret)
		return ret;
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;

	rkt = &rdi->lkey_table;
	pd = ibpd_to_rvtpd(qp->ibqp.pd);
	wqe = rvt_get_swqe_ptr(qp, qp->s_head);

	/* cplen has length from above */
	memcpy(&wqe->wr, wr, cplen);

	wqe->length = 0;
	j = 0;
	if (wr->num_sge) {
		struct rvt_sge *last_sge = NULL;

		acc = wr->opcode >= IB_WR_RDMA_READ ?
			IB_ACCESS_LOCAL_WRITE : 0;
		for (i = 0; i < wr->num_sge; i++) {
			u32 length = wr->sg_list[i].length;

			if (length == 0)
				continue;
			ret = rvt_lkey_ok(rkt, pd, &wqe->sg_list[j], last_sge,
					  &wr->sg_list[i], acc);
			if (unlikely(ret < 0))
				goto bail_inval_free;
			wqe->length += length;
			if (ret)
				last_sge = &wqe->sg_list[j];
			j += ret;
		}
		wqe->wr.num_sge = j;
	}

	/* general part of wqe valid - allow for driver checks */
	if (rdi->driver_f.check_send_wqe) {
		ret = rdi->driver_f.check_send_wqe(qp, wqe);
		if (ret < 0)
			goto bail_inval_free;
		if (ret)
			*call_send = ret;
	}

	log_pmtu = qp->log_pmtu;
	if (qp->ibqp.qp_type != IB_QPT_UC &&
	    qp->ibqp.qp_type != IB_QPT_RC) {
		struct rvt_ah *ah = ibah_to_rvtah(wqe->ud_wr.ah);

		log_pmtu = ah->log_pmtu;
		atomic_inc(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
	}

	if (rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL) {
		if (local_ops_delayed)
			atomic_inc(&qp->local_ops_pending);
		else
			wqe->wr.send_flags |= RVT_SEND_COMPLETION_ONLY;
		wqe->ssn = 0;
		wqe->psn = 0;
		wqe->lpsn = 0;
	} else {
		wqe->ssn = qp->s_ssn++;
		wqe->psn = qp->s_next_psn;
		wqe->lpsn = wqe->psn +
				(wqe->length ?
					((wqe->length - 1) >> log_pmtu) :
					0);
		qp->s_next_psn = wqe->lpsn + 1;
	}
	if (unlikely(reserved_op)) {
		wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
		rvt_qp_wqe_reserve(qp, wqe);
	} else {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		qp->s_avail--;
	}
	trace_rvt_post_one_wr(qp, wqe, wr->num_sge);
	smp_wmb(); /* see request builders */
	qp->s_head = next;

	return 0;

bail_inval_free:
	/* release mr holds */
	while (j) {
		struct rvt_sge *sge = &wqe->sg_list[--j];

		rvt_put_mr(sge->mr);
	}
	return ret;
}
1884
1885 /**
1886 * rvt_post_send - post a send on a QP
1887 * @ibqp: the QP to post the send on
1888 * @wr: the list of work requests to post
1889 * @bad_wr: the first bad WR is put here
1890 *
1891 * This may be called from interrupt context.
1892 *
1893 * Return: 0 on success else errno
1894 */
int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		  struct ib_send_wr **bad_wr)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	unsigned long flags = 0;
	int call_send;
	unsigned nreq = 0;
	int err = 0;

	spin_lock_irqsave(&qp->s_hlock, flags);

	/*
	 * Ensure QP state is such that we can send. If not bail out early,
	 * there is no need to do this every time we post a send.
	 */
	if (unlikely(!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))) {
		spin_unlock_irqrestore(&qp->s_hlock, flags);
		return -EINVAL;
	}

	/*
	 * If the send queue is empty, and we only have a single WR then just go
	 * ahead and kick the send engine into gear. Otherwise we will always
	 * just schedule the send to happen later.
	 */
	call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next;

	for (; wr; wr = wr->next) {
		err = rvt_post_one_wr(qp, wr, &call_send);
		if (unlikely(err)) {
			*bad_wr = wr;
			goto bail;
		}
		nreq++;
	}
bail:
	spin_unlock_irqrestore(&qp->s_hlock, flags);
	if (nreq) {
		if (call_send)
			rdi->driver_f.do_send(qp);
		else
			rdi->driver_f.schedule_send_no_lock(qp);
	}
	return err;
}
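
/*
 * Illustrative sketch (hypothetical; not part of this driver): how a
 * consumer might post a single signaled RDMA WRITE through the entry
 * point above. The helper name, its parameters, and the __maybe_unused
 * marking are assumptions for illustration; real consumers post through
 * the ib_qp verbs table that points at rvt_post_send().
 */
static int __maybe_unused example_post_rdma_write(struct ib_qp *ibqp,
						  struct ib_sge *sge,
						  u64 remote_addr, u32 rkey)
{
	struct ib_rdma_wr wr = { };
	struct ib_send_wr *bad_wr;

	wr.wr.opcode = IB_WR_RDMA_WRITE;
	wr.wr.send_flags = IB_SEND_SIGNALED;	/* request a completion */
	wr.wr.sg_list = sge;
	wr.wr.num_sge = 1;
	wr.remote_addr = remote_addr;
	wr.rkey = rkey;

	/* On failure, bad_wr points at the offending WR in the chain. */
	return rvt_post_send(ibqp, &wr.wr, &bad_wr);
}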

/**
 * rvt_post_srq_recv - post a receive on a shared receive queue
 * @ibsrq: the SRQ to post the receive on
 * @wr: the list of work requests to post
 * @bad_wr: A pointer to the first WR to cause a problem is put here
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success else errno
 */
int rvt_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct rvt_srq *srq = ibsrq_to_rvtsrq(ibsrq);
	struct rvt_rwq *wq;
	unsigned long flags;

	for (; wr; wr = wr->next) {
		struct rvt_rwqe *wqe;
		u32 next;
		int i;

		if ((unsigned)wr->num_sge > srq->rq.max_sge) {
			*bad_wr = wr;
			return -EINVAL;
		}

		spin_lock_irqsave(&srq->rq.lock, flags);
		wq = srq->rq.wq;
		next = wq->head + 1;
		if (next >= srq->rq.size)
			next = 0;
		if (next == wq->tail) {
			spin_unlock_irqrestore(&srq->rq.lock, flags);
			*bad_wr = wr;
			return -ENOMEM;
		}

		wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
		wqe->wr_id = wr->wr_id;
		wqe->num_sge = wr->num_sge;
		for (i = 0; i < wr->num_sge; i++)
			wqe->sg_list[i] = wr->sg_list[i];
		/* Make sure queue entry is written before the head index. */
		smp_wmb();
		wq->head = next;
		spin_unlock_irqrestore(&srq->rq.lock, flags);
	}
	return 0;
}
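
/*
 * Illustrative sketch (hypothetical; not part of this driver): posting a
 * single receive buffer to an SRQ via the function above. The helper name
 * and its one-SGE shape are assumptions for illustration.
 */
static int __maybe_unused example_post_one_srq_buf(struct ib_srq *ibsrq,
						   u64 wr_id,
						   struct ib_sge *sge)
{
	struct ib_recv_wr wr = { };
	struct ib_recv_wr *bad_wr;

	wr.wr_id = wr_id;	/* returned in the receive completion */
	wr.sg_list = sge;
	wr.num_sge = 1;

	return rvt_post_srq_recv(ibsrq, &wr, &bad_wr);
}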

/**
 * rvt_comm_est - handle trap with QP established
 * @qp: the QP
 */
void rvt_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_comm_est);

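/**
 * rvt_rc_error - move the QP into the error state after an RC error
 * @qp: the QP
 * @err: the completion status to use when flushing work requests
 *
 * If the transition to the error state reaches the last WQE, the
 * IB_EVENT_QP_LAST_WQE_REACHED event is delivered to the consumer.
 */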
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err)
{
	unsigned long flags;
	int lastwqe;

	spin_lock_irqsave(&qp->s_lock, flags);
	lastwqe = rvt_error_qp(qp, err);
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}
EXPORT_SYMBOL(rvt_rc_error);
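
/*
 * Illustrative sketch (hypothetical; not part of this driver): a receive
 * path that detects an unrecoverable protocol error could flush the QP
 * like this. The helper name and the chosen status are assumptions.
 */
static void __maybe_unused example_fail_qp(struct rvt_qp *qp)
{
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}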

/**
 * rvt_rnr_tbl_to_usec - convert an RNR timeout index to microseconds
 * @index - the index into ib_rvt_rnr_table
 *
 * Return: the RNR timeout in microseconds for @index
 */
unsigned long rvt_rnr_tbl_to_usec(u32 index)
{
	return ib_rvt_rnr_table[(index & IB_AETH_CREDIT_MASK)];
}
EXPORT_SYMBOL(rvt_rnr_tbl_to_usec);

static inline unsigned long rvt_aeth_to_usec(u32 aeth)
{
	return ib_rvt_rnr_table[(aeth >> IB_AETH_CREDIT_SHIFT) &
				IB_AETH_CREDIT_MASK];
}

/**
 * rvt_add_retry_timer - add/start a retry timer
 * @qp - the QP
 * add a retry timer on the QP
 */
void rvt_add_retry_timer(struct rvt_qp *qp)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout) */
	qp->s_timer.expires = jiffies + qp->timeout_jiffies +
			      rdi->busy_jiffies;
	add_timer(&qp->s_timer);
}
EXPORT_SYMBOL(rvt_add_retry_timer);
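
/*
 * Illustrative sketch (hypothetical; not part of this driver): a request
 * builder arms the retry timer under the QP send lock once a new RC
 * request is on the wire. The helper name and the guard against
 * re-arming an already pending timer are assumptions for illustration.
 */
static void __maybe_unused example_arm_retry_timer(struct rvt_qp *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(qp->s_flags & RVT_S_TIMER))
		rvt_add_retry_timer(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
}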

/**
 * rvt_add_rnr_timer - add/start an rnr timer
 * @qp - the QP
 * @aeth - aeth of RNR timeout, simulated aeth for loopback
 * add an rnr timer on the QP
 */
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth)
{
	u32 to;

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_WAIT_RNR;
	to = rvt_aeth_to_usec(aeth);
	hrtimer_start(&qp->s_rnr_timer,
		      ns_to_ktime(1000 * to), HRTIMER_MODE_REL);
}
EXPORT_SYMBOL(rvt_add_rnr_timer);
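
/*
 * Illustrative sketch (hypothetical; not part of this driver): an ACK
 * processing path that sees an RNR NAK pauses the sender for the interval
 * encoded in the packet's AETH. The helper name is an assumption; the
 * caller must already hold the send lock, as rvt_add_rnr_timer() asserts.
 */
static void __maybe_unused example_handle_rnr_nak(struct rvt_qp *qp, u32 aeth)
{
	lockdep_assert_held(&qp->s_lock);
	rvt_add_rnr_timer(qp, aeth);
}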

/**
 * rvt_stop_rc_timers - stop all timers
 * @qp - the QP
 * stop any pending timers
 */
void rvt_stop_rc_timers(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from all timers */
	if (qp->s_flags & (RVT_S_TIMER | RVT_S_WAIT_RNR)) {
		qp->s_flags &= ~(RVT_S_TIMER | RVT_S_WAIT_RNR);
		del_timer(&qp->s_timer);
		hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
}
EXPORT_SYMBOL(rvt_stop_rc_timers);

/**
 * rvt_stop_rnr_timer - stop an rnr timer
 * @qp - the QP
 *
 * stop an rnr timer and return whether the timer
 * was pending.
 */
static int rvt_stop_rnr_timer(struct rvt_qp *qp)
{
	int rval = 0;

	lockdep_assert_held(&qp->s_lock);
	/* Remove QP from rnr timer */
	if (qp->s_flags & RVT_S_WAIT_RNR) {
		qp->s_flags &= ~RVT_S_WAIT_RNR;
		rval = hrtimer_try_to_cancel(&qp->s_rnr_timer);
	}
	return rval;
}

/**
 * rvt_del_timers_sync - wait for any timeout routines to exit
 * @qp - the QP
 */
void rvt_del_timers_sync(struct rvt_qp *qp)
{
	del_timer_sync(&qp->s_timer);
	hrtimer_cancel(&qp->s_rnr_timer);
}
EXPORT_SYMBOL(rvt_del_timers_sync);

/**
 * rvt_rc_timeout - retry timer expiry handler
 * @arg: the QP, cast to an unsigned long
 *
 * This is called from s_timer for missing responses.
 */
static void rvt_rc_timeout(unsigned long arg)
{
	struct rvt_qp *qp = (struct rvt_qp *)arg;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->r_lock, flags);
	spin_lock(&qp->s_lock);
	if (qp->s_flags & RVT_S_TIMER) {
		struct rvt_ibport *rvp = rdi->ports[qp->port_num - 1];

		qp->s_flags &= ~RVT_S_TIMER;
		rvp->n_rc_timeouts++;
		del_timer(&qp->s_timer);
		trace_rvt_rc_timeout(qp, qp->s_last_psn + 1);
		if (rdi->driver_f.notify_restart_rc)
			rdi->driver_f.notify_restart_rc(qp,
							qp->s_last_psn + 1,
							1);
		rdi->driver_f.schedule_send(qp);
	}
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);
}

/*
 * This is called from s_rnr_timer when an RNR timeout expires.
 */
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t)
{
	struct rvt_qp *qp = container_of(t, struct rvt_qp, s_rnr_timer);
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	rvt_stop_rnr_timer(qp);
	rdi->driver_f.schedule_send(qp);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return HRTIMER_NORESTART;
}
EXPORT_SYMBOL(rvt_rc_rnr_retry);
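
/*
 * Illustrative sketch (hypothetical; not part of this driver): before
 * rvt_add_rnr_timer() can arm s_rnr_timer, the hrtimer must be set up
 * once per QP with rvt_rc_rnr_retry() as its expiry callback, along the
 * lines below. The helper name is an assumption; QP creation is expected
 * to perform the equivalent initialization.
 */
static void __maybe_unused example_init_rnr_timer(struct rvt_qp *qp)
{
	hrtimer_init(&qp->s_rnr_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	qp->s_rnr_timer.function = rvt_rc_rnr_retry;
}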

/**
 * rvt_qp_iter_init - initialize a QP iterator
 * @rdi - rvt devinfo
 * @v - u64 value
 * @cb - user-defined callback
 *
 * This returns an iterator suitable for iterating QPs
 * in the system.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * @cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * Use cases that require memory allocation to succeed
 * must preallocate appropriately.
 *
 * Return: a pointer to an rvt_qp_iter or NULL
 */
struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
				     u64 v,
				     void (*cb)(struct rvt_qp *qp, u64 v))
{
	struct rvt_qp_iter *i;

	i = kzalloc(sizeof(*i), GFP_KERNEL);
	if (!i)
		return NULL;

	i->rdi = rdi;
	/* number of special QPs (SMI/GSI) for device */
	i->specials = rdi->ibdev.phys_port_cnt * 2;
	i->v = v;
	i->cb = cb;

	return i;
}
EXPORT_SYMBOL(rvt_qp_iter_init);

/**
 * rvt_qp_iter_next - return the next QP in iter
 * @iter - the iterator
 *
 * Fine-grained QP iterator suitable for use
 * with debugfs seq_file mechanisms.
 *
 * Updates iter->qp with the current QP when the return
 * value is 0.
 *
 * Return: 0 - iter->qp is valid, 1 - no more QPs
 */
int rvt_qp_iter_next(struct rvt_qp_iter *iter)
	__must_hold(RCU)
{
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;
	struct rvt_dev_info *rdi = iter->rdi;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table. Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..rdi->qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < rdi->qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct rvt_ibport *rvp;
				int pidx;

				pidx = n % rdi->ibdev.phys_port_cnt;
				rvp = rdi->ports[pidx];
				qp = rcu_dereference(rvp->qp[n & 1]);
			} else {
				qp = rcu_dereference(
					rdi->qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}
EXPORT_SYMBOL(rvt_qp_iter_next);
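
/*
 * Illustrative sketch (hypothetical; not part of this driver): manual,
 * fine-grained iteration in the style of a debugfs seq_file, holding RCU
 * across each lookup. The dump helper and its pr_info() output are
 * assumptions; a NULL callback is acceptable here because
 * rvt_qp_iter_next() never invokes it.
 */
static void __maybe_unused example_dump_qps(struct rvt_dev_info *rdi)
{
	struct rvt_qp_iter *iter = rvt_qp_iter_init(rdi, 0, NULL);

	if (!iter)
		return;

	rcu_read_lock();
	while (!rvt_qp_iter_next(iter))
		pr_info("qpn 0x%x state %u\n",
			iter->qp->ibqp.qp_num, iter->qp->state);
	rcu_read_unlock();
	kfree(iter);
}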

/**
 * rvt_qp_iter - iterate all QPs
 * @rdi - rvt devinfo
 * @v - a 64 bit value
 * @cb - a callback
 *
 * This provides a way for iterating all QPs.
 *
 * The @cb is a user defined callback and @v is a 64
 * bit value passed to and relevant for processing in the
 * cb. An example use case would be to alter QP processing
 * based on criteria not part of the rvt_qp.
 *
 * The code has an internal iterator to simplify
 * non seq_file use cases.
 */
void rvt_qp_iter(struct rvt_dev_info *rdi,
		 u64 v,
		 void (*cb)(struct rvt_qp *qp, u64 v))
{
	int ret;
	struct rvt_qp_iter i = {
		.rdi = rdi,
		.specials = rdi->ibdev.phys_port_cnt * 2,
		.v = v,
		.cb = cb
	};

	rcu_read_lock();
	do {
		ret = rvt_qp_iter_next(&i);
		if (!ret) {
			rvt_get_qp(i.qp);
			rcu_read_unlock();
			i.cb(i.qp, i.v);
			rcu_read_lock();
			rvt_put_qp(i.qp);
		}
	} while (!ret);
	rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
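
/*
 * Illustrative sketch (hypothetical; not part of this driver): using the
 * callback-driven iterator above to count QPs, smuggling a counter
 * through the opaque @v value. Both helpers are assumptions for
 * illustration.
 */
static void __maybe_unused example_count_qp(struct rvt_qp *qp, u64 v)
{
	atomic_inc((atomic_t *)(uintptr_t)v);
}

static void __maybe_unused example_count_all_qps(struct rvt_dev_info *rdi,
						 atomic_t *count)
{
	rvt_qp_iter(rdi, (u64)(uintptr_t)count, example_count_qp);
}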