/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");

static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

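/* Convert a QPN bitmap page and bit offset back into a QP number. */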
static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
			      struct rvt_qpn_map *map, unsigned off)
{
	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static const u16 credit_table[31] = {
	0,      /* 0 */
	1,      /* 1 */
	2,      /* 2 */
	3,      /* 3 */
	4,      /* 4 */
	6,      /* 5 */
	8,      /* 6 */
	12,     /* 7 */
	16,     /* 8 */
	24,     /* 9 */
	32,     /* A */
	48,     /* B */
	64,     /* C */
	96,     /* D */
	128,    /* E */
	192,    /* F */
	256,    /* 10 */
	384,    /* 11 */
	512,    /* 12 */
	768,    /* 13 */
	1024,   /* 14 */
	1536,   /* 15 */
	2048,   /* 16 */
	3072,   /* 17 */
	4096,   /* 18 */
	6144,   /* 19 */
	8192,   /* 1A */
	12288,  /* 1B */
	16384,  /* 1C */
	24576,  /* 1D */
	32768   /* 1E */
};

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};

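/* Free any verbs tx requests still queued on the QP's iowait tx list. */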
static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

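/*
 * Remove the QP from the device iowait list, if present, and drop the
 * reference taken when it was queued.
 */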
static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	write_seqlock_irqsave(&dev->iowait_lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(&dev->iowait_lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}

int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	}
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp: the qp
 * @wqe: the built wqe
 *
 * Validate the wqe.  This is called prior to inserting the wqe into
 * the ring, but after the wqe has been set up.
 *
 * Return: -EINVAL on failure, otherwise the boolean result of
 * wqe->length <= piothreshold.
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[ah->attr.sl] == 0xf)
			return -EINVAL;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

/**
 * hfi1_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 hfi1_compute_aeth(struct rvt_qp *qp)
{
	u32 aeth = qp->r_msn & HFI1_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= HFI1_AETH_CREDIT_INVAL << HFI1_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct rvt_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * There is a small chance that the pair of reads are
		 * not atomic, which is OK, since the fuzziness is
		 * resolved as further ACKs go out.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits) {
				max = x;
			} else {
				if (min == x)
					break;
				min = x;
			}
		}
		aeth |= x << HFI1_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress without regard to the s_flags.
 *
 * It is only used in the post send path, which does not hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

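/*
 * Wait for any outstanding PIO sends on this QP's send context to complete,
 * enabling the buffer-available interrupt so progress can be made while
 * waiting.
 */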
static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress; the caller must hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

/**
 * hfi1_get_credit - handle credit in aeth
 * @qp: the qp
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void hfi1_get_credit(struct rvt_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> HFI1_AETH_CREDIT_SHIFT) & HFI1_AETH_CREDIT_MASK;

	lockdep_assert_held(&qp->s_lock);
	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == HFI1_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= RVT_S_UNLIMITED_CREDIT;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & HFI1_MSN_MASK;
		if (cmp_msn(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & RVT_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~RVT_S_WAIT_SSN_CREDIT;
				hfi1_schedule_send(qp);
			}
		}
	}
}

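/*
 * Clear the given wait flag and reschedule the QP's send engine, then drop
 * the reference taken when the QP was queued to wait.
 */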
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

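/*
 * Called by the sdma engine when a tx request cannot be issued because no
 * descriptors are available.  Queue the request and the QP on the engine's
 * dmawait list and return -EBUSY, or return -EAGAIN if the engine has since
 * made progress.
 */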
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	unsigned seq)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			list_add_tail(&priv->s_iowait.list, &sde->dmawait);
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}

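/* Called by the sdma engine when descriptors become available again. */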
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}

/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

struct qp_iter {
	struct hfi1_ibdev *dev;
	struct rvt_qp *qp;
	int specials;
	int n;
};

struct qp_iter *qp_iter_init(struct hfi1_ibdev *dev)
{
	struct qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	iter->specials = dev->rdi.ibdev.phys_port_cnt * 2;

	return iter;
}

int qp_iter_next(struct qp_iter *iter)
{
	struct hfi1_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct rvt_qp *pqp = iter->qp;
	struct rvt_qp *qp;

	/*
	 * The approach is to consider the special qps
	 * as additional table entries before the
	 * real hash table.  Since the qp code sets
	 * the qp->next hash link to NULL, this works just fine.
	 *
	 * iter->specials is 2 * # ports
	 *
	 * n = 0..iter->specials is the special qp indices
	 *
	 * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
	 * the potential hash bucket entries
	 *
	 */
	for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
		if (pqp) {
			qp = rcu_dereference(pqp->next);
		} else {
			if (n < iter->specials) {
				struct hfi1_pportdata *ppd;
				struct hfi1_ibport *ibp;
				int pidx;

				pidx = n % dev->rdi.ibdev.phys_port_cnt;
				ppd = &dd_from_dev(dev)->pport[pidx];
				ibp = &ppd->ibport_data;

				if (!(n & 1))
					qp = rcu_dereference(ibp->rvp.qp[0]);
				else
					qp = rcu_dereference(ibp->rvp.qp[1]);
			} else {
				qp = rcu_dereference(
					dev->rdi.qp_dev->qp_table[
						(n - iter->specials)]);
			}
		}
		pqp = qp;
		if (qp) {
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

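/* A QP is idle when every send queue index has caught up to the head. */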
static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}

void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u %u f=%x %u %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_hdrwords,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid,
		   qp->remote_ah_attr.sl,
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid);
}

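/*
 * Mark the QP as having established communication and deliver an
 * IB_EVENT_COMM_EST event to the ULP, if a handler is registered.
 */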
void qp_comm_est(struct rvt_qp *qp)
{
	qp->r_flags |= RVT_R_COMM_EST;
	if (qp->ibqp.event_handler) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_COMM_EST;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

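/*
 * Allocate the hfi1-private part of a QP: the AHG state, the iowait
 * structure used for sdma/pio flow control, and the RNR retry timer.
 */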
void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		    gfp_t gfp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	setup_timer(&priv->s_rnr_timer, hfi1_rc_rnr_retry, (unsigned long)qp);
	qp->s_timer.function = hfi1_rc_timeout;
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

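/* Count the per-port special QPs (QP0 and QP1) that are still allocated. */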
unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
	hfi1_stop_rc_timers(qp);
}

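/* Cancel any pending send work and synchronously stop the QP's timers. */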
void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
	hfi1_del_timers_sync(qp);
}

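/* Drain outstanding sdma and PIO work, then flush the queued tx list. */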
void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}

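/* Reset driver-private QP state: the deferred-ack count and any AHG state. */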
void notify_qp_reset(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	priv->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = qp->alt_ah_attr.port_num;
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

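/*
 * Return the MTU to use for a QP, clamped to the MTU configured for the VL
 * that the QP's SL/SC maps to.
 */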
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	u32 mtu;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[qp->remote_ah_attr.sl];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}

int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

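/*
 * A QP has been moved to the error state; take it off any wait list and,
 * if it is not currently busy, release its pending send-side resources.
 */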
void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
	struct hfi1_qp_priv *priv = qp->priv;

	write_seqlock(&dev->iowait_lock);
	if (!list_empty(&priv->s_iowait.list) && !(qp->s_flags & RVT_S_BUSY)) {
		qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
		list_del_init(&priv->s_iowait.list);
		rvt_put_qp(qp);
	}
	write_sequnlock(&dev->iowait_lock);

	if (!(qp->s_flags & RVT_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state.  It is generally called to force upper layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct rvt_qp *qp = NULL;
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;
	int n;
	int lastwqe;
	struct ib_event ev;

	rcu_read_lock();

	/* Deal only with RC/UC qps that use the given SL. */
	for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
		for (qp = rcu_dereference(dev->rdi.qp_dev->qp_table[n]); qp;
		     qp = rcu_dereference(qp->next)) {
			if (qp->port_num == ppd->port &&
			    (qp->ibqp.qp_type == IB_QPT_UC ||
			     qp->ibqp.qp_type == IB_QPT_RC) &&
			    qp->remote_ah_attr.sl == sl &&
			    (ib_rvt_state_ops[qp->state] &
			     RVT_POST_SEND_OK)) {
				spin_lock_irq(&qp->r_lock);
				spin_lock(&qp->s_hlock);
				spin_lock(&qp->s_lock);
				lastwqe = rvt_error_qp(qp,
						       IB_WC_WR_FLUSH_ERR);
				spin_unlock(&qp->s_lock);
				spin_unlock(&qp->s_hlock);
				spin_unlock_irq(&qp->r_lock);
				if (lastwqe) {
					ev.device = qp->ibqp.device;
					ev.element.qp = &qp->ibqp;
					ev.event =
						IB_EVENT_QP_LAST_WQE_REACHED;
					qp->ibqp.event_handler(&ev,
							       qp->ibqp.qp_context);
				}
			}
		}
	}

	rcu_read_unlock();
}