/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}

static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 *
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

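	/*
	 * Path migration is armed and the packet carries the MigReq bit,
	 * so validate it against the alternate path attributes before
	 * switching over.
	 */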
	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if ((be16_to_cpu(hdr->lrh[3]) !=
		     rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
		    ppd_from_ibp(ibp)->port !=
		    rdma_ah_get_port_num(&qp->alt_ah_attr))
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
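		/*
		 * Normal case: validate the packet against the primary
		 * (remote) path attributes.
		 */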
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) !=
		    rdma_ah_get_dlid(&qp->remote_ah_attr) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct rvt_qp *sqp)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
	struct rvt_qp *qp;
	struct rvt_swqe *wqe;
	struct rvt_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;
	enum ib_wc_status send_status;
	int release;
	int ret;

	rcu_read_lock();
	/*
	 * Note that we check the responder QP state after
	 * checking the requester's state.
	 */
	qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
	if (!qp)
		goto done;

	spin_lock_irqsave(&sqp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
	    !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		goto unlock;

	sqp->s_flags |= RVT_S_BUSY;

again:
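	/* Nothing to do if the send queue is empty (s_last has caught up with s_head). */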
	if (sqp->s_last == READ_ONCE(sqp->s_head))
		goto clr_busy;
	wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

	/* Return if it is not OK to start a new work request. */
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
			goto clr_busy;
		/* We are in the error state, flush the work request. */
		send_status = IB_WC_WR_FLUSH_ERR;
		goto flush_send;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 * We increment s_cur to indicate s_last is in progress.
	 */
	if (sqp->s_last == sqp->s_cur) {
		if (++sqp->s_cur >= sqp->s_size)
			sqp->s_cur = 0;
	}
	spin_unlock_irqrestore(&sqp->s_lock, flags);

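	/*
	 * The destination must still be able to receive and must be the
	 * same transport type as the sender; otherwise drop the packet.
	 */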
	if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
	    qp->ibqp.qp_type != sqp->ibqp.qp_type) {
		ibp->rvp.n_pkt_drops++;
		/*
		 * For RC, the requester would timeout and retry so
		 * shortcut the timeouts and just signal too many retries.
		 */
		if (sqp->ibqp.qp_type == IB_QPT_RC)
			send_status = IB_WC_RETRY_EXC_ERR;
		else
			send_status = IB_WC_SUCCESS;
		goto serr;
	}

	memset(&wc, 0, sizeof(wc));
	send_status = IB_WC_SUCCESS;

	release = 1;
	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
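	/* Set up the responder side according to the work-request opcode. */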
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		ret = rvt_get_rwqe(qp, false);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		if (wqe->length > qp->r_len)
			goto inv_err;
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = wqe->wr.ex.imm_data;
		ret = rvt_get_rwqe(qp, true);
		if (ret < 0)
			goto op_err;
		if (!ret)
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
			goto inv_err;
		if (wqe->length == 0)
			break;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_WRITE)))
			goto acc_err;
		qp->r_sge.sg_list = NULL;
		qp->r_sge.num_sge = 1;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
					  wqe->rdma_wr.remote_addr,
					  wqe->rdma_wr.rkey,
					  IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		release = 0;
		sqp->s_sge.sg_list = NULL;
		sqp->s_sge.num_sge = 1;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		qp->r_sge.total_len = wqe->length;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
			goto inv_err;
		if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
					  wqe->atomic_wr.remote_addr,
					  wqe->atomic_wr.rkey,
					  IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->atomic_wr.compare_add;
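		/*
		 * Both atomics return the prior contents of the target:
		 * fetch-and-add subtracts the addend back out of the
		 * atomic64_add_return() result, and cmpxchg() returns the
		 * old value whether or not the swap occurred.
		 */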
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->atomic_wr.swap);
		rvt_put_mr(qp->r_sge.sge.mr);
		qp->r_sge.num_sge = 0;
		goto send_comp;

	default:
		send_status = IB_WC_LOC_QP_OP_ERR;
		goto serr;
	}

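	/*
	 * Copy the payload from the sender's SGE list into the responder's
	 * r_sge, advancing both lists one contiguous chunk at a time.
	 */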
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (!release)
				rvt_put_mr(sge->mr);
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}
	if (release)
		rvt_put_ss(&qp->r_sge);

	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
	wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
	wc.port_num = 1;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	spin_lock_irqsave(&sqp->s_lock, flags);
	ibp->rvp.n_loop_pkts++;
flush_send:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
	qib_send_complete(sqp, wqe, send_status);
	goto again;

rnr_nak:
	/* Handle RNR NAK */
	if (qp->ibqp.qp_type == IB_QPT_UC)
		goto send_comp;
	ibp->rvp.n_rnr_naks++;
	/*
	 * Note: we don't need the s_lock held since the BUSY flag
	 * makes this single threaded.
	 */
	if (sqp->s_rnr_retry == 0) {
		send_status = IB_WC_RNR_RETRY_EXC_ERR;
		goto serr;
	}
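	/* An RNR retry count of 7 means retry forever; only decrement finite counts. */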
	if (sqp->s_rnr_retry_cnt < 7)
		sqp->s_rnr_retry--;
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
		goto clr_busy;
	rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
				IB_AETH_CREDIT_SHIFT);
	goto clr_busy;

op_err:
	send_status = IB_WC_REM_OP_ERR;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

inv_err:
	send_status =
		sqp->ibqp.qp_type == IB_QPT_RC ?
			IB_WC_REM_INV_REQ_ERR :
			IB_WC_SUCCESS;
	wc.status = IB_WC_LOC_QP_OP_ERR;
	goto err;

acc_err:
	send_status = IB_WC_REM_ACCESS_ERR;
	wc.status = IB_WC_LOC_PROT_ERR;
err:
	/* responder goes to error state */
	rvt_rc_error(qp, wc.status);

serr:
	spin_lock_irqsave(&sqp->s_lock, flags);
	qib_send_complete(sqp, wqe, send_status);
	if (sqp->ibqp.qp_type == IB_QPT_RC) {
		int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

		sqp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		if (lastwqe) {
			struct ib_event ev;

			ev.device = sqp->ibqp.device;
			ev.element.qp = &sqp->ibqp;
			ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
			sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
		}
		goto done;
	}
clr_busy:
	sqp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
	rcu_read_unlock();
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
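	/*
	 * PayLen counts the bytes that follow the GRH (BTH onward, payload
	 * and ICRC); hwords includes the two 32-bit LRH words, hence the -2.
	 */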
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	if (!grh->sgid_index)
		hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
	else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
		hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
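	/* Round the payload up to a 4-byte boundary; the pad count goes in the BTH. */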
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		qp->s_hdrwords +=
			qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
				     rdma_ah_read_grh(&qp->remote_ah_attr),
				     qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] =
		cpu_to_be16(ppd_from_ibp(ibp)->lid |
			    rdma_ah_get_path_bits(&qp->remote_ah_attr));
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}

void _qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;

	qib_do_send(qp);
}

/**
 * qib_do_send - perform a send on a QP
 * @qp: pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
	unsigned long flags;

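	/*
	 * A connected-mode DLID that matches our own port LID (ignoring
	 * the LMC path bits) never goes out on the wire; handle it as a
	 * local loopback instead.
	 */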
	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (rdma_ah_get_dlid(&qp->remote_ah_attr) &
	     ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		qib_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				return;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp, &flags));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
		       enum ib_wc_status status)
{
	u32 old_last, last;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
		return;

	last = qp->s_last;
	old_last = last;
	if (++last >= qp->s_size)
		last = 0;
	qp->s_last = last;
	/* See post_send() */
	barrier();
	rvt_put_swqe(wqe);
	if (qp->ibqp.qp_type == IB_QPT_UD ||
	    qp->ibqp.qp_type == IB_QPT_SMI ||
	    qp->ibqp.qp_type == IB_QPT_GSI)
		atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

	rvt_qp_swqe_complete(qp,
			     wqe,
			     ib_qib_wc_opcode[wqe->wr.opcode],
			     status);

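	/* Advance any other ring indices still pointing at the completed entry. */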
	if (qp->s_acked == old_last)
		qp->s_acked = last;
	if (qp->s_cur == old_last)
		qp->s_cur = last;
	if (qp->s_tail == old_last)
		qp->s_tail = last;
	if (qp->state == IB_QPS_SQD && last == qp->s_cur)
		qp->s_draining = 0;
}