/*
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct hfi1_qp *sqp, struct hfi1_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct hfi1_sge_state ssge;
	struct hfi1_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = hfi1_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

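	/*
	 * GSI QPs are UD on the wire, so treat IB_QPT_GSI as IB_QPT_UD
	 * when checking that the source and destination QP types match.
	 */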
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto drop;
	}

	ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u16 slid;
		u8 sc5 = ibp->sl_to_sc[ah_attr->sl];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (ah_attr->src_path_bits &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index, slid))) {
			hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey,
				       ah_attr->sl,
				       sqp->ibqp.qp_num, qp->ibqp.qp_num,
				       cpu_to_be16(slid),
				       cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				       ah_attr->sl,
				       sqp->ibqp.qp_num, qp->ibqp.qp_num,
				       cpu_to_be16(lid),
				       cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
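	/*
	 * If the previous packet was dropped for being too big, the
	 * receive SGE fetched for it is still valid; reuse it rather
	 * than consuming another receive work request entry.
	 */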
	if (qp->r_flags & HFI1_R_REUSE_SGE) {
		qp->r_flags &= ~HFI1_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_get_rwqe(qp, 0);
		if (ret < 0) {
			hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= HFI1_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}

	if (ah_attr->ah_flags & IB_AH_GRH) {
		hfi1_copy_sge(&qp->r_sge, &ah_attr->grh,
			      sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	}
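	/*
	 * Walk a private copy of the sender's SG list so the WQE itself
	 * is left untouched for the send-side completion.
	 */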
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
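			/*
			 * Finished the current segment of this memory
			 * region; advance to the next segment (and map
			 * entry when needed) before continuing the copy.
			 */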
			if (++sge->n >= HFI1_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	hfi1_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
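	/*
	 * If the destination is a management QP, report the pkey index
	 * the sending management QP placed in the work request;
	 * otherwise use the sender's current pkey index.
	 */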
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = HFI1_PERMISSIVE_LID;
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		      swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct hfi1_qp *qp)
{
	struct hfi1_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct hfi1_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;
	u8 sc5;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_hfi1_state_ops[qp->state] & HFI1_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_hfi1_state_ops[qp->state] & HFI1_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_iowait.sdma_busy)) {
			qp->s_flags |= HFI1_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
	if (ah_attr->dlid < HFI1_MULTICAST_LID_BASE ||
	    ah_attr->dlid == HFI1_PERMISSIVE_LID) {
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback && (lid == ppd->lid ||
		    (lid == HFI1_PERMISSIVE_LID &&
		     qp->ibqp.qp_type == IB_QPT_GSI)))) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_iowait.sdma_busy)) {
				qp->s_flags |= HFI1_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
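	/* Pad the payload out to a multiple of 4 bytes (a whole dword). */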
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += hfi1_make_grh(ibp, &qp->s_hdr->ibh.u.l.grh,
						&ah_attr->grh,
						qp->s_hdrwords, nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &qp->s_hdr->ibh.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs. It is unspecified by the spec. what happens.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = HFI1_LRH_BTH;
		ohdr = &qp->s_hdr->ibh.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}
	sc5 = ibp->sl_to_sc[ah_attr->sl];
	lrh0 |= (ah_attr->sl & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		qp->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		qp->s_sc = sc5;
	}
	qp->s_hdr->ibh.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr->ibh.lrh[1] = cpu_to_be16(ah_attr->dlid); /* DEST LID */
	qp->s_hdr->ibh.lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	if (ah_attr->dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
	} else {
		lid = ppd->lid;
		if (lid) {
			lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
			qp->s_hdr->ibh.lrh[3] = cpu_to_be16(lid);
		} else {
			qp->s_hdr->ibh.lrh[3] = IB_LID_PERMISSIVE;
		}
	}
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		bth0 |= hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		bth0 |= hfi1_get_pkey(ibp, qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
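	/* Advance and mask the send PSN for this packet. */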
	ohdr->bth[2] = cpu_to_be32(mask_psn(qp->s_next_psn++));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
	/* disarm any ahg */
	qp->s_hdr->ahgcount = 0;
	qp->s_hdr->ahgidx = 0;
	qp->s_hdr->tx_flags = 0;
	qp->s_hdr->sde = NULL;

done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~HFI1_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm than the standard pkey check. It
 * special cases the management keys and allows for 0x7fff and 0xffff to be in
 * the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		int lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff, return the 0x7fff idx if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match... */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}

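/*
 * return_cnp - send a congestion notification packet (CNP) back to the
 * originator of a packet that arrived with the FECN bit set.
 */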
void return_cnp(struct hfi1_ibport *ibp, struct hfi1_qp *qp, u32 remote_qpn,
		u32 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct hfi1_ib_header hdr;
	struct hfi1_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

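	/*
	 * If the offending packet carried a GRH, reflect it back with
	 * the source and destination GIDs swapped.
	 */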
	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16((hwords - 2 + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << HFI1_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(slid);

	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 0.90, section 9.10.26
 * ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct hfi1_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc5 != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, trap, or trap repress.
	 * Our response depends, in part, on which type of
	 * SMP we're processing.
	 *
	 * If this is not an SMA request, or trap repress:
	 *   - accept MAD if the port is running an SM
	 *   - pkey == FULL_MGMT_P_KEY =>
	 *       reply with unsupported method (i.e., just mark
	 *       the smp's status field here, and let it be
	 *       processed normally)
	 *   - pkey != LIM_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 * If this is an SMA request or trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	case IB_MGMT_METHOD_SEND:
	case IB_MGMT_METHOD_TRAP:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		if (ibp->port_cap_flags & IB_PORT_SM)
			return 0;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		if (pkey != LIM_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		break;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure, which carries the header, payload,
 *          receive flags, total length, and the QP the packet
 *          arrived on
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	struct hfi1_other_headers *ohdr = packet->ohdr;
	int opcode;
	u32 hdrsize = packet->hlen;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid, pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = &packet->rcd->ppd->ibport_data;
	struct hfi1_ib_header *hdr = packet->hdr;
	u32 rcv_flags = packet->rcv_flags;
	void *data = packet->ebuf;
	u32 tlen = packet->tlen;
	struct hfi1_qp *qp = packet->qp;
	bool has_grh = rcv_flags & HFI1_HAS_GRH;
	bool sc4_bit = has_sc4_bit(packet);
	u8 sc;
	u32 bth1;
	int is_mcast;
	struct ib_grh *grh = NULL;

	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & HFI1_QPN_MASK;
	dlid = be16_to_cpu(hdr->lrh[1]);
	is_mcast = (dlid > HFI1_MULTICAST_LID_BASE) &&
		   (dlid != HFI1_PERMISSIVE_LID);
	bth1 = be32_to_cpu(ohdr->bth[1]);
	if (unlikely(bth1 & HFI1_BECN_SMASK)) {
		/*
		 * In pre-B0 h/w the CNP_OPCODE is handled via an
		 * error path (errata 291394).
		 */
		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
		u32 lqpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK;
		u8 sl, sc5;

		sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
		sc5 |= sc4_bit;
		sl = ibp->sc_to_sl[sc5];

		process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	opcode &= 0xff;

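	/* The pkey occupies the low 16 bits of BTH dword 0. */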
	pkey = (u16)be32_to_cpu(ohdr->bth[0]);

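	/*
	 * If a unicast, non-CNP packet arrives with the FECN bit set,
	 * notify the sender of the congestion by returning a CNP.
	 */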
	if (!is_mcast && (opcode != IB_OPCODE_CNP) &&
	    (bth1 & HFI1_FECN_SMASK)) {
		u16 slid = be16_to_cpu(hdr->lrh[3]);
		u8 sc5;

		sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
		sc5 |= sc4_bit;

		return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
	}
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
			u16 slid;
			u8 sc5;

			sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
			sc5 |= sc4_bit;

			slid = be16_to_cpu(hdr->lrh[3]);
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					       pkey,
					       (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					       src_qp, qp->ibqp.qp_num,
					       hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(qkey != qp->qkey)) {
			hfi1_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				       (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				       src_qp, qp->ibqp.qp_num,
				       hdr->lrh[3], hdr->lrh[1]);
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;
		u16 slid = be16_to_cpu(hdr->lrh[3]);
		u8 sc5;

		sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
		sc5 |= sc4_bit;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
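	/*
	 * As in ud_loopback(), reuse the receive SGE left over from a
	 * packet that was dropped for being too big, rather than
	 * consuming another receive work request entry.
	 */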
	if (qp->r_flags & HFI1_R_REUSE_SGE) {
		qp->r_flags &= ~HFI1_R_REUSE_SGE;
	} else {
		int ret;

		ret = hfi1_get_rwqe(qp, 0);
		if (ret < 0) {
			hfi1_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= HFI1_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		hfi1_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			      sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		hfi1_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	hfi1_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(HFI1_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}

	wc.slid = be16_to_cpu(hdr->lrh[3]);
	sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
	sc |= sc4_bit;
	wc.sl = ibp->sc_to_sl[sc];

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= HFI1_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	hfi1_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		      (ohdr->bth[0] &
		       cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->n_pkt_drops++;
}