/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"

/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	unsigned int pkt_type;

	if (unlikely(!qp->valid))
		goto err1;

	/* the top three bits of the BTH opcode select the transport */
	pkt_type = pkt->opcode & 0xe0;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely(pkt_type != IB_OPCODE_RC)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UC:
		if (unlikely(pkt_type != IB_OPCODE_UC)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (unlikely(pkt_type != IB_OPCODE_UD)) {
			pr_warn_ratelimited("bad qp type\n");
			goto err1;
		}
		break;
	default:
		pr_warn_ratelimited("unsupported qp type\n");
		goto err1;
	}

	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp->resp.state != QP_STATE_READY))
			goto err1;
	} else if (unlikely(qp->req.state < QP_STATE_READY ||
			    qp->req.state > QP_STATE_DRAINED)) {
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

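/* bump the port's bad P_Key counter, saturating at 0xffff, the largest
 * value the 16-bit counter can report
 */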
static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

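/* bump the port's Q_Key violation counter, also saturating at 0xffff */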
static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
				       port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

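/* check that the packet's P_Key matches the QP's P_Key table entry (for
 * GSI, any entry in the port's table) and that a UD or GSI packet
 * carries the Q_Key the QP expects
 */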
static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	int i;
	int found_pkey = 0;
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

	pkt->pkey_index = 0;

	if (qpn == 1) {
		for (i = 0; i < port->attr.pkey_tbl_len; i++) {
			if (pkey_match(pkey, port->pkey_tbl[i])) {
				pkt->pkey_index = i;
				found_pkey = 1;
				break;
			}
		}

		if (!found_pkey) {
			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
			set_bad_pkey_cntr(port);
			goto err1;
		}
	} else {
		if (unlikely(!pkey_match(pkey,
					 port->pkey_tbl[qp->attr.pkey_index]))) {
			pr_warn_ratelimited("bad pkey = 0x%x\n", pkey);
			set_bad_pkey_cntr(port);
			goto err1;
		}
		pkt->pkey_index = qp->attr.pkey_index;
	}

	if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
	    pkt->mask) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			pr_warn_ratelimited("bad qkey, got 0x%x expected 0x%x for qpn 0x%x\n",
					    deth_qkey(pkt), qkey, qpn);
			set_qkey_viol_cntr(port);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

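/* for connected (RC/UC) QPs, check that the packet addresses match the
 * QP's primary address vector: an inbound packet's destination must be
 * the QP's own source address, and its source must be the QP's
 * destination address
 */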
static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		goto done;

	if (unlikely(pkt->port_num != qp->attr.port_num)) {
		pr_warn_ratelimited("port %d != qp port %d\n",
				    pkt->port_num, qp->attr.port_num);
		goto err1;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if (ip_hdr(skb)->daddr != saddr->s_addr) {
			pr_warn_ratelimited("dst addr %pI4 != qp source addr %pI4\n",
					    &ip_hdr(skb)->daddr,
					    &saddr->s_addr);
			goto err1;
		}

		if (ip_hdr(skb)->saddr != daddr->s_addr) {
			pr_warn_ratelimited("source addr %pI4 != qp dst addr %pI4\n",
					    &ip_hdr(skb)->saddr,
					    &daddr->s_addr);
			goto err1;
		}

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr))) {
			pr_warn_ratelimited("dst addr %pI6 != qp source addr %pI6\n",
					    &ipv6_hdr(skb)->daddr, saddr);
			goto err1;
		}

		if (memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr))) {
			pr_warn_ratelimited("source addr %pI6 != qp dst addr %pI6\n",
					    &ipv6_hdr(skb)->saddr, daddr);
			goto err1;
		}
	}

done:
	return 0;

err1:
	return -EINVAL;
}

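/* validate the BTH fields and look up the destination QP; on success
 * pkt->qp holds a reference to the QP (NULL for a multicast packet)
 */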
static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER)) {
		pr_warn_ratelimited("bad tver\n");
		goto err1;
	}

	if (unlikely(qpn == 0)) {
		pr_warn_once("QP 0 not supported\n");
		goto err1;
	}

	if (qpn != IB_MULTICAST_QPN) {
		/* QP1 (GSI) is created by the driver, so look it up by
		 * its recorded pool index; any other QPN indexes the QP
		 * pool directly
		 */
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp)) {
			pr_warn_ratelimited("no qp matches qpn 0x%x\n", qpn);
			goto err1;
		}

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0)) {
			pr_warn_ratelimited("no grh for mcast qpn\n");
			goto err1;
		}
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_drop_ref(qp);
err1:
	return -EINVAL;
}

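/* hand a validated packet to the responder (request packets) or the
 * completer (response and ack packets); both consume the skb
 */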
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}

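/* replicate a multicast packet to every QP attached to the group,
 * cloning the skb for all but the last QP in the list
 */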
static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mc_grp *mcg;
	struct rxe_mc_elem *mce;
	struct rxe_qp *qp;
	union ib_gid dgid;
	struct sk_buff *per_qp_skb;
	struct rxe_pkt_info *per_qp_pkt;
	int err;

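	/* build the destination mgid from the IP destination address;
	 * RoCEv2 represents an IPv4 destination as a v4-mapped IPv6
	 * address, so both cases yield a 128-bit GID
	 */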
	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_pool_get_key(&rxe->mc_grp_pool, &dgid);
	if (!mcg)
		goto err1;	/* mcast group not registered */

	spin_lock_bh(&mcg->mcg_lock);

	list_for_each_entry(mce, &mcg->qp_list, qp_list) {
		qp = mce->qp;

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last qp, create a new clone of the
		 * skb and pass it to the qp. If the checks fail for the
		 * last qp in the list we must free the skb ourselves,
		 * since it was never handed to rxe_rcv_pkt(), which
		 * would otherwise free it later.
		 */
		if (mce->qp_list.next != &mcg->qp_list) {
			per_qp_skb = skb_clone(skb, GFP_ATOMIC);
		} else {
			per_qp_skb = skb;
			/* show we have consumed the skb */
			skb = NULL;
		}

		if (unlikely(!per_qp_skb))
			continue;

		per_qp_pkt = SKB_TO_PKT(per_qp_skb);
		per_qp_pkt->qp = qp;
		rxe_add_ref(qp);
		rxe_rcv_pkt(per_qp_pkt, per_qp_skb);
	}

	spin_unlock_bh(&mcg->mcg_lock);

	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key */

err1:
	/* free skb if not consumed */
	kfree_skb(skb);
}

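/* check that the destination GID in the packet is installed on the
 * receiving port; loopback packets skip the check
 */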
static int rxe_match_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}

/* rxe_rcv is called from the interface driver */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;
	__be32 *icrcp;
	u32 calc_icrc, pack_icrc;

	pkt->offset = 0;
	/* clear pkt->qp before the first goto drop: the drop path reads
	 * it, and skb->cb is not guaranteed to reach us zeroed
	 */
	pkt->qp = NULL;

	if (unlikely(skb->len < pkt->offset + RXE_BTH_BYTES))
		goto drop;

	if (rxe_match_dgid(rxe, skb) < 0) {
		pr_warn_ratelimited("failed matching dgid\n");
		goto drop;
	}

	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	/* verify the ICRC: recompute it over the invariant header fields
	 * and the payload (including any pad bytes) and compare it with
	 * the trailing 4-byte ICRC carried in the packet
	 */
	icrcp = (__be32 *)(pkt->hdr + pkt->paylen - RXE_ICRC_SIZE);
	pack_icrc = be32_to_cpu(*icrcp);

	calc_icrc = rxe_icrc_hdr(pkt, skb);
	calc_icrc = rxe_crc32(rxe, calc_icrc, (u8 *)payload_addr(pkt),
			      payload_size(pkt) + bth_pad(pkt));
	calc_icrc = (__force u32)cpu_to_be32(~calc_icrc);
	if (unlikely(calc_icrc != pack_icrc)) {
		if (skb->protocol == htons(ETH_P_IPV6))
			pr_warn_ratelimited("bad ICRC from %pI6c\n",
					    &ipv6_hdr(skb)->saddr);
		else if (skb->protocol == htons(ETH_P_IP))
			pr_warn_ratelimited("bad ICRC from %pI4\n",
					    &ip_hdr(skb)->saddr);
		else
			pr_warn_ratelimited("bad ICRC from unknown\n");

		goto drop;
	}

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

drop:
	if (pkt->qp)
		rxe_drop_ref(pkt->qp);

	kfree_skb(skb);
}