// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/*          Fredy Neeser */
/*          Greg Joyce <greg@opengridcomputing.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/tcp.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "siw.h"
#include "siw_cm.h"

/*
 * Set to any combination of
 * MPA_V2_RDMA_NO_RTR, MPA_V2_RDMA_READ_RTR, MPA_V2_RDMA_WRITE_RTR
 */
static __be16 rtr_type = MPA_V2_RDMA_READ_RTR | MPA_V2_RDMA_WRITE_RTR;
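/*
 * If set, IRD/ORD negotiation is relaxed: connection setup may succeed
 * even if the peer's announced IRD/ORD values do not exactly match the
 * locally requested ones. See siw_proc_mpareply() and siw_accept() for
 * the exact rules applied.
 */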
static const bool relaxed_ird_negotiation = true;

static void siw_cm_llp_state_change(struct sock *s);
static void siw_cm_llp_data_ready(struct sock *s);
static void siw_cm_llp_write_space(struct sock *s);
static void siw_cm_llp_error_report(struct sock *s);
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
			 int status);

static void siw_sk_assign_cm_upcalls(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = siw_cm_llp_state_change;
	sk->sk_data_ready = siw_cm_llp_data_ready;
	sk->sk_write_space = siw_cm_llp_write_space;
	sk->sk_error_report = siw_cm_llp_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_sk_save_upcalls(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	write_lock_bh(&sk->sk_callback_lock);
	cep->sk_state_change = sk->sk_state_change;
	cep->sk_data_ready = sk->sk_data_ready;
	cep->sk_write_space = sk->sk_write_space;
	cep->sk_error_report = sk->sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

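/*
 * Restore the original socket callbacks. Unlike the save/assign helpers
 * above, this expects the caller to already hold sk->sk_callback_lock
 * (see siw_socket_disassoc()).
 */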
static void siw_sk_restore_upcalls(struct sock *sk, struct siw_cep *cep)
{
	sk->sk_state_change = cep->sk_state_change;
	sk->sk_data_ready = cep->sk_data_ready;
	sk->sk_write_space = cep->sk_write_space;
	sk->sk_error_report = cep->sk_error_report;
	sk->sk_user_data = NULL;
}

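/*
 * Once the connection enters RDMA mode, socket receive and write-space
 * events are handed to the QP's fast-path handlers instead of the
 * connection manager.
 */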
static void siw_qp_socket_assoc(struct siw_cep *cep, struct siw_qp *qp)
{
	struct socket *s = cep->sock;
	struct sock *sk = s->sk;

	write_lock_bh(&sk->sk_callback_lock);

	qp->attrs.sk = s;
	sk->sk_data_ready = siw_qp_llp_data_ready;
	sk->sk_write_space = siw_qp_llp_write_space;

	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_socket_disassoc(struct socket *s)
{
	struct sock *sk = s->sk;
	struct siw_cep *cep;

	if (sk) {
		write_lock_bh(&sk->sk_callback_lock);
		cep = sk_to_cep(sk);
		if (cep) {
			siw_sk_restore_upcalls(sk, cep);
			siw_cep_put(cep);
		} else {
			pr_warn("siw: cannot restore sk callbacks: no ep\n");
		}
		write_unlock_bh(&sk->sk_callback_lock);
	} else {
		pr_warn("siw: cannot restore sk callbacks: no sk\n");
	}
}

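/*
 * Used on the passive side when a peer-to-peer RTR mode was negotiated:
 * connection establishment is signaled only after the peer's first
 * (zero-length) RDMA frame has been processed; socket events are then
 * moved under QP control.
 */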
static void siw_rtr_data_ready(struct sock *sk)
{
	struct siw_cep *cep;
	struct siw_qp *qp = NULL;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep) {
		WARN(1, "No connection endpoint\n");
		goto out;
	}
	qp = sk_to_qp(sk);

	memset(&rd_desc, 0, sizeof(rd_desc));
	rd_desc.arg.data = qp;
	rd_desc.count = 1;

	tcp_read_sock(sk, &rd_desc, siw_tcp_rx_data);
	/*
	 * Check if first frame was successfully processed.
	 * Signal connection full establishment if yes.
	 * Failed data processing would have already scheduled
	 * connection drop.
	 */
	if (!qp->rx_stream.rx_suspend)
		siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
out:
	read_unlock(&sk->sk_callback_lock);
	if (qp)
		siw_qp_socket_assoc(cep, qp);
}

static void siw_sk_assign_rtr_upcalls(struct siw_cep *cep)
{
	struct sock *sk = cep->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_data_ready = siw_rtr_data_ready;
	sk->sk_write_space = siw_qp_llp_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}

static void siw_cep_socket_assoc(struct siw_cep *cep, struct socket *s)
{
	cep->sock = s;
	siw_cep_get(cep);
	s->sk->sk_user_data = cep;

	siw_sk_save_upcalls(s->sk);
	siw_sk_assign_cm_upcalls(s->sk);
}

static struct siw_cep *siw_cep_alloc(struct siw_device *sdev)
{
	struct siw_cep *cep = kzalloc(sizeof(*cep), GFP_KERNEL);
	unsigned long flags;

	if (!cep)
		return NULL;

	INIT_LIST_HEAD(&cep->listenq);
	INIT_LIST_HEAD(&cep->devq);
	INIT_LIST_HEAD(&cep->work_freelist);

	kref_init(&cep->ref);
	cep->state = SIW_EPSTATE_IDLE;
	init_waitqueue_head(&cep->waitq);
	spin_lock_init(&cep->lock);
	cep->sdev = sdev;
	cep->enhanced_rdma_conn_est = false;

	spin_lock_irqsave(&sdev->lock, flags);
	list_add_tail(&cep->devq, &sdev->cep_list);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_dbg_cep(cep, "new endpoint\n");
	return cep;
}

static void siw_cm_free_work(struct siw_cep *cep)
{
	struct list_head *w, *tmp;
	struct siw_cm_work *work;

	list_for_each_safe(w, tmp, &cep->work_freelist) {
		work = list_entry(w, struct siw_cm_work, list);
		list_del(&work->list);
		kfree(work);
	}
}

static void siw_cancel_mpatimer(struct siw_cep *cep)
{
	spin_lock_bh(&cep->lock);
	if (cep->mpa_timer) {
		if (cancel_delayed_work(&cep->mpa_timer->work)) {
			siw_cep_put(cep);
			kfree(cep->mpa_timer); /* not needed again */
		}
		cep->mpa_timer = NULL;
	}
	spin_unlock_bh(&cep->lock);
}

static void siw_put_work(struct siw_cm_work *work)
{
	INIT_LIST_HEAD(&work->list);
	spin_lock_bh(&work->cep->lock);
	list_add(&work->list, &work->cep->work_freelist);
	spin_unlock_bh(&work->cep->lock);
}

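/*
 * CEP state is serialized by the in_use flag plus waitqueue rather than
 * by holding cep->lock across operations that may sleep: a contender
 * releases the spinlock and waits until the current owner calls
 * siw_cep_set_free().
 */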
static void siw_cep_set_inuse(struct siw_cep *cep)
{
	unsigned long flags;
retry:
	spin_lock_irqsave(&cep->lock, flags);

	if (cep->in_use) {
		spin_unlock_irqrestore(&cep->lock, flags);
		wait_event_interruptible(cep->waitq, !cep->in_use);
		if (signal_pending(current))
			flush_signals(current);
		goto retry;
	} else {
		cep->in_use = 1;
		spin_unlock_irqrestore(&cep->lock, flags);
	}
}

static void siw_cep_set_free(struct siw_cep *cep)
{
	unsigned long flags;

	spin_lock_irqsave(&cep->lock, flags);
	cep->in_use = 0;
	spin_unlock_irqrestore(&cep->lock, flags);

	wake_up(&cep->waitq);
}

static void __siw_cep_dealloc(struct kref *ref)
{
	struct siw_cep *cep = container_of(ref, struct siw_cep, ref);
	struct siw_device *sdev = cep->sdev;
	unsigned long flags;

	WARN_ON(cep->listen_cep);

	/* kfree(NULL) is safe */
	kfree(cep->mpa.pdata);
	spin_lock_bh(&cep->lock);
	if (!list_empty(&cep->work_freelist))
		siw_cm_free_work(cep);
	spin_unlock_bh(&cep->lock);

	spin_lock_irqsave(&sdev->lock, flags);
	list_del(&cep->devq);
	spin_unlock_irqrestore(&sdev->lock, flags);

	siw_dbg_cep(cep, "free endpoint\n");
	kfree(cep);
}

static struct siw_cm_work *siw_get_work(struct siw_cep *cep)
{
	struct siw_cm_work *work = NULL;

	spin_lock_bh(&cep->lock);
	if (!list_empty(&cep->work_freelist)) {
		work = list_entry(cep->work_freelist.next, struct siw_cm_work,
				  list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&cep->lock);
	return work;
}

static int siw_cm_alloc_work(struct siw_cep *cep, int num)
{
	struct siw_cm_work *work;

	while (num--) {
		work = kmalloc(sizeof(*work), GFP_KERNEL);
		if (!work) {
			if (!(list_empty(&cep->work_freelist)))
				siw_cm_free_work(cep);
			return -ENOMEM;
		}
		work->cep = cep;
		INIT_LIST_HEAD(&work->list);
		list_add(&work->list, &cep->work_freelist);
	}
	return 0;
}

/*
 * siw_cm_upcall()
 *
 * Upcall to IWCM to inform about async connection events
 */
static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
			 int status)
{
	struct iw_cm_event event;
	struct iw_cm_id *id;

	memset(&event, 0, sizeof(event));
	event.status = status;
	event.event = reason;

	if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
		event.provider_data = cep;
		id = cep->listen_cep->cm_id;
	} else {
		id = cep->cm_id;
	}
	/* Signal IRD and ORD */
	if (reason == IW_CM_EVENT_ESTABLISHED ||
	    reason == IW_CM_EVENT_CONNECT_REPLY) {
		/* Signal negotiated IRD/ORD values we will use */
		event.ird = cep->ird;
		event.ord = cep->ord;
	} else if (reason == IW_CM_EVENT_CONNECT_REQUEST) {
		event.ird = cep->ord;
		event.ord = cep->ird;
	}
	/* Signal private data and address information */
	if (reason == IW_CM_EVENT_CONNECT_REQUEST ||
	    reason == IW_CM_EVENT_CONNECT_REPLY) {
		u16 pd_len = be16_to_cpu(cep->mpa.hdr.params.pd_len);

		if (pd_len) {
			/*
			 * hand over MPA private data
			 */
			event.private_data_len = pd_len;
			event.private_data = cep->mpa.pdata;

			/* Hide MPA V2 IRD/ORD control */
			if (cep->enhanced_rdma_conn_est) {
				event.private_data_len -=
					sizeof(struct mpa_v2_data);
				event.private_data +=
					sizeof(struct mpa_v2_data);
			}
		}
		getname_local(cep->sock, &event.local_addr);
		getname_peer(cep->sock, &event.remote_addr);
	}
	siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);

	return id->event_handler(id, &event);
}

/*
 * siw_qp_cm_drop()
 *
 * Drops established LLP connection if present and not already
 * scheduled for dropping. Called from user context, SQ workqueue
 * or receive IRQ. Caller signals if socket can be immediately
 * closed (basically, if not in IRQ).
 */
void siw_qp_cm_drop(struct siw_qp *qp, int schedule)
{
	struct siw_cep *cep = qp->cep;

	qp->rx_stream.rx_suspend = 1;
	qp->tx_ctx.tx_suspend = 1;

	if (!qp->cep)
		return;

	if (schedule) {
		siw_cm_queue_work(cep, SIW_CM_WORK_CLOSE_LLP);
	} else {
		siw_cep_set_inuse(cep);

		if (cep->state == SIW_EPSTATE_CLOSED) {
			siw_dbg_cep(cep, "already closed\n");
			goto out;
		}
		siw_dbg_cep(cep, "immediate close, state %d\n", cep->state);

		if (qp->term_info.valid)
			siw_send_terminate(qp);

		if (cep->cm_id) {
			switch (cep->state) {
			case SIW_EPSTATE_AWAIT_MPAREP:
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -EINVAL);
				break;

			case SIW_EPSTATE_RDMA_MODE:
				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
				break;

			case SIW_EPSTATE_IDLE:
			case SIW_EPSTATE_LISTENING:
			case SIW_EPSTATE_CONNECTING:
			case SIW_EPSTATE_AWAIT_MPAREQ:
			case SIW_EPSTATE_RECVD_MPAREQ:
			case SIW_EPSTATE_CLOSED:
			default:
				break;
			}
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
			siw_cep_put(cep);
		}
		cep->state = SIW_EPSTATE_CLOSED;

		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			/*
			 * Immediately close socket
			 */
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		if (cep->qp) {
			cep->qp = NULL;
			siw_qp_put(qp);
		}
out:
		siw_cep_set_free(cep);
	}
}

void siw_cep_put(struct siw_cep *cep)
{
	WARN_ON(kref_read(&cep->ref) < 1);
	kref_put(&cep->ref, __siw_cep_dealloc);
}

void siw_cep_get(struct siw_cep *cep)
{
	kref_get(&cep->ref);
}

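/*
 * For illustration: the frame sent by siw_send_mpareqrep() is laid out
 * on the wire as
 *
 *	struct mpa_rr hdr;	- 16-byte key plus params
 *	struct mpa_v2_data;	- IRD/ORD words, enhanced mode only
 *	u8 pdata[];		- ULP private data, if any
 *
 * with hdr.params.pd_len covering the MPA v2 control word plus the
 * private data.
 */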
/*
 * Expects params->pd_len in host byte order
 */
static int siw_send_mpareqrep(struct siw_cep *cep, const void *pdata, u8 pd_len)
{
	struct socket *s = cep->sock;
	struct mpa_rr *rr = &cep->mpa.hdr;
	struct kvec iov[3];
	struct msghdr msg;
	int rv;
	int iovec_num = 0;
	int mpa_len;

	memset(&msg, 0, sizeof(msg));

	iov[iovec_num].iov_base = rr;
	iov[iovec_num].iov_len = sizeof(*rr);
	mpa_len = sizeof(*rr);

	if (cep->enhanced_rdma_conn_est) {
		iovec_num++;
		iov[iovec_num].iov_base = &cep->mpa.v2_ctrl;
		iov[iovec_num].iov_len = sizeof(cep->mpa.v2_ctrl);
		mpa_len += sizeof(cep->mpa.v2_ctrl);
	}
	if (pd_len) {
		iovec_num++;
		iov[iovec_num].iov_base = (char *)pdata;
		iov[iovec_num].iov_len = pd_len;
		mpa_len += pd_len;
	}
	if (cep->enhanced_rdma_conn_est)
		pd_len += sizeof(cep->mpa.v2_ctrl);

	rr->params.pd_len = cpu_to_be16(pd_len);

	rv = kernel_sendmsg(s, &msg, iov, iovec_num + 1, mpa_len);

	return rv < 0 ? rv : 0;
}

/*
 * Receive MPA Request/Reply header.
 *
 * Returns 0 if complete MPA Request/Reply header including
 * eventual private data was received. Returns -EAGAIN if
 * header was partially received or negative error code otherwise.
 *
 * Context: May be called in process context only
 */
static int siw_recv_mpa_rr(struct siw_cep *cep)
{
	struct mpa_rr *hdr = &cep->mpa.hdr;
	struct socket *s = cep->sock;
	u16 pd_len;
	int rcvd, to_rcv;

	if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr)) {
		rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
				  sizeof(struct mpa_rr) - cep->mpa.bytes_rcvd,
				  0);
		if (rcvd <= 0)
			return -ECONNABORTED;

		cep->mpa.bytes_rcvd += rcvd;

		if (cep->mpa.bytes_rcvd < sizeof(struct mpa_rr))
			return -EAGAIN;

		if (be16_to_cpu(hdr->params.pd_len) > MPA_MAX_PRIVDATA)
			return -EPROTO;
	}
	pd_len = be16_to_cpu(hdr->params.pd_len);

	/*
	 * At least the MPA Request/Reply header (frame not including
	 * private data) has been received.
	 * Receive (or continue receiving) any private data.
	 */
	to_rcv = pd_len - (cep->mpa.bytes_rcvd - sizeof(struct mpa_rr));

	if (!to_rcv) {
		/*
		 * We must have hdr->params.pd_len == 0 and thus received a
		 * complete MPA Request/Reply frame.
		 * Check against peer protocol violation.
		 */
		u32 word;

		rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT);
		if (rcvd == -EAGAIN)
			return 0;

		if (rcvd == 0) {
			siw_dbg_cep(cep, "peer EOF\n");
			return -EPIPE;
		}
		if (rcvd < 0) {
			siw_dbg_cep(cep, "error: %d\n", rcvd);
			return rcvd;
		}
		siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd);

		return -EPROTO;
	}

	/*
	 * At this point, we must have hdr->params.pd_len != 0.
	 * A private data buffer gets allocated if hdr->params.pd_len != 0.
	 */
	if (!cep->mpa.pdata) {
		cep->mpa.pdata = kmalloc(pd_len + 4, GFP_KERNEL);
		if (!cep->mpa.pdata)
			return -ENOMEM;
	}
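	/*
	 * Buffer and receive length include 4 extra bytes on purpose:
	 * if the peer pushed more than pd_len bytes, the surplus is
	 * picked up here and flagged as a protocol violation below.
	 */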
	rcvd = ksock_recv(
		s, cep->mpa.pdata + cep->mpa.bytes_rcvd - sizeof(struct mpa_rr),
		to_rcv + 4, MSG_DONTWAIT);

	if (rcvd < 0)
		return rcvd;

	if (rcvd > to_rcv)
		return -EPROTO;

	cep->mpa.bytes_rcvd += rcvd;

	if (to_rcv == rcvd) {
		siw_dbg_cep(cep, "%d bytes private data received\n", pd_len);
		return 0;
	}
	return -EAGAIN;
}

/*
 * siw_proc_mpareq()
 *
 * Read MPA Request from socket and signal new connection to IWCM
 * if success. Caller must hold lock on corresponding listening CEP.
 */
static int siw_proc_mpareq(struct siw_cep *cep)
{
	struct mpa_rr *req;
	int version, rv;
	u16 pd_len;

	rv = siw_recv_mpa_rr(cep);
	if (rv)
		return rv;

	req = &cep->mpa.hdr;

	version = __mpa_rr_revision(req->params.bits);
	pd_len = be16_to_cpu(req->params.pd_len);

	if (version > MPA_REVISION_2)
		/* allow for 0, 1, and 2 only */
		return -EPROTO;

	if (memcmp(req->key, MPA_KEY_REQ, 16))
		return -EPROTO;

	/* Prepare for sending MPA reply */
	memcpy(req->key, MPA_KEY_REP, 16);

	if (version == MPA_REVISION_2 &&
	    (req->params.bits & MPA_RR_FLAG_ENHANCED)) {
		/*
		 * MPA version 2 must signal IRD/ORD values and P2P mode
		 * in private data if header flag MPA_RR_FLAG_ENHANCED
		 * is set.
		 */
		if (pd_len < sizeof(struct mpa_v2_data))
			goto reject_conn;

		cep->enhanced_rdma_conn_est = true;
	}

	/* MPA Markers: currently not supported. Marker TX to be added. */
	if (req->params.bits & MPA_RR_FLAG_MARKERS)
		goto reject_conn;

	if (req->params.bits & MPA_RR_FLAG_CRC) {
		/*
		 * RFC 5044, page 27: CRC MUST be used if peer requests it.
		 * siw specific: if CRC is locally switched off but the
		 * 'mpa_crc_strict' module parameter is set, reject a
		 * connection requesting CRC.
		 */
		if (!mpa_crc_required && mpa_crc_strict)
			goto reject_conn;

		/* Enable CRC if requested by module parameter */
		if (mpa_crc_required)
			req->params.bits |= MPA_RR_FLAG_CRC;
	}
	if (cep->enhanced_rdma_conn_est) {
		struct mpa_v2_data *v2 = (struct mpa_v2_data *)cep->mpa.pdata;

		/*
		 * Peer requested ORD becomes requested local IRD,
		 * peer requested IRD becomes requested local ORD.
		 * IRD and ORD get limited by global maximum values.
		 */
		cep->ord = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
		cep->ord = min(cep->ord, SIW_MAX_ORD_QP);
		cep->ird = ntohs(v2->ord) & MPA_IRD_ORD_MASK;
		cep->ird = min(cep->ird, SIW_MAX_IRD_QP);

		/* May get overwritten by locally negotiated values */
		cep->mpa.v2_ctrl.ird = htons(cep->ird);
		cep->mpa.v2_ctrl.ord = htons(cep->ord);

		/*
		 * Support for peer sent zero length Write or Read to
		 * let local side enter RTS. Writes are preferred.
		 * Sends would require pre-posting a Receive and are
		 * not supported.
		 * Propose zero length Write if none of Read and Write
		 * is indicated.
		 */
		if (v2->ird & MPA_V2_PEER_TO_PEER) {
			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;

			if (v2->ord & MPA_V2_RDMA_WRITE_RTR)
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
			else if (v2->ord & MPA_V2_RDMA_READ_RTR)
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_READ_RTR;
			else
				cep->mpa.v2_ctrl.ord |= MPA_V2_RDMA_WRITE_RTR;
		}
	}

	cep->state = SIW_EPSTATE_RECVD_MPAREQ;

	/* Keep reference until IWCM accepts/rejects */
	siw_cep_get(cep);
	rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REQUEST, 0);
	if (rv)
		siw_cep_put(cep);

	return rv;

reject_conn:
	siw_dbg_cep(cep, "reject: crc %d:%d:%d, m %d:%d\n",
		    req->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
		    mpa_crc_required, mpa_crc_strict,
		    req->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);

	req->params.bits &= ~MPA_RR_FLAG_MARKERS;
	req->params.bits |= MPA_RR_FLAG_REJECT;

	if (!mpa_crc_required && mpa_crc_strict)
		req->params.bits &= ~MPA_RR_FLAG_CRC;

	if (pd_len)
		kfree(cep->mpa.pdata);

	cep->mpa.pdata = NULL;

	siw_send_mpareqrep(cep, NULL, 0);

	return -EOPNOTSUPP;
}

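/*
 * siw_proc_mpareply()
 *
 * Read MPA Reply from socket. On success, resolve IRD/ORD and P2P mode
 * negotiation, move the QP to RTS and report the result to the IWCM
 * via IW_CM_EVENT_CONNECT_REPLY.
 */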
static int siw_proc_mpareply(struct siw_cep *cep)
{
	struct siw_qp_attrs qp_attrs;
	enum siw_qp_attr_mask qp_attr_mask;
	struct siw_qp *qp = cep->qp;
	struct mpa_rr *rep;
	int rv;
	u16 rep_ord;
	u16 rep_ird;
	bool ird_insufficient = false;
	enum mpa_v2_ctrl mpa_p2p_mode = MPA_V2_RDMA_NO_RTR;

	rv = siw_recv_mpa_rr(cep);
	if (rv)
		goto out_err;

	siw_cancel_mpatimer(cep);

	rep = &cep->mpa.hdr;

	if (__mpa_rr_revision(rep->params.bits) > MPA_REVISION_2) {
		/* allow for 0, 1, and 2 only */
		rv = -EPROTO;
		goto out_err;
	}
	if (memcmp(rep->key, MPA_KEY_REP, 16)) {
		siw_init_terminate(qp, TERM_ERROR_LAYER_LLP, LLP_ETYPE_MPA,
				   LLP_ECODE_INVALID_REQ_RESP, 0);
		siw_send_terminate(qp);
		rv = -EPROTO;
		goto out_err;
	}
	if (rep->params.bits & MPA_RR_FLAG_REJECT) {
		siw_dbg_cep(cep, "got mpa reject\n");
		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNRESET);

		return -ECONNRESET;
	}
	if (try_gso && rep->params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");
		qp->tx_ctx.gso_seg_limit = 0;
	}
	if ((rep->params.bits & MPA_RR_FLAG_MARKERS) ||
	    (mpa_crc_required && !(rep->params.bits & MPA_RR_FLAG_CRC)) ||
	    (mpa_crc_strict && !mpa_crc_required &&
	     (rep->params.bits & MPA_RR_FLAG_CRC))) {
		siw_dbg_cep(cep, "reply unsupp: crc %d:%d:%d, m %d:%d\n",
			    rep->params.bits & MPA_RR_FLAG_CRC ? 1 : 0,
			    mpa_crc_required, mpa_crc_strict,
			    rep->params.bits & MPA_RR_FLAG_MARKERS ? 1 : 0, 0);

		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -ECONNREFUSED);

		return -EINVAL;
	}
	if (cep->enhanced_rdma_conn_est) {
		struct mpa_v2_data *v2;

		if (__mpa_rr_revision(rep->params.bits) < MPA_REVISION_2 ||
		    !(rep->params.bits & MPA_RR_FLAG_ENHANCED)) {
			/*
			 * Protocol failure: The responder MUST reply with
			 * MPA version 2 and MUST set MPA_RR_FLAG_ENHANCED.
			 */
			siw_dbg_cep(cep, "mpa reply error: vers %d, enhcd %d\n",
				    __mpa_rr_revision(rep->params.bits),
				    rep->params.bits & MPA_RR_FLAG_ENHANCED ?
				    1 : 0);

			siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
				      -ECONNRESET);
			return -EINVAL;
		}
		v2 = (struct mpa_v2_data *)cep->mpa.pdata;
		rep_ird = ntohs(v2->ird) & MPA_IRD_ORD_MASK;
		rep_ord = ntohs(v2->ord) & MPA_IRD_ORD_MASK;

		if (cep->ird < rep_ord &&
		    (relaxed_ird_negotiation == false ||
		     rep_ord > cep->sdev->attrs.max_ird)) {
			siw_dbg_cep(cep, "ird %d, rep_ord %d, max_ord %d\n",
				    cep->ird, rep_ord,
				    cep->sdev->attrs.max_ord);
			ird_insufficient = true;
		}
		if (cep->ord > rep_ird && relaxed_ird_negotiation == false) {
			siw_dbg_cep(cep, "ord %d, rep_ird %d\n", cep->ord,
				    rep_ird);
			ird_insufficient = true;
		}
		/*
		 * Always report negotiated peer values to user,
		 * even if IRD/ORD negotiation failed
		 */
		cep->ird = rep_ord;
		cep->ord = rep_ird;

		if (ird_insufficient) {
			/*
			 * If the initiator IRD is insufficient for the
			 * responder ORD, send a TERM.
			 */
			siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
					   LLP_ETYPE_MPA,
					   LLP_ECODE_INSUFFICIENT_IRD, 0);
			siw_send_terminate(qp);
			rv = -ENOMEM;
			goto out_err;
		}
		if (cep->mpa.v2_ctrl_req.ird & MPA_V2_PEER_TO_PEER)
			mpa_p2p_mode =
				cep->mpa.v2_ctrl_req.ord &
				(MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR);

		/*
		 * Check if we requested P2P mode, and if peer agrees
		 */
		if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
			if ((mpa_p2p_mode & v2->ord) == 0) {
				/*
				 * We requested RTR mode(s), but the peer
				 * did not pick any mode we support.
				 */
				siw_dbg_cep(cep,
					    "rtr mode: req %2x, got %2x\n",
					    mpa_p2p_mode,
					    v2->ord & (MPA_V2_RDMA_WRITE_RTR |
						       MPA_V2_RDMA_READ_RTR));

				siw_init_terminate(qp, TERM_ERROR_LAYER_LLP,
						   LLP_ETYPE_MPA,
						   LLP_ECODE_NO_MATCHING_RTR,
						   0);
				siw_send_terminate(qp);
				rv = -EPROTO;
				goto out_err;
			}
			mpa_p2p_mode = v2->ord & (MPA_V2_RDMA_WRITE_RTR |
						  MPA_V2_RDMA_READ_RTR);
		}
	}
	memset(&qp_attrs, 0, sizeof(qp_attrs));

	if (rep->params.bits & MPA_RR_FLAG_CRC)
		qp_attrs.flags = SIW_MPA_CRC;

	qp_attrs.irq_size = cep->ird;
	qp_attrs.orq_size = cep->ord;
	qp_attrs.sk = cep->sock;
	qp_attrs.state = SIW_QP_STATE_RTS;

	qp_attr_mask = SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
		       SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD | SIW_QP_ATTR_MPA;

	/* Move socket RX/TX under QP control */
	down_write(&qp->state_lock);
	if (qp->attrs.state > SIW_QP_STATE_RTR) {
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto out_err;
	}
	rv = siw_qp_modify(qp, &qp_attrs, qp_attr_mask);

	siw_qp_socket_assoc(cep, qp);

	up_write(&qp->state_lock);

	/* Send extra RDMA frame to trigger peer RTS if negotiated */
	if (mpa_p2p_mode != MPA_V2_RDMA_NO_RTR) {
		rv = siw_qp_mpa_rts(qp, mpa_p2p_mode);
		if (rv)
			goto out_err;
	}
	if (!rv) {
		rv = siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, 0);
		if (!rv)
			cep->state = SIW_EPSTATE_RDMA_MODE;

		return 0;
	}

out_err:
	if (rv != -EAGAIN)
		siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY, -EINVAL);

	return rv;
}

/*
 * siw_accept_newconn - accept an incoming pending connection
 *
 */
static void siw_accept_newconn(struct siw_cep *cep)
{
	struct socket *s = cep->sock;
	struct socket *new_s = NULL;
	struct siw_cep *new_cep = NULL;
	int rv = 0; /* debug only. should disappear */

	if (cep->state != SIW_EPSTATE_LISTENING)
		goto error;

	new_cep = siw_cep_alloc(cep->sdev);
	if (!new_cep)
		goto error;

	/*
	 * 4: Allocate a sufficient number of work elements
	 * to allow concurrent handling of local + peer close
	 * events, MPA header processing + MPA timeout.
	 */
	if (siw_cm_alloc_work(new_cep, 4) != 0)
		goto error;

	/*
	 * Copy saved socket callbacks from listening CEP
	 * and assign new socket with new CEP
	 */
	new_cep->sk_state_change = cep->sk_state_change;
	new_cep->sk_data_ready = cep->sk_data_ready;
	new_cep->sk_write_space = cep->sk_write_space;
	new_cep->sk_error_report = cep->sk_error_report;

	rv = kernel_accept(s, &new_s, O_NONBLOCK);
	if (rv != 0) {
		/*
		 * Connection already aborted by the peer?
		 */
944 siw_dbg_cep(cep, "kernel_accept() error: %d\n", rv);
945 goto error;
946 }
947 new_cep->sock = new_s;
948 siw_cep_get(new_cep);
949 new_s->sk->sk_user_data = new_cep;
950
951 if (siw_tcp_nagle == false)
952 tcp_sock_set_nodelay(new_s->sk);
953 new_cep->state = SIW_EPSTATE_AWAIT_MPAREQ;
954
955 rv = siw_cm_queue_work(new_cep, SIW_CM_WORK_MPATIMEOUT);
956 if (rv)
957 goto error;
958 /*
959 * See siw_proc_mpareq() etc. for the use of new_cep->listen_cep.
960 */
961 new_cep->listen_cep = cep;
962 siw_cep_get(cep);
963
964 if (atomic_read(&new_s->sk->sk_rmem_alloc)) {
965 /*
966 * MPA REQ already queued
967 */
968 siw_dbg_cep(cep, "immediate mpa request\n");
969
970 siw_cep_set_inuse(new_cep);
971 rv = siw_proc_mpareq(new_cep);
972 if (rv != -EAGAIN) {
973 siw_cep_put(cep);
974 new_cep->listen_cep = NULL;
975 if (rv) {
976 siw_cep_set_free(new_cep);
977 goto error;
978 }
979 }
980 siw_cep_set_free(new_cep);
981 }
982 return;
983
984 error:
985 if (new_cep)
986 siw_cep_put(new_cep);
987
988 if (new_s) {
989 siw_socket_disassoc(new_s);
990 sock_release(new_s);
991 new_cep->sock = NULL;
992 }
993 siw_dbg_cep(cep, "error %d\n", rv);
994 }
995
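/*
 * Central handler for all deferred CM work. Work items are executed
 * on the single-threaded siw_cm_wq (see siw_cm_init()), which keeps
 * connection events strictly ordered.
 */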
static void siw_cm_work_handler(struct work_struct *w)
{
	struct siw_cm_work *work;
	struct siw_cep *cep;
	int release_cep = 0, rv = 0;

	work = container_of(w, struct siw_cm_work, work.work);
	cep = work->cep;

	siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX,
		    work->type, cep->state);

	siw_cep_set_inuse(cep);

	switch (work->type) {
	case SIW_CM_WORK_ACCEPT:
		siw_accept_newconn(cep);
		break;

	case SIW_CM_WORK_READ_MPAHDR:
		if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
			if (cep->listen_cep) {
				siw_cep_set_inuse(cep->listen_cep);

				if (cep->listen_cep->state ==
				    SIW_EPSTATE_LISTENING)
					rv = siw_proc_mpareq(cep);
				else
					rv = -EFAULT;

				siw_cep_set_free(cep->listen_cep);

				if (rv != -EAGAIN) {
					siw_cep_put(cep->listen_cep);
					cep->listen_cep = NULL;
					if (rv)
						siw_cep_put(cep);
				}
			}
		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
			rv = siw_proc_mpareply(cep);
		} else {
			/*
			 * CEP already moved out of MPA handshake.
			 * Any connection management is already done.
			 * Silently ignore the MPA packet.
			 */
			if (cep->state == SIW_EPSTATE_RDMA_MODE) {
				cep->sock->sk->sk_data_ready(cep->sock->sk);
				siw_dbg_cep(cep, "already in RDMA mode");
			} else {
				siw_dbg_cep(cep, "out of state: %d\n",
					    cep->state);
			}
		}
		if (rv && rv != -EAGAIN)
			release_cep = 1;
		break;

	case SIW_CM_WORK_CLOSE_LLP:
		/*
		 * QP scheduled LLP close
		 */
		if (cep->qp && cep->qp->term_info.valid)
			siw_send_terminate(cep->qp);

		if (cep->cm_id)
			siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);

		release_cep = 1;
		break;

	case SIW_CM_WORK_PEER_CLOSE:
		if (cep->cm_id) {
			if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
				/*
				 * MPA reply not received, but connection drop
				 */
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -ECONNRESET);
			} else if (cep->state == SIW_EPSTATE_RDMA_MODE) {
				/*
				 * NOTE: IW_CM_EVENT_DISCONNECT is given just
				 * to transition IWCM into CLOSING.
				 */
				siw_cm_upcall(cep, IW_CM_EVENT_DISCONNECT, 0);
				siw_cm_upcall(cep, IW_CM_EVENT_CLOSE, 0);
			}
			/*
			 * for other states there is no connection
			 * known to the IWCM.
			 */
		} else {
			if (cep->state == SIW_EPSTATE_RECVD_MPAREQ) {
				/*
				 * Wait for the ulp/CM to call accept/reject
				 */
				siw_dbg_cep(cep,
					    "mpa req recvd, wait for ULP\n");
			} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
				/*
				 * Socket close before MPA request received.
				 */
				siw_dbg_cep(cep, "no mpareq: drop listener\n");
				siw_cep_put(cep->listen_cep);
				cep->listen_cep = NULL;
			}
		}
		release_cep = 1;
		break;

	case SIW_CM_WORK_MPATIMEOUT:
		cep->mpa_timer = NULL;

		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP) {
			/*
			 * MPA request timed out:
			 * Hide any partially received private data and signal
			 * timeout
			 */
			cep->mpa.hdr.params.pd_len = 0;

			if (cep->cm_id)
				siw_cm_upcall(cep, IW_CM_EVENT_CONNECT_REPLY,
					      -ETIMEDOUT);
			release_cep = 1;

		} else if (cep->state == SIW_EPSTATE_AWAIT_MPAREQ) {
			/*
			 * No MPA request received after peer TCP stream setup.
			 */
			if (cep->listen_cep) {
				siw_cep_put(cep->listen_cep);
				cep->listen_cep = NULL;
			}
			release_cep = 1;
		}
		break;

	default:
		WARN(1, "Undefined CM work type: %d\n", work->type);
	}
	if (release_cep) {
		siw_dbg_cep(cep,
			    "release: timer=%s, QP[%u]\n",
			    cep->mpa_timer ? "y" : "n",
			    cep->qp ? qp_id(cep->qp) : UINT_MAX);

		siw_cancel_mpatimer(cep);

		cep->state = SIW_EPSTATE_CLOSED;

		if (cep->qp) {
			struct siw_qp *qp = cep->qp;
			/*
			 * Serialize a potential race with application
			 * closing the QP and calling siw_qp_cm_drop()
			 */
			siw_qp_get(qp);
			siw_cep_set_free(cep);

			siw_qp_llp_close(qp);
			siw_qp_put(qp);

			siw_cep_set_inuse(cep);
			cep->qp = NULL;
			siw_qp_put(qp);
		}
		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
			siw_cep_put(cep);
		}
	}
	siw_cep_set_free(cep);
	siw_put_work(work);
	siw_cep_put(cep);
}

static struct workqueue_struct *siw_cm_wq;

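/*
 * Queue deferred CM work for a CEP, taking an extra CEP reference which
 * siw_cm_work_handler() drops after execution. MPA timeout work is
 * armed with a delay and remembered in cep->mpa_timer so it can be
 * canceled once the awaited MPA frame arrives.
 */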
int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
{
	struct siw_cm_work *work = siw_get_work(cep);
	unsigned long delay = 0;

	if (!work) {
		siw_dbg_cep(cep, "failed with no work available\n");
		return -ENOMEM;
	}
	work->type = type;
	work->cep = cep;

	siw_cep_get(cep);

	INIT_DELAYED_WORK(&work->work, siw_cm_work_handler);

	if (type == SIW_CM_WORK_MPATIMEOUT) {
		cep->mpa_timer = work;

		if (cep->state == SIW_EPSTATE_AWAIT_MPAREP)
			delay = MPAREQ_TIMEOUT;
		else
			delay = MPAREP_TIMEOUT;
	}
	siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
		    cep->qp ? qp_id(cep->qp) : -1, type, delay);

	queue_delayed_work(siw_cm_wq, &work->work, delay);

	return 0;
}

static void siw_cm_llp_data_ready(struct sock *sk)
{
	struct siw_cep *cep;

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep)
		goto out;

	siw_dbg_cep(cep, "state: %d\n", cep->state);

	switch (cep->state) {
	case SIW_EPSTATE_RDMA_MODE:
	case SIW_EPSTATE_LISTENING:
		break;

	case SIW_EPSTATE_AWAIT_MPAREQ:
	case SIW_EPSTATE_AWAIT_MPAREP:
		siw_cm_queue_work(cep, SIW_CM_WORK_READ_MPAHDR);
		break;

	default:
		siw_dbg_cep(cep, "unexpected data, state %d\n", cep->state);
		break;
	}
out:
	read_unlock(&sk->sk_callback_lock);
}

static void siw_cm_llp_write_space(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	if (cep)
		siw_dbg_cep(cep, "state: %d\n", cep->state);
}

static void siw_cm_llp_error_report(struct sock *sk)
{
	struct siw_cep *cep = sk_to_cep(sk);

	if (cep) {
		siw_dbg_cep(cep, "error %d, socket state: %d, cep state: %d\n",
			    sk->sk_err, sk->sk_state, cep->state);
		cep->sk_error_report(sk);
	}
}

static void siw_cm_llp_state_change(struct sock *sk)
{
	struct siw_cep *cep;
	void (*orig_state_change)(struct sock *s);

	read_lock(&sk->sk_callback_lock);

	cep = sk_to_cep(sk);
	if (!cep) {
		/* endpoint already disassociated */
		read_unlock(&sk->sk_callback_lock);
		return;
	}
	orig_state_change = cep->sk_state_change;

	siw_dbg_cep(cep, "state: %d\n", cep->state);

	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		/*
		 * handle accepting socket as special case where only
		 * new connection is possible
		 */
		siw_cm_queue_work(cep, SIW_CM_WORK_ACCEPT);
		break;

	case TCP_CLOSE:
	case TCP_CLOSE_WAIT:
		if (cep->qp)
			cep->qp->tx_ctx.tx_suspend = 1;
		siw_cm_queue_work(cep, SIW_CM_WORK_PEER_CLOSE);
		break;

	default:
		siw_dbg_cep(cep, "unexpected socket state %d\n", sk->sk_state);
	}
	read_unlock(&sk->sk_callback_lock);
	orig_state_change(sk);
}

static int kernel_bindconnect(struct socket *s, struct sockaddr *laddr,
			      struct sockaddr *raddr)
{
	int rv, flags = 0;
	size_t size = laddr->sa_family == AF_INET ?
		sizeof(struct sockaddr_in) : sizeof(struct sockaddr_in6);

	/*
	 * Make address available again asap.
	 */
	sock_set_reuseaddr(s->sk);

	rv = s->ops->bind(s, laddr, size);
	if (rv < 0)
		return rv;

	rv = s->ops->connect(s, raddr, size, flags);

	return rv < 0 ? rv : 0;
}

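/*
 * siw_connect - Connect to a remote peer (active side)
 *
 * Creates a TCP socket, binds and connects it (blocking), sends the
 * MPA Request and arms the MPA timer. The outcome is reported
 * asynchronously via IW_CM_EVENT_CONNECT_REPLY once the peer's MPA
 * Reply has been processed. A kernel ULP typically reaches this entry
 * point indirectly, through the rdma_cm/iw_cm layers, rather than by
 * calling it directly.
 */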
int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
	struct siw_device *sdev = to_siw_dev(id->device);
	struct siw_qp *qp;
	struct siw_cep *cep = NULL;
	struct socket *s = NULL;
	struct sockaddr *laddr = (struct sockaddr *)&id->local_addr,
			*raddr = (struct sockaddr *)&id->remote_addr;
	bool p2p_mode = peer_to_peer, v4 = true;
	u16 pd_len = params->private_data_len;
	int version = mpa_version, rv;

	if (pd_len > MPA_MAX_PRIVDATA)
		return -EINVAL;

	if (params->ird > sdev->attrs.max_ird ||
	    params->ord > sdev->attrs.max_ord)
		return -ENOMEM;

	if (laddr->sa_family == AF_INET6)
		v4 = false;
	else if (laddr->sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Respect any iwarp port mapping: Use mapped remote address
	 * if valid. Local address must not be mapped, since siw
	 * uses kernel TCP stack.
	 */
	if ((v4 && to_sockaddr_in(id->remote_addr).sin_port != 0) ||
	    to_sockaddr_in6(id->remote_addr).sin6_port != 0)
		raddr = (struct sockaddr *)&id->m_remote_addr;

	qp = siw_qp_id2obj(sdev, params->qpn);
	if (!qp) {
		WARN(1, "[QP %u] does not exist\n", params->qpn);
		rv = -EINVAL;
		goto error;
	}
	siw_dbg_qp(qp, "pd_len %d, laddr %pISp, raddr %pISp\n", pd_len, laddr,
		   raddr);

	rv = sock_create(v4 ? AF_INET : AF_INET6, SOCK_STREAM, IPPROTO_TCP, &s);
	if (rv < 0)
		goto error;

	/*
	 * NOTE: For simplification, connect() is called in blocking
	 * mode. Might be reconsidered for async connection setup at
	 * TCP level.
	 */
	rv = kernel_bindconnect(s, laddr, raddr);
	if (rv != 0) {
		siw_dbg_qp(qp, "kernel_bindconnect: error %d\n", rv);
		goto error;
	}
	if (siw_tcp_nagle == false)
		tcp_sock_set_nodelay(s->sk);
	cep = siw_cep_alloc(sdev);
	if (!cep) {
		rv = -ENOMEM;
		goto error;
	}
	siw_cep_set_inuse(cep);

	/* Associate QP with CEP */
	siw_cep_get(cep);
	qp->cep = cep;

	/* siw_qp_get(qp) already done by QP lookup */
	cep->qp = qp;

	id->add_ref(id);
	cep->cm_id = id;

	/*
	 * 4: Allocate a sufficient number of work elements
	 * to allow concurrent handling of local + peer close
	 * events, MPA header processing + MPA timeout.
	 */
	rv = siw_cm_alloc_work(cep, 4);
	if (rv != 0) {
		rv = -ENOMEM;
		goto error;
	}
	cep->ird = params->ird;
	cep->ord = params->ord;

	if (p2p_mode && cep->ord == 0)
		cep->ord = 1;

	cep->state = SIW_EPSTATE_CONNECTING;

	/*
	 * Associate CEP with socket
	 */
	siw_cep_socket_assoc(cep, s);

	cep->state = SIW_EPSTATE_AWAIT_MPAREP;

	/*
	 * Set MPA Request bits: CRC if required, no MPA Markers,
	 * MPA Rev. according to module parameter 'mpa_version', Key 'Request'.
	 */
	cep->mpa.hdr.params.bits = 0;
	if (version > MPA_REVISION_2) {
		pr_warn("Setting MPA version to %u\n", MPA_REVISION_2);
		version = MPA_REVISION_2;
		/* Adjust also module parameter */
		mpa_version = MPA_REVISION_2;
	}
	__mpa_rr_set_revision(&cep->mpa.hdr.params.bits, version);

	if (try_gso)
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_GSO_EXP;

	if (mpa_crc_required)
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_CRC;

	/*
	 * If MPA version == 2:
	 * o Include ORD and IRD.
	 * o Indicate peer-to-peer mode, if required by module
	 *   parameter 'peer_to_peer'.
	 */
	if (version == MPA_REVISION_2) {
		cep->enhanced_rdma_conn_est = true;
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_ENHANCED;

		cep->mpa.v2_ctrl.ird = htons(cep->ird);
		cep->mpa.v2_ctrl.ord = htons(cep->ord);

		if (p2p_mode) {
			cep->mpa.v2_ctrl.ird |= MPA_V2_PEER_TO_PEER;
			cep->mpa.v2_ctrl.ord |= rtr_type;
		}
		/* Remember own P2P mode requested */
		cep->mpa.v2_ctrl_req.ird = cep->mpa.v2_ctrl.ird;
		cep->mpa.v2_ctrl_req.ord = cep->mpa.v2_ctrl.ord;
	}
	memcpy(cep->mpa.hdr.key, MPA_KEY_REQ, 16);

	rv = siw_send_mpareqrep(cep, params->private_data, pd_len);
	/*
	 * Reset private data.
	 */
	cep->mpa.hdr.params.pd_len = 0;

	if (rv >= 0) {
		rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
		if (!rv) {
			siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
			siw_cep_set_free(cep);
			return 0;
		}
	}
error:
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_socket_disassoc(s);
		sock_release(s);
		cep->sock = NULL;

		cep->qp = NULL;

		cep->cm_id = NULL;
		id->rem_ref(id);
		siw_cep_put(cep);

		qp->cep = NULL;
		siw_cep_put(cep);

		cep->state = SIW_EPSTATE_CLOSED;

		siw_cep_set_free(cep);

		siw_cep_put(cep);

	} else if (s) {
		sock_release(s);
	}
	if (qp)
		siw_qp_put(qp);

	return rv;
}

/*
 * siw_accept - Let SoftiWARP accept an RDMA connection request
 *
 * @id:		New connection management id to be used for accepted
 *		connection request
 * @params:	Connection parameters provided by ULP for accepting connection
 *
 * Transition QP to RTS state, associate new CM id @id with accepted CEP
 * and get prepared for TCP input by installing socket callbacks.
 * Then send MPA Reply and generate the "connection established" event.
 * Socket callbacks must be installed before sending MPA Reply, because
 * the latter may cause a first RDMA message to arrive from the RDMA Initiator
 * side very quickly, at which time the socket callbacks must be ready.
 */
int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
{
	struct siw_device *sdev = to_siw_dev(id->device);
	struct siw_cep *cep = (struct siw_cep *)id->provider_data;
	struct siw_qp *qp;
	struct siw_qp_attrs qp_attrs;
	int rv, max_priv_data = MPA_MAX_PRIVDATA;
	bool wait_for_peer_rts = false;

	siw_cep_set_inuse(cep);
	siw_cep_put(cep);

	/* Free lingering inbound private data */
	if (cep->mpa.hdr.params.pd_len) {
		cep->mpa.hdr.params.pd_len = 0;
		kfree(cep->mpa.pdata);
		cep->mpa.pdata = NULL;
	}
	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep);

		return -ECONNRESET;
	}
	qp = siw_qp_id2obj(sdev, params->qpn);
	if (!qp) {
		WARN(1, "[QP %d] does not exist\n", params->qpn);
		siw_cep_set_free(cep);
		siw_cep_put(cep);

		return -EINVAL;
	}
	down_write(&qp->state_lock);
	if (qp->attrs.state > SIW_QP_STATE_RTR) {
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	siw_dbg_cep(cep, "[QP %d]\n", params->qpn);

	if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");
		qp->tx_ctx.gso_seg_limit = 0;
	}
	if (params->ord > sdev->attrs.max_ord ||
	    params->ird > sdev->attrs.max_ird) {
		siw_dbg_cep(
			cep,
			"[QP %u]: ord %d (max %d), ird %d (max %d)\n",
			qp_id(qp), params->ord, sdev->attrs.max_ord,
			params->ird, sdev->attrs.max_ird);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	if (cep->enhanced_rdma_conn_est)
		max_priv_data -= sizeof(struct mpa_v2_data);

	if (params->private_data_len > max_priv_data) {
		siw_dbg_cep(
			cep,
			"[QP %u]: private data length: %d (max %d)\n",
			qp_id(qp), params->private_data_len, max_priv_data);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;
	}
	if (cep->enhanced_rdma_conn_est) {
		if (params->ord > cep->ord) {
			if (relaxed_ird_negotiation) {
				params->ord = cep->ord;
			} else {
				cep->ird = params->ird;
				cep->ord = params->ord;
				rv = -EINVAL;
				up_write(&qp->state_lock);
				goto error;
			}
		}
		if (params->ird < cep->ird) {
			if (relaxed_ird_negotiation &&
			    cep->ird <= sdev->attrs.max_ird)
				params->ird = cep->ird;
			else {
				rv = -ENOMEM;
				up_write(&qp->state_lock);
				goto error;
			}
		}
		if (cep->mpa.v2_ctrl.ord &
		    (MPA_V2_RDMA_WRITE_RTR | MPA_V2_RDMA_READ_RTR))
			wait_for_peer_rts = true;
		/*
		 * Signal back negotiated IRD and ORD values
		 */
		cep->mpa.v2_ctrl.ord =
			htons(params->ord & MPA_IRD_ORD_MASK) |
			(cep->mpa.v2_ctrl.ord & ~MPA_V2_MASK_IRD_ORD);
		cep->mpa.v2_ctrl.ird =
			htons(params->ird & MPA_IRD_ORD_MASK) |
			(cep->mpa.v2_ctrl.ird & ~MPA_V2_MASK_IRD_ORD);
	}
	cep->ird = params->ird;
	cep->ord = params->ord;

	cep->cm_id = id;
	id->add_ref(id);

	memset(&qp_attrs, 0, sizeof(qp_attrs));
	qp_attrs.orq_size = cep->ord;
	qp_attrs.irq_size = cep->ird;
	qp_attrs.sk = cep->sock;
	if (cep->mpa.hdr.params.bits & MPA_RR_FLAG_CRC)
		qp_attrs.flags = SIW_MPA_CRC;
	qp_attrs.state = SIW_QP_STATE_RTS;

	siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));

	/* Associate QP with CEP */
	siw_cep_get(cep);
	qp->cep = cep;

	/* siw_qp_get(qp) already done by QP lookup */
	cep->qp = qp;

	cep->state = SIW_EPSTATE_RDMA_MODE;

	/* Move socket RX/TX under QP control */
	rv = siw_qp_modify(qp, &qp_attrs,
			   SIW_QP_ATTR_STATE | SIW_QP_ATTR_LLP_HANDLE |
			   SIW_QP_ATTR_ORD | SIW_QP_ATTR_IRD |
			   SIW_QP_ATTR_MPA);
	up_write(&qp->state_lock);

	if (rv)
		goto error;

	siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
		    qp_id(qp), params->private_data_len);

	rv = siw_send_mpareqrep(cep, params->private_data,
				params->private_data_len);
	if (rv != 0)
		goto error;

	if (wait_for_peer_rts) {
		siw_sk_assign_rtr_upcalls(cep);
	} else {
		siw_qp_socket_assoc(cep, qp);
		rv = siw_cm_upcall(cep, IW_CM_EVENT_ESTABLISHED, 0);
		if (rv)
			goto error;
	}
	siw_cep_set_free(cep);

	return 0;
error:
	siw_socket_disassoc(cep->sock);
	sock_release(cep->sock);
	cep->sock = NULL;

	cep->state = SIW_EPSTATE_CLOSED;

	if (cep->cm_id) {
		cep->cm_id->rem_ref(id);
		cep->cm_id = NULL;
	}
	if (qp->cep) {
		siw_cep_put(cep);
		qp->cep = NULL;
	}
	cep->qp = NULL;
	siw_qp_put(qp);

	siw_cep_set_free(cep);
	siw_cep_put(cep);

	return rv;
}

/*
 * siw_reject()
 *
 * Local connection reject case. Send private data back to peer,
 * close connection and dereference connection id.
 */
int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
{
	struct siw_cep *cep = (struct siw_cep *)id->provider_data;

	siw_cep_set_inuse(cep);
	siw_cep_put(cep);

	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep); /* put last reference */

		return -ECONNRESET;
	}
	siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
		    pd_len);

	if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {
		cep->mpa.hdr.params.bits |= MPA_RR_FLAG_REJECT; /* reject */
		siw_send_mpareqrep(cep, pdata, pd_len);
	}
	siw_socket_disassoc(cep->sock);
	sock_release(cep->sock);
	cep->sock = NULL;

	cep->state = SIW_EPSTATE_CLOSED;

	siw_cep_set_free(cep);
	siw_cep_put(cep);

	return 0;
}

/*
 * siw_create_listen - Create resources for a listener's IWCM ID @id
 *
 * Starts listen on the socket address id->local_addr.
 *
 */
int siw_create_listen(struct iw_cm_id *id, int backlog)
{
	struct socket *s;
	struct siw_cep *cep = NULL;
	struct siw_device *sdev = to_siw_dev(id->device);
	int addr_family = id->local_addr.ss_family;
	int rv = 0;

	if (addr_family != AF_INET && addr_family != AF_INET6)
		return -EAFNOSUPPORT;

	rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
	if (rv < 0)
		return rv;

	/*
	 * Allow binding local port when still in TIME_WAIT from last close.
	 */
	sock_set_reuseaddr(s->sk);

	if (addr_family == AF_INET) {
		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);

		/* For wildcard addr, limit binding to current device only */
		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

		rv = s->ops->bind(s, (struct sockaddr *)laddr,
				  sizeof(struct sockaddr_in));
	} else {
		struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);

		/* For wildcard addr, limit binding to current device only */
		if (ipv6_addr_any(&laddr->sin6_addr))
			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;

		rv = s->ops->bind(s, (struct sockaddr *)laddr,
				  sizeof(struct sockaddr_in6));
	}
	if (rv) {
		siw_dbg(id->device, "socket bind error: %d\n", rv);
		goto error;
	}
	cep = siw_cep_alloc(sdev);
	if (!cep) {
		rv = -ENOMEM;
		goto error;
	}
	siw_cep_socket_assoc(cep, s);

	rv = siw_cm_alloc_work(cep, backlog);
	if (rv) {
		siw_dbg(id->device,
			"alloc_work error %d, backlog %d\n",
			rv, backlog);
		goto error;
	}
	rv = s->ops->listen(s, backlog);
	if (rv) {
		siw_dbg(id->device, "listen error %d\n", rv);
		goto error;
	}
	cep->cm_id = id;
	id->add_ref(id);

	/*
	 * In case of a wildcard rdma_listen on a multi-homed device,
	 * a listener's IWCM id is associated with more than one listening CEP.
	 *
	 * We currently use id->provider_data in three different ways:
	 *
	 * o For a listener's IWCM id, id->provider_data points to
	 *   the list_head of the list of listening CEPs.
	 *   Uses: siw_create_listen(), siw_destroy_listen()
	 *
	 * o For each accepted passive-side IWCM id, id->provider_data
	 *   points to the CEP itself. This is a consequence of
	 *   - siw_cm_upcall() setting event.provider_data = cep and
	 *   - the IWCM's cm_conn_req_handler() setting provider_data of the
	 *     new passive-side IWCM id equal to event.provider_data
	 *   Uses: siw_accept(), siw_reject()
	 *
	 * o For an active-side IWCM id, id->provider_data is not used at all.
	 *
	 */
	if (!id->provider_data) {
		id->provider_data =
			kmalloc(sizeof(struct list_head), GFP_KERNEL);
		if (!id->provider_data) {
			rv = -ENOMEM;
			goto error;
		}
		INIT_LIST_HEAD((struct list_head *)id->provider_data);
	}
	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
	cep->state = SIW_EPSTATE_LISTENING;

	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);

	return 0;

error:
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_cep_set_inuse(cep);

		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
		}
		cep->sock = NULL;
		siw_socket_disassoc(s);
		cep->state = SIW_EPSTATE_CLOSED;

		siw_cep_set_free(cep);
		siw_cep_put(cep);
	}
	sock_release(s);

	return rv;
}

static void siw_drop_listeners(struct iw_cm_id *id)
{
	struct list_head *p, *tmp;

	/*
	 * In case of a wildcard rdma_listen on a multi-homed device,
	 * a listener's IWCM id is associated with more than one listening CEP.
	 */
	list_for_each_safe(p, tmp, (struct list_head *)id->provider_data) {
		struct siw_cep *cep = list_entry(p, struct siw_cep, listenq);

		list_del(p);

		siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);

		siw_cep_set_inuse(cep);

		if (cep->cm_id) {
			cep->cm_id->rem_ref(cep->cm_id);
			cep->cm_id = NULL;
		}
		if (cep->sock) {
			siw_socket_disassoc(cep->sock);
			sock_release(cep->sock);
			cep->sock = NULL;
		}
		cep->state = SIW_EPSTATE_CLOSED;
		siw_cep_set_free(cep);
		siw_cep_put(cep);
	}
}

int siw_destroy_listen(struct iw_cm_id *id)
{
	if (!id->provider_data) {
		siw_dbg(id->device, "no cep(s)\n");
		return 0;
	}
	siw_drop_listeners(id);
	kfree(id->provider_data);
	id->provider_data = NULL;

	return 0;
}

int siw_cm_init(void)
{
	/*
	 * Use a singlethread workqueue for strict ordering
	 */
	siw_cm_wq = create_singlethread_workqueue("siw_cm_wq");
	if (!siw_cm_wq)
		return -ENOMEM;

	return 0;
}

void siw_cm_exit(void)
{
	if (siw_cm_wq) {
		flush_workqueue(siw_cm_wq);
		destroy_workqueue(siw_cm_wq);
	}
}