1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * RDMA Transport Layer
4 *
5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8 */
9
10 #undef pr_fmt
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12
13 #include <linux/module.h>
14 #include <linux/rculist.h>
15 #include <linux/random.h>
16
17 #include "rtrs-clt.h"
18 #include "rtrs-log.h"
19
20 #define RTRS_CONNECT_TIMEOUT_MS 30000
21 /*
22 * Wait a bit before trying to reconnect after a failure
23 * in order to give the server time to finish its cleanup, which
24 * otherwise leads to "false positive" failed reconnect attempts
25 */
26 #define RTRS_RECONNECT_BACKOFF 1000
27 /*
28 * Wait for additional random time between 0 and 8 seconds
29 * before starting to reconnect to avoid clients reconnecting
30 * all at once in case of a major network outage
31 */
32 #define RTRS_RECONNECT_SEED 8
33
34 #define FIRST_CONN 0x01
35
36 MODULE_DESCRIPTION("RDMA Transport Client");
37 MODULE_LICENSE("GPL");
38
39 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
40 static struct rtrs_rdma_dev_pd dev_pd = {
41 .ops = &dev_pd_ops
42 };
43
44 static struct workqueue_struct *rtrs_wq;
45 static struct class *rtrs_clt_dev_class;
46
47 static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
48 {
49 struct rtrs_clt_sess *sess;
50 bool connected = false;
51
52 rcu_read_lock();
53 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
54 connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
55 rcu_read_unlock();
56
57 return connected;
58 }
59
60 static struct rtrs_permit *
61 __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
62 {
63 size_t max_depth = clt->queue_depth;
64 struct rtrs_permit *permit;
65 int bit;
66
67 /*
68 * Adapted from null_blk get_tag(). Callers from different cpus may
69 * grab the same bit, since find_first_zero_bit is not atomic.
70 * But then the test_and_set_bit_lock will fail for all the
71 * callers but one, so that they will loop again.
72 * This way an explicit spinlock is not required.
73 */
74 do {
75 bit = find_first_zero_bit(clt->permits_map, max_depth);
76 if (unlikely(bit >= max_depth))
77 return NULL;
78 } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
79
80 permit = get_permit(clt, bit);
81 WARN_ON(permit->mem_id != bit);
82 permit->cpu_id = raw_smp_processor_id();
83 permit->con_type = con_type;
84
85 return permit;
86 }
87
88 static inline void __rtrs_put_permit(struct rtrs_clt *clt,
89 struct rtrs_permit *permit)
90 {
91 clear_bit_unlock(permit->mem_id, clt->permits_map);
92 }
93
94 /**
95 * rtrs_clt_get_permit() - allocates permit for future RDMA operation
96 * @clt: Current session
97 * @con_type: Type of connection to use with the permit
98 * @can_wait: Wait type
99 *
100 * Description:
101 * Allocates permit for the following RDMA operation. Permit is used
102 * to preallocate all resources and to propagate memory pressure
103 * up earlier.
104 *
105 * Context:
106 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
107 */
108 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
109 enum rtrs_clt_con_type con_type,
110 int can_wait)
111 {
112 struct rtrs_permit *permit;
113 DEFINE_WAIT(wait);
114
115 permit = __rtrs_get_permit(clt, con_type);
116 if (likely(permit) || !can_wait)
117 return permit;
118
119 do {
120 prepare_to_wait(&clt->permits_wait, &wait,
121 TASK_UNINTERRUPTIBLE);
122 permit = __rtrs_get_permit(clt, con_type);
123 if (likely(permit))
124 break;
125
126 io_schedule();
127 } while (1);
128
129 finish_wait(&clt->permits_wait, &wait);
130
131 return permit;
132 }
133 EXPORT_SYMBOL(rtrs_clt_get_permit);
134
135 /**
136 * rtrs_clt_put_permit() - puts allocated permit
137 * @clt: Current session
138 * @permit: Permit to be freed
139 *
140 * Context:
141 * Does not matter
142 */
143 void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
144 {
145 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
146 return;
147
148 __rtrs_put_permit(clt, permit);
149
150 /*
151 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
152 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
153 * it must have added itself to &clt->permits_wait before
154 * __rtrs_put_permit() finished.
155 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
156 */
157 if (waitqueue_active(&clt->permits_wait))
158 wake_up(&clt->permits_wait);
159 }
160 EXPORT_SYMBOL(rtrs_clt_put_permit);
161
162 void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
163 {
164 return permit + 1;
165 }
166 EXPORT_SYMBOL(rtrs_permit_to_pdu);
167
168 /**
169 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
170 * @sess: client session pointer
171 * @permit: permit for the allocation of the RDMA buffer
172 * Note:
173 * IO connections start from 1.
174 * Connection 0 is reserved for user messages.
175 */
176 static
177 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
178 struct rtrs_permit *permit)
179 {
180 int id = 0;
181
182 if (likely(permit->con_type == RTRS_IO_CON))
183 id = (permit->cpu_id % (sess->s.con_num - 1)) + 1;
184
185 return to_clt_con(sess->s.con[id]);
186 }
187
188 /**
189 * __rtrs_clt_change_state() - change the session state through session state
190 * machine.
191 *
192 * @sess: client session to change the state of.
193 * @new_state: state to change to.
194 *
195 * returns true if successful, false if the requested state cannot be set.
196 *
197 * Locks:
198 * state_wq lock must be held.
199 */
200 static bool __rtrs_clt_change_state(struct rtrs_clt_sess *sess,
201 enum rtrs_clt_state new_state)
202 {
203 enum rtrs_clt_state old_state;
204 bool changed = false;
205
206 lockdep_assert_held(&sess->state_wq.lock);
207
208 old_state = sess->state;
209 switch (new_state) {
210 case RTRS_CLT_CONNECTING:
211 switch (old_state) {
212 case RTRS_CLT_RECONNECTING:
213 changed = true;
214 fallthrough;
215 default:
216 break;
217 }
218 break;
219 case RTRS_CLT_RECONNECTING:
220 switch (old_state) {
221 case RTRS_CLT_CONNECTED:
222 case RTRS_CLT_CONNECTING_ERR:
223 case RTRS_CLT_CLOSED:
224 changed = true;
225 fallthrough;
226 default:
227 break;
228 }
229 break;
230 case RTRS_CLT_CONNECTED:
231 switch (old_state) {
232 case RTRS_CLT_CONNECTING:
233 changed = true;
234 fallthrough;
235 default:
236 break;
237 }
238 break;
239 case RTRS_CLT_CONNECTING_ERR:
240 switch (old_state) {
241 case RTRS_CLT_CONNECTING:
242 changed = true;
243 fallthrough;
244 default:
245 break;
246 }
247 break;
248 case RTRS_CLT_CLOSING:
249 switch (old_state) {
250 case RTRS_CLT_CONNECTING:
251 case RTRS_CLT_CONNECTING_ERR:
252 case RTRS_CLT_RECONNECTING:
253 case RTRS_CLT_CONNECTED:
254 changed = true;
255 fallthrough;
256 default:
257 break;
258 }
259 break;
260 case RTRS_CLT_CLOSED:
261 switch (old_state) {
262 case RTRS_CLT_CLOSING:
263 changed = true;
264 fallthrough;
265 default:
266 break;
267 }
268 break;
269 case RTRS_CLT_DEAD:
270 switch (old_state) {
271 case RTRS_CLT_CLOSED:
272 changed = true;
273 fallthrough;
274 default:
275 break;
276 }
277 break;
278 default:
279 break;
280 }
281 if (changed) {
282 sess->state = new_state;
283 wake_up_locked(&sess->state_wq);
284 }
285
286 return changed;
287 }
288
289 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
290 enum rtrs_clt_state old_state,
291 enum rtrs_clt_state new_state)
292 {
293 bool changed = false;
294
295 spin_lock_irq(&sess->state_wq.lock);
296 if (sess->state == old_state)
297 changed = __rtrs_clt_change_state(sess, new_state);
298 spin_unlock_irq(&sess->state_wq.lock);
299
300 return changed;
301 }
302
303 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
304 {
305 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
306
307 if (rtrs_clt_change_state_from_to(sess,
308 RTRS_CLT_CONNECTED,
309 RTRS_CLT_RECONNECTING)) {
310 struct rtrs_clt *clt = sess->clt;
311 unsigned int delay_ms;
312
313 /*
314 * Normal scenario, reconnect if we were successfully connected
315 */
316 delay_ms = clt->reconnect_delay_sec * 1000;
317 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
318 msecs_to_jiffies(delay_ms +
319 prandom_u32() % RTRS_RECONNECT_SEED));
320 } else {
321 /*
322 * Error can happen just on establishing new connection,
323 * so notify waiter with error state, waiter is responsible
324 * for cleaning the rest and reconnect if needed.
325 */
326 rtrs_clt_change_state_from_to(sess,
327 RTRS_CLT_CONNECTING,
328 RTRS_CLT_CONNECTING_ERR);
329 }
330 }
331
332 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
333 {
334 struct rtrs_clt_con *con = cq->cq_context;
335
336 if (unlikely(wc->status != IB_WC_SUCCESS)) {
337 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
338 ib_wc_status_msg(wc->status));
339 rtrs_rdma_error_recovery(con);
340 }
341 }
342
343 static struct ib_cqe fast_reg_cqe = {
344 .done = rtrs_clt_fast_reg_done
345 };
346
347 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
348 bool notify, bool can_wait);
349
350 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
351 {
352 struct rtrs_clt_io_req *req =
353 container_of(wc->wr_cqe, typeof(*req), inv_cqe);
354 struct rtrs_clt_con *con = cq->cq_context;
355
356 if (unlikely(wc->status != IB_WC_SUCCESS)) {
357 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
358 ib_wc_status_msg(wc->status));
359 rtrs_rdma_error_recovery(con);
360 }
361 req->need_inv = false;
362 if (likely(req->need_inv_comp))
363 complete(&req->inv_comp);
364 else
365 /* Complete request from INV callback */
366 complete_rdma_req(req, req->inv_errno, true, false);
367 }
368
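/*
 * Post an IB_WR_LOCAL_INV work request to invalidate the rkey of the
 * request's MR. The completion is routed to rtrs_clt_inv_rkey_done()
 * via req->inv_cqe.
 */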
369 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
370 {
371 struct rtrs_clt_con *con = req->con;
372 struct ib_send_wr wr = {
373 .opcode = IB_WR_LOCAL_INV,
374 .wr_cqe = &req->inv_cqe,
375 .send_flags = IB_SEND_SIGNALED,
376 .ex.invalidate_rkey = req->mr->rkey,
377 };
378 req->inv_cqe.done = rtrs_clt_inv_rkey_done;
379
380 return ib_post_send(con->c.qp, &wr, NULL);
381 }
382
383 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
384 bool notify, bool can_wait)
385 {
386 struct rtrs_clt_con *con = req->con;
387 struct rtrs_clt_sess *sess;
388 int err;
389
390 if (!req->in_use)
391 return;
392 if (WARN_ON(!req->con))
393 return;
394 sess = to_clt_sess(con->c.sess);
395
396 if (req->sg_cnt) {
397 if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
398 /*
399 * We are here to invalidate read requests
400 * ourselves. In the normal scenario the server should
401 * send an INV for every read request, but since
402 * we are here, one of two things has happened:
403 *
404 * 1. this is failover, when errno != 0
405 * and can_wait == 1,
406 *
407 * 2. something totally bad happened and
408 * server forgot to send INV, so we
409 * should do that ourselves.
410 */
411
412 if (likely(can_wait)) {
413 req->need_inv_comp = true;
414 } else {
415 /* This should be IO path, so always notify */
416 WARN_ON(!notify);
417 /* Save errno for INV callback */
418 req->inv_errno = errno;
419 }
420
421 err = rtrs_inv_rkey(req);
422 if (unlikely(err)) {
423 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
424 req->mr->rkey, err);
425 } else if (likely(can_wait)) {
426 wait_for_completion(&req->inv_comp);
427 } else {
428 /*
429 * Something went wrong, so request will be
430 * completed from INV callback.
431 */
432 WARN_ON_ONCE(1);
433
434 return;
435 }
436 }
437 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
438 req->sg_cnt, req->dir);
439 }
440 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
441 atomic_dec(&sess->stats->inflight);
442
443 req->in_use = false;
444 req->con = NULL;
445
446 if (notify)
447 req->conf(req->priv, errno);
448 }
449
450 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
451 struct rtrs_clt_io_req *req,
452 struct rtrs_rbuf *rbuf, u32 off,
453 u32 imm, struct ib_send_wr *wr)
454 {
455 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
456 enum ib_send_flags flags;
457 struct ib_sge sge;
458
459 if (unlikely(!req->sg_size)) {
460 rtrs_wrn(con->c.sess,
461 "Doing RDMA Write failed, no data supplied\n");
462 return -EINVAL;
463 }
464
465 /* user data and user message in the first list element */
466 sge.addr = req->iu->dma_addr;
467 sge.length = req->sg_size;
468 sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
469
470 /*
471 * From time to time we have to post signalled sends,
472 * or send queue will fill up and only QP reset can help.
473 */
474 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
475 0 : IB_SEND_SIGNALED;
476
477 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
478 req->sg_size, DMA_TO_DEVICE);
479
480 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
481 rbuf->rkey, rbuf->addr + off,
482 imm, flags, wr);
483 }
484
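/*
 * Look up the request by the msg_id carried in the immediate payload and
 * complete it. w_inval signals that the server already invalidated the
 * rkey (response with invalidation), so no local invalidation is needed.
 */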
485 static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
486 s16 errno, bool w_inval)
487 {
488 struct rtrs_clt_io_req *req;
489
490 if (WARN_ON(msg_id >= sess->queue_depth))
491 return;
492
493 req = &sess->reqs[msg_id];
494 /* Drop need_inv if server responded with send with invalidation */
495 req->need_inv &= !w_inval;
496 complete_rdma_req(req, errno, true, false);
497 }
498
499 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
500 {
501 struct rtrs_iu *iu;
502 int err;
503 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
504
505 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
506 iu = container_of(wc->wr_cqe, struct rtrs_iu,
507 cqe);
508 err = rtrs_iu_post_recv(&con->c, iu);
509 if (unlikely(err)) {
510 rtrs_err(con->c.sess, "post iu failed %d\n", err);
511 rtrs_rdma_error_recovery(con);
512 }
513 }
514
515 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
516 {
517 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
518 struct rtrs_msg_rkey_rsp *msg;
519 u32 imm_type, imm_payload;
520 bool w_inval = false;
521 struct rtrs_iu *iu;
522 u32 buf_id;
523 int err;
524
525 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
526
527 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
528
529 if (unlikely(wc->byte_len < sizeof(*msg))) {
530 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
531 wc->byte_len);
532 goto out;
533 }
534 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
535 iu->size, DMA_FROM_DEVICE);
536 msg = iu->buf;
537 if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
538 rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
539 le16_to_cpu(msg->type));
540 goto out;
541 }
542 buf_id = le16_to_cpu(msg->buf_id);
543 if (WARN_ON(buf_id >= sess->queue_depth))
544 goto out;
545
546 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
547 if (likely(imm_type == RTRS_IO_RSP_IMM ||
548 imm_type == RTRS_IO_RSP_W_INV_IMM)) {
549 u32 msg_id;
550
551 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
552 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
553
554 if (WARN_ON(buf_id != msg_id))
555 goto out;
556 sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
557 process_io_rsp(sess, msg_id, err, w_inval);
558 }
559 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
560 iu->size, DMA_FROM_DEVICE);
561 return rtrs_clt_recv_done(con, wc);
562 out:
563 rtrs_rdma_error_recovery(con);
564 }
565
566 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
567
568 static struct ib_cqe io_comp_cqe = {
569 .done = rtrs_clt_rdma_done
570 };
571
572 /*
573 * Post x2 empty WRs: first is for this RDMA with IMM,
574 * second is for RECV with INV, which happened earlier.
575 */
576 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
577 {
578 struct ib_recv_wr wr_arr[2], *wr;
579 int i;
580
581 memset(wr_arr, 0, sizeof(wr_arr));
582 for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
583 wr = &wr_arr[i];
584 wr->wr_cqe = cqe;
585 if (i)
586 /* Chain backwards */
587 wr->next = &wr_arr[i - 1];
588 }
589
590 return ib_post_recv(con->qp, wr, NULL);
591 }
592
593 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
594 {
595 struct rtrs_clt_con *con = cq->cq_context;
596 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
597 u32 imm_type, imm_payload;
598 bool w_inval = false;
599 int err;
600
601 if (unlikely(wc->status != IB_WC_SUCCESS)) {
602 if (wc->status != IB_WC_WR_FLUSH_ERR) {
603 rtrs_err(sess->clt, "RDMA failed: %s\n",
604 ib_wc_status_msg(wc->status));
605 rtrs_rdma_error_recovery(con);
606 }
607 return;
608 }
609 rtrs_clt_update_wc_stats(con);
610
611 switch (wc->opcode) {
612 case IB_WC_RECV_RDMA_WITH_IMM:
613 /*
614 * post_recv() RDMA write completions of IO reqs (read/write)
615 * and hb
616 */
617 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
618 return;
619 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
620 &imm_type, &imm_payload);
621 if (likely(imm_type == RTRS_IO_RSP_IMM ||
622 imm_type == RTRS_IO_RSP_W_INV_IMM)) {
623 u32 msg_id;
624
625 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
626 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
627
628 process_io_rsp(sess, msg_id, err, w_inval);
629 } else if (imm_type == RTRS_HB_MSG_IMM) {
630 WARN_ON(con->c.cid);
631 rtrs_send_hb_ack(&sess->s);
632 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
633 return rtrs_clt_recv_done(con, wc);
634 } else if (imm_type == RTRS_HB_ACK_IMM) {
635 WARN_ON(con->c.cid);
636 sess->s.hb_missed_cnt = 0;
637 if (sess->flags & RTRS_MSG_NEW_RKEY_F)
638 return rtrs_clt_recv_done(con, wc);
639 } else {
640 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
641 imm_type);
642 }
643 if (w_inval)
644 /*
645 * Post x2 empty WRs: first is for this RDMA with IMM,
646 * second is for RECV with INV, which happened earlier.
647 */
648 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
649 else
650 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
651 if (unlikely(err)) {
652 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
653 err);
654 rtrs_rdma_error_recovery(con);
655 break;
656 }
657 break;
658 case IB_WC_RECV:
659 /*
660 * Key invalidations from server side
661 */
662 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
663 wc->wc_flags & IB_WC_WITH_IMM));
664 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
665 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
666 if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
667 return rtrs_clt_recv_done(con, wc);
668
669 return rtrs_clt_rkey_rsp_done(con, wc);
670 }
671 break;
672 case IB_WC_RDMA_WRITE:
673 /*
674 * post_send() RDMA write completions of IO reqs (read/write)
675 */
676 break;
677
678 default:
679 rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
680 return;
681 }
682 }
683
684 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
685 {
686 int err, i;
687 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
688
689 for (i = 0; i < q_size; i++) {
690 if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
691 struct rtrs_iu *iu = &con->rsp_ius[i];
692
693 err = rtrs_iu_post_recv(&con->c, iu);
694 } else {
695 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
696 }
697 if (unlikely(err))
698 return err;
699 }
700
701 return 0;
702 }
703
704 static int post_recv_sess(struct rtrs_clt_sess *sess)
705 {
706 size_t q_size = 0;
707 int err, cid;
708
709 for (cid = 0; cid < sess->s.con_num; cid++) {
710 if (cid == 0)
711 q_size = SERVICE_CON_QUEUE_DEPTH;
712 else
713 q_size = sess->queue_depth;
714
715 /*
716 * x2 for RDMA read responses + FR key invalidations,
717 * RDMA writes do not require any FR registrations.
718 */
719 q_size *= 2;
720
721 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
722 if (unlikely(err)) {
723 rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
724 return err;
725 }
726 }
727
728 return 0;
729 }
730
731 struct path_it {
732 int i;
733 struct list_head skip_list;
734 struct rtrs_clt *clt;
735 struct rtrs_clt_sess *(*next_path)(struct path_it *it);
736 };
737
738 /**
739 * list_next_or_null_rr_rcu - get next list element in round-robin fashion.
740 * @head: the head for the list.
741 * @ptr: the list head to take the next element from.
742 * @type: the type of the struct this is embedded in.
743 * @memb: the name of the list_head within the struct.
744 *
745 * Next element returned in round-robin fashion, i.e. head will be skipped,
746 * but if list is observed as empty, NULL will be returned.
747 *
748 * This primitive may safely run concurrently with the _rcu list-mutation
749 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
750 */
751 #define list_next_or_null_rr_rcu(head, ptr, type, memb) \
752 ({ \
753 list_next_or_null_rcu(head, ptr, type, memb) ?: \
754 list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \
755 type, memb); \
756 })
757
758 /**
759 * get_next_path_rr() - Returns path in round-robin fashion.
760 * @it: the path pointer
761 *
762 * Related to @MP_POLICY_RR
763 *
764 * Locks:
765 * rcu_read_lock() must be held.
766 */
767 static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
768 {
769 struct rtrs_clt_sess __rcu **ppcpu_path;
770 struct rtrs_clt_sess *path;
771 struct rtrs_clt *clt;
772
773 clt = it->clt;
774
775 /*
776 * Here we use two RCU objects: @paths_list and @pcpu_path
777 * pointer. See rtrs_clt_remove_path_from_arr() for details
778 * how that is handled.
779 */
780
781 ppcpu_path = this_cpu_ptr(clt->pcpu_path);
782 path = rcu_dereference(*ppcpu_path);
783 if (unlikely(!path))
784 path = list_first_or_null_rcu(&clt->paths_list,
785 typeof(*path), s.entry);
786 else
787 path = list_next_or_null_rr_rcu(&clt->paths_list,
788 &path->s.entry,
789 typeof(*path),
790 s.entry);
791 rcu_assign_pointer(*ppcpu_path, path);
792
793 return path;
794 }
795
796 /**
797 * get_next_path_min_inflight() - Returns path with minimal inflight count.
798 * @it: the path pointer
799 *
800 * Related to @MP_POLICY_MIN_INFLIGHT
801 *
802 * Locks:
803 * rcu_read_lock() must be held.
804 */
805 static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
806 {
807 struct rtrs_clt_sess *min_path = NULL;
808 struct rtrs_clt *clt = it->clt;
809 struct rtrs_clt_sess *sess;
810 int min_inflight = INT_MAX;
811 int inflight;
812
813 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
814 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
815 continue;
816
817 if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
818 continue;
819
820 inflight = atomic_read(&sess->stats->inflight);
821
822 if (inflight < min_inflight) {
823 min_inflight = inflight;
824 min_path = sess;
825 }
826 }
827
828 /*
829 * add the path to the skip list, so that next time we can get
830 * a different one
831 */
832 if (min_path)
833 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
834
835 return min_path;
836 }
837
838 static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
839 {
840 INIT_LIST_HEAD(&it->skip_list);
841 it->clt = clt;
842 it->i = 0;
843
844 if (clt->mp_policy == MP_POLICY_RR)
845 it->next_path = get_next_path_rr;
846 else
847 it->next_path = get_next_path_min_inflight;
848 }
849
850 static inline void path_it_deinit(struct path_it *it)
851 {
852 struct list_head *skip, *tmp;
853 /*
854 * The skip_list is used only for the MIN_INFLIGHT policy.
855 * We need to remove paths from it, so that next IO can insert
856 * paths (->mp_skip_entry) into a skip_list again.
857 */
858 list_for_each_safe(skip, tmp, &it->skip_list)
859 list_del_init(skip);
860 }
861
862 /**
863 * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
864 * about an inflight IO.
865 * The user buffer holding user control message (not data) is copied into
866 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
867 * also hold the control message of rtrs.
868 * @req: an io request holding information about IO.
869 * @sess: client session
870 * @conf: confirmation callback function to notify upper layer.
871 * @permit: permit for allocation of RDMA remote buffer
872 * @priv: private pointer
873 * @vec: kernel vector containing control message
874 * @usr_len: length of the user message
875 * @sg: scatter list for IO data
876 * @sg_cnt: number of scatter list entries
877 * @data_len: length of the IO data
878 * @dir: direction of the IO.
879 */
880 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
881 struct rtrs_clt_sess *sess,
882 void (*conf)(void *priv, int errno),
883 struct rtrs_permit *permit, void *priv,
884 const struct kvec *vec, size_t usr_len,
885 struct scatterlist *sg, size_t sg_cnt,
886 size_t data_len, int dir)
887 {
888 struct iov_iter iter;
889 size_t len;
890
891 req->permit = permit;
892 req->in_use = true;
893 req->usr_len = usr_len;
894 req->data_len = data_len;
895 req->sglist = sg;
896 req->sg_cnt = sg_cnt;
897 req->priv = priv;
898 req->dir = dir;
899 req->con = rtrs_permit_to_clt_con(sess, permit);
900 req->conf = conf;
901 req->need_inv = false;
902 req->need_inv_comp = false;
903 req->inv_errno = 0;
904
905 iov_iter_kvec(&iter, WRITE, vec, 1, usr_len);
906 len = _copy_from_iter(req->iu->buf, usr_len, &iter);
907 WARN_ON(len != usr_len);
908
909 reinit_completion(&req->inv_comp);
910 }
911
912 static struct rtrs_clt_io_req *
913 rtrs_clt_get_req(struct rtrs_clt_sess *sess,
914 void (*conf)(void *priv, int errno),
915 struct rtrs_permit *permit, void *priv,
916 const struct kvec *vec, size_t usr_len,
917 struct scatterlist *sg, size_t sg_cnt,
918 size_t data_len, int dir)
919 {
920 struct rtrs_clt_io_req *req;
921
922 req = &sess->reqs[permit->mem_id];
923 rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
924 sg, sg_cnt, data_len, dir);
925 return req;
926 }
927
928 static struct rtrs_clt_io_req *
929 rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
930 struct rtrs_clt_io_req *fail_req)
931 {
932 struct rtrs_clt_io_req *req;
933 struct kvec vec = {
934 .iov_base = fail_req->iu->buf,
935 .iov_len = fail_req->usr_len
936 };
937
938 req = &alive_sess->reqs[fail_req->permit->mem_id];
939 rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
940 fail_req->priv, &vec, fail_req->usr_len,
941 fail_req->sglist, fail_req->sg_cnt,
942 fail_req->data_len, fail_req->dir);
943 return req;
944 }
945
946 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
947 struct rtrs_clt_io_req *req,
948 struct rtrs_rbuf *rbuf,
949 u32 size, u32 imm)
950 {
951 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
952 struct ib_sge *sge = req->sge;
953 enum ib_send_flags flags;
954 struct scatterlist *sg;
955 size_t num_sge;
956 int i;
957
958 for_each_sg(req->sglist, sg, req->sg_cnt, i) {
959 sge[i].addr = sg_dma_address(sg);
960 sge[i].length = sg_dma_len(sg);
961 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
962 }
963 sge[i].addr = req->iu->dma_addr;
964 sge[i].length = size;
965 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
966
967 num_sge = 1 + req->sg_cnt;
968
969 /*
970 * From time to time we have to post signalled sends,
971 * or send queue will fill up and only QP reset can help.
972 */
973 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ?
974 0 : IB_SEND_SIGNALED;
975
976 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
977 size, DMA_TO_DEVICE);
978
979 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
980 rbuf->rkey, rbuf->addr, imm,
981 flags, NULL);
982 }
983
984 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
985 {
986 struct rtrs_clt_con *con = req->con;
987 struct rtrs_sess *s = con->c.sess;
988 struct rtrs_clt_sess *sess = to_clt_sess(s);
989 struct rtrs_msg_rdma_write *msg;
990
991 struct rtrs_rbuf *rbuf;
992 int ret, count = 0;
993 u32 imm, buf_id;
994
995 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
996
997 if (unlikely(tsize > sess->chunk_size)) {
998 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
999 tsize, sess->chunk_size);
1000 return -EMSGSIZE;
1001 }
1002 if (req->sg_cnt) {
1003 count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
1004 req->sg_cnt, req->dir);
1005 if (unlikely(!count)) {
1006 rtrs_wrn(s, "Write request failed, map failed\n");
1007 return -EINVAL;
1008 }
1009 }
1010 /* put rtrs msg after sg and user message */
1011 msg = req->iu->buf + req->usr_len;
1012 msg->type = cpu_to_le16(RTRS_MSG_WRITE);
1013 msg->usr_len = cpu_to_le16(req->usr_len);
1014
1015 /* rtrs message on server side will be after user data and message */
1016 imm = req->permit->mem_off + req->data_len + req->usr_len;
1017 imm = rtrs_to_io_req_imm(imm);
1018 buf_id = req->permit->mem_id;
1019 req->sg_size = tsize;
1020 rbuf = &sess->rbufs[buf_id];
1021
1022 /*
1023 * Update stats now, after request is successfully sent it is not
1024 * safe anymore to touch it.
1025 */
1026 rtrs_clt_update_all_stats(req, WRITE);
1027
1028 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf,
1029 req->usr_len + sizeof(*msg),
1030 imm);
1031 if (unlikely(ret)) {
1032 rtrs_err(s, "Write request failed: %d\n", ret);
1033 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1034 atomic_dec(&sess->stats->inflight);
1035 if (req->sg_cnt)
1036 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
1037 req->sg_cnt, req->dir);
1038 }
1039
1040 return ret;
1041 }
1042
1043 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
1044 {
1045 int nr;
1046
1047 /* Align the MR to a 4K page size to match the block virt boundary */
1048 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
1049 if (nr < 0)
1050 return nr;
1051 if (unlikely(nr < req->sg_cnt))
1052 return -EINVAL;
1053 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1054
1055 return nr;
1056 }
1057
1058 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
1059 {
1060 struct rtrs_clt_con *con = req->con;
1061 struct rtrs_sess *s = con->c.sess;
1062 struct rtrs_clt_sess *sess = to_clt_sess(s);
1063 struct rtrs_msg_rdma_read *msg;
1064 struct rtrs_ib_dev *dev;
1065
1066 struct ib_reg_wr rwr;
1067 struct ib_send_wr *wr = NULL;
1068
1069 int ret, count = 0;
1070 u32 imm, buf_id;
1071
1072 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1073
1074 s = &sess->s;
1075 dev = sess->s.dev;
1076
1077 if (unlikely(tsize > sess->chunk_size)) {
1078 rtrs_wrn(s,
1079 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
1080 tsize, sess->chunk_size);
1081 return -EMSGSIZE;
1082 }
1083
1084 if (req->sg_cnt) {
1085 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1086 req->dir);
1087 if (unlikely(!count)) {
1088 rtrs_wrn(s,
1089 "Read request failed, dma map failed\n");
1090 return -EINVAL;
1091 }
1092 }
1093 /* put our message into req->buf after user message*/
1094 msg = req->iu->buf + req->usr_len;
1095 msg->type = cpu_to_le16(RTRS_MSG_READ);
1096 msg->usr_len = cpu_to_le16(req->usr_len);
1097
1098 if (count) {
1099 ret = rtrs_map_sg_fr(req, count);
1100 if (ret < 0) {
1101 rtrs_err_rl(s,
1102 "Read request failed, failed to map fast reg. data, err: %d\n",
1103 ret);
1104 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1105 req->dir);
1106 return ret;
1107 }
1108 rwr = (struct ib_reg_wr) {
1109 .wr.opcode = IB_WR_REG_MR,
1110 .wr.wr_cqe = &fast_reg_cqe,
1111 .mr = req->mr,
1112 .key = req->mr->rkey,
1113 .access = (IB_ACCESS_LOCAL_WRITE |
1114 IB_ACCESS_REMOTE_WRITE),
1115 };
1116 wr = &rwr.wr;
1117
1118 msg->sg_cnt = cpu_to_le16(1);
1119 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
1120
1121 msg->desc[0].addr = cpu_to_le64(req->mr->iova);
1122 msg->desc[0].key = cpu_to_le32(req->mr->rkey);
1123 msg->desc[0].len = cpu_to_le32(req->mr->length);
1124
1125 /* Further invalidation is required */
1126 req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
1127
1128 } else {
1129 msg->sg_cnt = 0;
1130 msg->flags = 0;
1131 }
1132 /*
1133 * rtrs message will be after the space reserved for disk data and
1134 * user message
1135 */
1136 imm = req->permit->mem_off + req->data_len + req->usr_len;
1137 imm = rtrs_to_io_req_imm(imm);
1138 buf_id = req->permit->mem_id;
1139
1140 req->sg_size = sizeof(*msg);
1141 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
1142 req->sg_size += req->usr_len;
1143
1144 /*
1145 * Update stats now, after request is successfully sent it is not
1146 * safe anymore to touch it.
1147 */
1148 rtrs_clt_update_all_stats(req, READ);
1149
1150 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
1151 req->data_len, imm, wr);
1152 if (unlikely(ret)) {
1153 rtrs_err(s, "Read request failed: %d\n", ret);
1154 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
1155 atomic_dec(&sess->stats->inflight);
1156 req->need_inv = false;
1157 if (req->sg_cnt)
1158 ib_dma_unmap_sg(dev->ib_dev, req->sglist,
1159 req->sg_cnt, req->dir);
1160 }
1161
1162 return ret;
1163 }
1164
1165 /**
1166 * rtrs_clt_failover_req() - Try to find an active path for a failed request
1167 * @clt: clt context
1168 * @fail_req: a failed io request.
1169 */
1170 static int rtrs_clt_failover_req(struct rtrs_clt *clt,
1171 struct rtrs_clt_io_req *fail_req)
1172 {
1173 struct rtrs_clt_sess *alive_sess;
1174 struct rtrs_clt_io_req *req;
1175 int err = -ECONNABORTED;
1176 struct path_it it;
1177
1178 rcu_read_lock();
1179 for (path_it_init(&it, clt);
1180 (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
1181 it.i++) {
1182 if (unlikely(READ_ONCE(alive_sess->state) !=
1183 RTRS_CLT_CONNECTED))
1184 continue;
1185 req = rtrs_clt_get_copy_req(alive_sess, fail_req);
1186 if (req->dir == DMA_TO_DEVICE)
1187 err = rtrs_clt_write_req(req);
1188 else
1189 err = rtrs_clt_read_req(req);
1190 if (unlikely(err)) {
1191 req->in_use = false;
1192 continue;
1193 }
1194 /* Success path */
1195 rtrs_clt_inc_failover_cnt(alive_sess->stats);
1196 break;
1197 }
1198 path_it_deinit(&it);
1199 rcu_read_unlock();
1200
1201 return err;
1202 }
1203
1204 static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
1205 {
1206 struct rtrs_clt *clt = sess->clt;
1207 struct rtrs_clt_io_req *req;
1208 int i, err;
1209
1210 if (!sess->reqs)
1211 return;
1212 for (i = 0; i < sess->queue_depth; ++i) {
1213 req = &sess->reqs[i];
1214 if (!req->in_use)
1215 continue;
1216
1217 /*
1218 * Safely (without notification) complete the failed request.
1219 * After completion this request is still usable and can
1220 * be failed over to another path.
1221 */
1222 complete_rdma_req(req, -ECONNABORTED, false, true);
1223
1224 err = rtrs_clt_failover_req(clt, req);
1225 if (unlikely(err))
1226 /* Failover failed, notify anyway */
1227 req->conf(req->priv, err);
1228 }
1229 }
1230
1231 static void free_sess_reqs(struct rtrs_clt_sess *sess)
1232 {
1233 struct rtrs_clt_io_req *req;
1234 int i;
1235
1236 if (!sess->reqs)
1237 return;
1238 for (i = 0; i < sess->queue_depth; ++i) {
1239 req = &sess->reqs[i];
1240 if (req->mr)
1241 ib_dereg_mr(req->mr);
1242 kfree(req->sge);
1243 rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
1244 }
1245 kfree(sess->reqs);
1246 sess->reqs = NULL;
1247 }
1248
1249 static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
1250 {
1251 struct rtrs_clt_io_req *req;
1252 struct rtrs_clt *clt = sess->clt;
1253 int i, err = -ENOMEM;
1254
1255 sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
1256 GFP_KERNEL);
1257 if (!sess->reqs)
1258 return -ENOMEM;
1259
1260 for (i = 0; i < sess->queue_depth; ++i) {
1261 req = &sess->reqs[i];
1262 req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
1263 sess->s.dev->ib_dev,
1264 DMA_TO_DEVICE,
1265 rtrs_clt_rdma_done);
1266 if (!req->iu)
1267 goto out;
1268
1269 req->sge = kmalloc_array(clt->max_segments + 1,
1270 sizeof(*req->sge), GFP_KERNEL);
1271 if (!req->sge)
1272 goto out;
1273
1274 req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
1275 sess->max_pages_per_mr);
1276 if (IS_ERR(req->mr)) {
1277 err = PTR_ERR(req->mr);
1278 req->mr = NULL;
1279 pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
1280 sess->max_pages_per_mr);
1281 goto out;
1282 }
1283
1284 init_completion(&req->inv_comp);
1285 }
1286
1287 return 0;
1288
1289 out:
1290 free_sess_reqs(sess);
1291
1292 return err;
1293 }
1294
1295 static int alloc_permits(struct rtrs_clt *clt)
1296 {
1297 unsigned int chunk_bits;
1298 int err, i;
1299
1300 clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth),
1301 sizeof(long), GFP_KERNEL);
1302 if (!clt->permits_map) {
1303 err = -ENOMEM;
1304 goto out_err;
1305 }
1306 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1307 if (!clt->permits) {
1308 err = -ENOMEM;
1309 goto err_map;
1310 }
1311 chunk_bits = ilog2(clt->queue_depth - 1) + 1;
1312 for (i = 0; i < clt->queue_depth; i++) {
1313 struct rtrs_permit *permit;
1314
1315 permit = get_permit(clt, i);
1316 permit->mem_id = i;
1317 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
1318 }
1319
1320 return 0;
1321
1322 err_map:
1323 kfree(clt->permits_map);
1324 clt->permits_map = NULL;
1325 out_err:
1326 return err;
1327 }
1328
1329 static void free_permits(struct rtrs_clt *clt)
1330 {
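	/*
	 * Wait until every permit has been returned, i.e. all bits in
	 * permits_map are cleared again, before freeing the map.
	 */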
1331 if (clt->permits_map) {
1332 size_t sz = clt->queue_depth;
1333
1334 wait_event(clt->permits_wait,
1335 find_first_bit(clt->permits_map, sz) >= sz);
1336 }
1337 kfree(clt->permits_map);
1338 clt->permits_map = NULL;
1339 kfree(clt->permits);
1340 clt->permits = NULL;
1341 }
1342
1343 static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
1344 {
1345 struct ib_device *ib_dev;
1346 u64 max_pages_per_mr;
1347 int mr_page_shift;
1348
1349 ib_dev = sess->s.dev->ib_dev;
1350
1351 /*
1352 * Use the smallest page size supported by the HCA, down to a
1353 * minimum of 4096 bytes. We're unlikely to build large sglists
1354 * out of smaller entries.
1355 */
1356 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
1357 max_pages_per_mr = ib_dev->attrs.max_mr_size;
1358 do_div(max_pages_per_mr, (1ull << mr_page_shift));
1359 sess->max_pages_per_mr =
1360 min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
1361 ib_dev->attrs.max_fast_reg_page_list_len);
1362 sess->max_send_sge = ib_dev->attrs.max_send_sge;
1363 }
1364
1365 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
1366 enum rtrs_clt_state new_state,
1367 enum rtrs_clt_state *old_state)
1368 {
1369 bool changed;
1370
1371 spin_lock_irq(&sess->state_wq.lock);
1372 *old_state = sess->state;
1373 changed = __rtrs_clt_change_state(sess, new_state);
1374 spin_unlock_irq(&sess->state_wq.lock);
1375
1376 return changed;
1377 }
1378
1379 static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
1380 enum rtrs_clt_state new_state)
1381 {
1382 enum rtrs_clt_state old_state;
1383
1384 return rtrs_clt_change_state_get_old(sess, new_state, &old_state);
1385 }
1386
1387 static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
1388 {
1389 struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1390
1391 rtrs_rdma_error_recovery(con);
1392 }
1393
1394 static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
1395 {
1396 rtrs_init_hb(&sess->s, &io_comp_cqe,
1397 RTRS_HB_INTERVAL_MS,
1398 RTRS_HB_MISSED_MAX,
1399 rtrs_clt_hb_err_handler,
1400 rtrs_wq);
1401 }
1402
1403 static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess)
1404 {
1405 rtrs_start_hb(&sess->s);
1406 }
1407
1408 static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess)
1409 {
1410 rtrs_stop_hb(&sess->s);
1411 }
1412
1413 static void rtrs_clt_reconnect_work(struct work_struct *work);
1414 static void rtrs_clt_close_work(struct work_struct *work);
1415
1416 static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
1417 const struct rtrs_addr *path,
1418 size_t con_num, u16 max_segments,
1419 size_t max_segment_size)
1420 {
1421 struct rtrs_clt_sess *sess;
1422 int err = -ENOMEM;
1423 int cpu;
1424
1425 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
1426 if (!sess)
1427 goto err;
1428
1429 /* Extra connection for user messages */
1430 con_num += 1;
1431
1432 sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
1433 if (!sess->s.con)
1434 goto err_free_sess;
1435
1436 sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
1437 if (!sess->stats)
1438 goto err_free_con;
1439
1440 mutex_init(&sess->init_mutex);
1441 uuid_gen(&sess->s.uuid);
1442 memcpy(&sess->s.dst_addr, path->dst,
1443 rdma_addr_size((struct sockaddr *)path->dst));
1444
1445 /*
1446 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which
1447 * checks the sa_family to be non-zero. If user passed src_addr=NULL
1448 * the sess->src_addr will contain only zeros, which is then fine.
1449 */
1450 if (path->src)
1451 memcpy(&sess->s.src_addr, path->src,
1452 rdma_addr_size((struct sockaddr *)path->src));
1453 strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
1454 sess->s.con_num = con_num;
1455 sess->clt = clt;
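	/* worst-case IO size expressed in 4K pages (hence the shift by 12) */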
1456 sess->max_pages_per_mr = max_segments * max_segment_size >> 12;
1457 init_waitqueue_head(&sess->state_wq);
1458 sess->state = RTRS_CLT_CONNECTING;
1459 atomic_set(&sess->connected_cnt, 0);
1460 INIT_WORK(&sess->close_work, rtrs_clt_close_work);
1461 INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
1462 rtrs_clt_init_hb(sess);
1463
1464 sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
1465 if (!sess->mp_skip_entry)
1466 goto err_free_stats;
1467
1468 for_each_possible_cpu(cpu)
1469 INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
1470
1471 err = rtrs_clt_init_stats(sess->stats);
1472 if (err)
1473 goto err_free_percpu;
1474
1475 return sess;
1476
1477 err_free_percpu:
1478 free_percpu(sess->mp_skip_entry);
1479 err_free_stats:
1480 kfree(sess->stats);
1481 err_free_con:
1482 kfree(sess->s.con);
1483 err_free_sess:
1484 kfree(sess);
1485 err:
1486 return ERR_PTR(err);
1487 }
1488
1489 void free_sess(struct rtrs_clt_sess *sess)
1490 {
1491 free_percpu(sess->mp_skip_entry);
1492 mutex_destroy(&sess->init_mutex);
1493 kfree(sess->s.con);
1494 kfree(sess->rbufs);
1495 kfree(sess);
1496 }
1497
1498 static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
1499 {
1500 struct rtrs_clt_con *con;
1501
1502 con = kzalloc(sizeof(*con), GFP_KERNEL);
1503 if (!con)
1504 return -ENOMEM;
1505
1506 /* Map first two connections to the first CPU */
1507 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
1508 con->c.cid = cid;
1509 con->c.sess = &sess->s;
1510 atomic_set(&con->io_cnt, 0);
1511
1512 sess->s.con[cid] = &con->c;
1513
1514 return 0;
1515 }
1516
1517 static void destroy_con(struct rtrs_clt_con *con)
1518 {
1519 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1520
1521 sess->s.con[con->c.cid] = NULL;
1522 kfree(con);
1523 }
1524
1525 static int create_con_cq_qp(struct rtrs_clt_con *con)
1526 {
1527 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1528 u32 max_send_wr, max_recv_wr, cq_size;
1529 int err, cq_vector;
1530 struct rtrs_msg_rkey_rsp *rsp;
1531
1532 /*
1533 * This function can fail, but destroy_con_cq_qp() should still be
1534 * called, because create_con_cq_qp() is called on the cm event
1535 * path, so the caller/waiter never knows whether we failed before
1536 * or after create_con_cq_qp(). To solve this dilemma without
1537 * creating any additional flags, just allow destroy_con_cq_qp() to
1538 * be called many times.
1539 */
1540
1541 if (con->c.cid == 0) {
1542 /*
1543 * One completion for each receive and two for each send
1544 * (send request + registration)
1545 * + 2 for drain and heartbeat
1546 * in case qp gets into error state
1547 */
1548 max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
1549 max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
1550 /* We must be the first here */
1551 if (WARN_ON(sess->s.dev))
1552 return -EINVAL;
1553
1554 /*
1555 * The whole session uses device from user connection.
1556 * Be careful not to close user connection before ib dev
1557 * is gracefully put.
1558 */
1559 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1560 &dev_pd);
1561 if (!sess->s.dev) {
1562 rtrs_wrn(sess->clt,
1563 "rtrs_ib_dev_find_get_or_add(): no memory\n");
1564 return -ENOMEM;
1565 }
1566 sess->s.dev_ref = 1;
1567 query_fast_reg_mode(sess);
1568 } else {
1569 /*
1570 * Here we assume that session members are correctly set.
1571 * This is always true if user connection (cid == 0) is
1572 * established first.
1573 */
1574 if (WARN_ON(!sess->s.dev))
1575 return -EINVAL;
1576 if (WARN_ON(!sess->queue_depth))
1577 return -EINVAL;
1578
1579 /* Shared between connections */
1580 sess->s.dev_ref++;
1581 max_send_wr =
1582 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
1583 /* QD * (REQ + RSP + FR REGS or INVS) + drain */
1584 sess->queue_depth * 3 + 1);
1585 max_recv_wr =
1586 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
1587 sess->queue_depth * 3 + 1);
1588 }
1589 /* alloc iu to recv new rkey reply when server reports flags set */
1590 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1591 con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp),
1592 GFP_KERNEL, sess->s.dev->ib_dev,
1593 DMA_FROM_DEVICE,
1594 rtrs_clt_rdma_done);
1595 if (!con->rsp_ius)
1596 return -ENOMEM;
1597 con->queue_size = max_recv_wr;
1598 }
1599 cq_size = max_send_wr + max_recv_wr;
1600 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
1601 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge,
1602 cq_vector, cq_size, max_send_wr,
1603 max_recv_wr, IB_POLL_SOFTIRQ);
1604 /*
1605 * In case of error we do not bother to clean previous allocations,
1606 * since destroy_con_cq_qp() must be called.
1607 */
1608 return err;
1609 }
1610
1611 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1612 {
1613 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1614
1615 /*
1616 * Be careful here: destroy_con_cq_qp() can be called even
1617 * create_con_cq_qp() failed, see comments there.
1618 */
1619
1620 rtrs_cq_qp_destroy(&con->c);
1621 if (con->rsp_ius) {
1622 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size);
1623 con->rsp_ius = NULL;
1624 con->queue_size = 0;
1625 }
1626 if (sess->s.dev_ref && !--sess->s.dev_ref) {
1627 rtrs_ib_dev_put(sess->s.dev);
1628 sess->s.dev = NULL;
1629 }
1630 }
1631
1632 static void stop_cm(struct rtrs_clt_con *con)
1633 {
1634 rdma_disconnect(con->c.cm_id);
1635 if (con->c.qp)
1636 ib_drain_qp(con->c.qp);
1637 }
1638
1639 static void destroy_cm(struct rtrs_clt_con *con)
1640 {
1641 rdma_destroy_id(con->c.cm_id);
1642 con->c.cm_id = NULL;
1643 }
1644
1645 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1646 {
1647 struct rtrs_sess *s = con->c.sess;
1648 int err;
1649
1650 err = create_con_cq_qp(con);
1651 if (err) {
1652 rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
1653 return err;
1654 }
1655 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1656 if (err)
1657 rtrs_err(s, "Resolving route failed, err: %d\n", err);
1658
1659 return err;
1660 }
1661
1662 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1663 {
1664 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1665 struct rtrs_clt *clt = sess->clt;
1666 struct rtrs_msg_conn_req msg;
1667 struct rdma_conn_param param;
1668
1669 int err;
1670
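	/*
	 * Per the IB CM spec, 7 is the maximum value of these 3-bit retry
	 * fields; an RNR retry count of 7 means "retry indefinitely".
	 */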
1671 param = (struct rdma_conn_param) {
1672 .retry_count = 7,
1673 .rnr_retry_count = 7,
1674 .private_data = &msg,
1675 .private_data_len = sizeof(msg),
1676 };
1677
1678 msg = (struct rtrs_msg_conn_req) {
1679 .magic = cpu_to_le16(RTRS_MAGIC),
1680 .version = cpu_to_le16(RTRS_PROTO_VER),
1681 .cid = cpu_to_le16(con->c.cid),
1682 .cid_num = cpu_to_le16(sess->s.con_num),
1683 .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
1684 };
1685 msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
1686 uuid_copy(&msg.sess_uuid, &sess->s.uuid);
1687 uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1688
1689 err = rdma_connect_locked(con->c.cm_id, &param);
1690 if (err)
1691 rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
1692
1693 return err;
1694 }
1695
1696 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1697 struct rdma_cm_event *ev)
1698 {
1699 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1700 struct rtrs_clt *clt = sess->clt;
1701 const struct rtrs_msg_conn_rsp *msg;
1702 u16 version, queue_depth;
1703 int errno;
1704 u8 len;
1705
1706 msg = ev->param.conn.private_data;
1707 len = ev->param.conn.private_data_len;
1708 if (len < sizeof(*msg)) {
1709 rtrs_err(clt, "Invalid RTRS connection response\n");
1710 return -ECONNRESET;
1711 }
1712 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1713 rtrs_err(clt, "Invalid RTRS magic\n");
1714 return -ECONNRESET;
1715 }
1716 version = le16_to_cpu(msg->version);
1717 if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1718 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1719 version >> 8, RTRS_PROTO_VER_MAJOR);
1720 return -ECONNRESET;
1721 }
1722 errno = le16_to_cpu(msg->errno);
1723 if (errno) {
1724 rtrs_err(clt, "Invalid RTRS message: errno %d\n",
1725 errno);
1726 return -ECONNRESET;
1727 }
1728 if (con->c.cid == 0) {
1729 queue_depth = le16_to_cpu(msg->queue_depth);
1730
1731 if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
1732 rtrs_err(clt, "Error: queue depth changed\n");
1733
1734 /*
1735 * Stop any more reconnection attempts
1736 */
1737 sess->reconnect_attempts = -1;
1738 rtrs_err(clt,
1739 "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
1740 return -ECONNRESET;
1741 }
1742
1743 if (!sess->rbufs) {
1744 kfree(sess->rbufs);
1745 sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
1746 GFP_KERNEL);
1747 if (!sess->rbufs)
1748 return -ENOMEM;
1749 }
1750 sess->queue_depth = queue_depth;
1751 sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1752 sess->max_io_size = le32_to_cpu(msg->max_io_size);
1753 sess->flags = le32_to_cpu(msg->flags);
1754 sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
1755
1756 /*
1757 * Global IO size is always a minimum.
1758 * If during a reconnection the server sends us a slightly higher
1759 * value, the client does not care and keeps using the cached minimum.
1760 *
1761 * Since we can have several sessions (paths) re-establishing
1762 * connections in parallel, use lock.
1763 */
1764 mutex_lock(&clt->paths_mutex);
1765 clt->queue_depth = sess->queue_depth;
1766 clt->max_io_size = min_not_zero(sess->max_io_size,
1767 clt->max_io_size);
1768 mutex_unlock(&clt->paths_mutex);
1769
1770 /*
1771 * Cache the hca_port and hca_name for sysfs
1772 */
1773 sess->hca_port = con->c.cm_id->port_num;
1774 scnprintf(sess->hca_name, sizeof(sess->hca_name), "%s",
1775 sess->s.dev->ib_dev->name);
1776 sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
1777 /* set for_new_clt, to allow future reconnect on any path */
1778 sess->for_new_clt = 1;
1779 }
1780
1781 return 0;
1782 }
1783
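/*
 * con->cm_err is used as a tri-state value: 0 means no CM outcome yet,
 * 1 marks a successfully established connection, and a negative value
 * carries the actual error (see flag_error_on_conn() and create_cm()).
 */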
1784 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1785 {
1786 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
1787
1788 atomic_inc(&sess->connected_cnt);
1789 con->cm_err = 1;
1790 }
1791
1792 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1793 struct rdma_cm_event *ev)
1794 {
1795 struct rtrs_sess *s = con->c.sess;
1796 const struct rtrs_msg_conn_rsp *msg;
1797 const char *rej_msg;
1798 int status, errno;
1799 u8 data_len;
1800
1801 status = ev->status;
1802 rej_msg = rdma_reject_msg(con->c.cm_id, status);
1803 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1804
1805 if (msg && data_len >= sizeof(*msg)) {
1806 errno = (int16_t)le16_to_cpu(msg->errno);
1807 if (errno == -EBUSY)
1808 rtrs_err(s,
1809 "Previous session is still exists on the server, please reconnect later\n");
1810 else
1811 rtrs_err(s,
1812 "Connect rejected: status %d (%s), rtrs errno %d\n",
1813 status, rej_msg, errno);
1814 } else {
1815 rtrs_err(s,
1816 "Connect rejected but with malformed message: status %d (%s)\n",
1817 status, rej_msg);
1818 }
1819
1820 return -ECONNRESET;
1821 }
1822
1823 static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
1824 {
1825 if (rtrs_clt_change_state(sess, RTRS_CLT_CLOSING))
1826 queue_work(rtrs_wq, &sess->close_work);
1827 if (wait)
1828 flush_work(&sess->close_work);
1829 }
1830
1831 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1832 {
1833 if (con->cm_err == 1) {
1834 struct rtrs_clt_sess *sess;
1835
1836 sess = to_clt_sess(con->c.sess);
1837 if (atomic_dec_and_test(&sess->connected_cnt))
1838
1839 wake_up(&sess->state_wq);
1840 }
1841 con->cm_err = cm_err;
1842 }
1843
1844 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
1845 struct rdma_cm_event *ev)
1846 {
1847 struct rtrs_clt_con *con = cm_id->context;
1848 struct rtrs_sess *s = con->c.sess;
1849 struct rtrs_clt_sess *sess = to_clt_sess(s);
1850 int cm_err = 0;
1851
1852 switch (ev->event) {
1853 case RDMA_CM_EVENT_ADDR_RESOLVED:
1854 cm_err = rtrs_rdma_addr_resolved(con);
1855 break;
1856 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1857 cm_err = rtrs_rdma_route_resolved(con);
1858 break;
1859 case RDMA_CM_EVENT_ESTABLISHED:
1860 cm_err = rtrs_rdma_conn_established(con, ev);
1861 if (likely(!cm_err)) {
1862 /*
1863 * Report success and wake up. Here we abuse state_wq,
1864 * i.e. wake up without state change, but we set cm_err.
1865 */
1866 flag_success_on_conn(con);
1867 wake_up(&sess->state_wq);
1868 return 0;
1869 }
1870 break;
1871 case RDMA_CM_EVENT_REJECTED:
1872 cm_err = rtrs_rdma_conn_rejected(con, ev);
1873 break;
1874 case RDMA_CM_EVENT_CONNECT_ERROR:
1875 case RDMA_CM_EVENT_UNREACHABLE:
1876 rtrs_wrn(s, "CM error event %d\n", ev->event);
1877 cm_err = -ECONNRESET;
1878 break;
1879 case RDMA_CM_EVENT_ADDR_ERROR:
1880 case RDMA_CM_EVENT_ROUTE_ERROR:
1881 cm_err = -EHOSTUNREACH;
1882 break;
1883 case RDMA_CM_EVENT_DISCONNECTED:
1884 case RDMA_CM_EVENT_ADDR_CHANGE:
1885 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1886 cm_err = -ECONNRESET;
1887 break;
1888 case RDMA_CM_EVENT_DEVICE_REMOVAL:
1889 /*
1890 * Device removal is a special case. Queue close and return 0.
1891 */
1892 rtrs_clt_close_conns(sess, false);
1893 return 0;
1894 default:
1895 rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event);
1896 cm_err = -ECONNRESET;
1897 break;
1898 }
1899
1900 if (cm_err) {
1901 /*
1902 * A CM error makes sense only while the connection is being
1903 * established; in other cases we rely on the normal reconnect procedure.
1904 */
1905 flag_error_on_conn(con, cm_err);
1906 rtrs_rdma_error_recovery(con);
1907 }
1908
1909 return 0;
1910 }
1911
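/*
 * Create the CM id for a single connection, resolve the address and then
 * wait on state_wq until either the CM handler reports a result via
 * con->cm_err or the session leaves the CONNECTING state (e.g. on device
 * removal). On any failure the CM/QP resources are torn down again.
 */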
1912 static int create_cm(struct rtrs_clt_con *con)
1913 {
1914 struct rtrs_sess *s = con->c.sess;
1915 struct rtrs_clt_sess *sess = to_clt_sess(s);
1916 struct rdma_cm_id *cm_id;
1917 int err;
1918
1919 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
1920 sess->s.dst_addr.ss_family == AF_IB ?
1921 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
1922 if (IS_ERR(cm_id)) {
1923 err = PTR_ERR(cm_id);
1924 rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
1925
1926 return err;
1927 }
1928 con->c.cm_id = cm_id;
1929 con->cm_err = 0;
1930 /* allow the port to be reused */
1931 err = rdma_set_reuseaddr(cm_id, 1);
1932 if (err != 0) {
1933 rtrs_err(s, "Set address reuse failed, err: %d\n", err);
1934 goto destroy_cm;
1935 }
1936 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
1937 (struct sockaddr *)&sess->s.dst_addr,
1938 RTRS_CONNECT_TIMEOUT_MS);
1939 if (err) {
1940 rtrs_err(s, "Failed to resolve address, err: %d\n", err);
1941 goto destroy_cm;
1942 }
1943 /*
1944 * Combine connection status and session events. This is needed
1945 * for waiting on two possible cases: cm_err has something meaningful
1946 * or the session state was changed to error by device removal.
1947 */
1948 err = wait_event_interruptible_timeout(
1949 sess->state_wq,
1950 con->cm_err || sess->state != RTRS_CLT_CONNECTING,
1951 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
1952 if (err == 0 || err == -ERESTARTSYS) {
1953 if (err == 0)
1954 err = -ETIMEDOUT;
1955 /* Timedout or interrupted */
1956 goto errr;
1957 }
1958 if (con->cm_err < 0) {
1959 err = con->cm_err;
1960 goto errr;
1961 }
1962 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
1963 /* Device removal */
1964 err = -ECONNABORTED;
1965 goto errr;
1966 }
1967
1968 return 0;
1969
1970 errr:
1971 stop_cm(con);
1972 /* It is safe to call destroy even if cq_qp was never initialized */
1973 destroy_con_cq_qp(con);
1974 destroy_cm:
1975 destroy_cm(con);
1976
1977 return err;
1978 }
1979
1980 static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
1981 {
1982 struct rtrs_clt *clt = sess->clt;
1983 int up;
1984
1985 /*
1986 * We can fire RECONNECTED event only when all paths were
1987 * connected on rtrs_clt_open(), then each was disconnected
1988 * and the first one connected again. That's why this nasty
1989 * game with counter value.
1990 */
1991
1992 mutex_lock(&clt->paths_ev_mutex);
1993 up = ++clt->paths_up;
1994 /*
1995 * Here it is safe to access paths_num directly, since the up counter
1996 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
1997 * in progress, thus path removals are impossible.
1998 */
1999 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
2000 clt->paths_up = clt->paths_num;
2001 else if (up == 1)
2002 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
2003 mutex_unlock(&clt->paths_ev_mutex);
2004
2005 /* Mark session as established */
2006 sess->established = true;
2007 sess->reconnect_attempts = 0;
2008 sess->stats->reconnects.successful_cnt++;
2009 }
2010
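/*
 * Counterpart of rtrs_clt_sess_up(): when the last established path goes
 * down, notify the upper layer with a DISCONNECTED link event.
 */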
2011 static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
2012 {
2013 struct rtrs_clt *clt = sess->clt;
2014
2015 if (!sess->established)
2016 return;
2017
2018 sess->established = false;
2019 mutex_lock(&clt->paths_ev_mutex);
2020 WARN_ON(!clt->paths_up);
2021 if (--clt->paths_up == 0)
2022 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2023 mutex_unlock(&clt->paths_ev_mutex);
2024 }
2025
2026 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
2027 {
2028 struct rtrs_clt_con *con;
2029 unsigned int cid;
2030
2031 WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
2032
2033 /*
2034 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
2035 * exactly in between. Start destroying after it finishes.
2036 */
2037 mutex_lock(&sess->init_mutex);
2038 mutex_unlock(&sess->init_mutex);
2039
2040 /*
2041 * All IO paths must observe !CONNECTED state before we
2042 * free everything.
2043 */
2044 synchronize_rcu();
2045
2046 rtrs_clt_stop_hb(sess);
2047
2048 /*
2049 * The order is utterly crucial: firstly disconnect and complete all
2050 * rdma requests with error (thus set in_use=false for requests),
2051 * then fail outstanding requests checking in_use for each, and
2052 * eventually notify upper layer about session disconnection.
2053 */
2054
2055 for (cid = 0; cid < sess->s.con_num; cid++) {
2056 if (!sess->s.con[cid])
2057 break;
2058 con = to_clt_con(sess->s.con[cid]);
2059 stop_cm(con);
2060 }
2061 fail_all_outstanding_reqs(sess);
2062 free_sess_reqs(sess);
2063 rtrs_clt_sess_down(sess);
2064
2065 /*
2066 * Wait for graceful shutdown, namely when peer side invokes
2067 * rdma_disconnect(). 'connected_cnt' is decremented only on
2068 * CM events, thus if the other side has crashed and hb has detected
2069 * that something is wrong, we will be stuck here for exactly the timeout,
2070 * since CM does not fire anything. That is fine, we are in no
2071 * hurry.
2072 */
2073 wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
2074 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2075
2076 for (cid = 0; cid < sess->s.con_num; cid++) {
2077 if (!sess->s.con[cid])
2078 break;
2079 con = to_clt_con(sess->s.con[cid]);
2080 destroy_con_cq_qp(con);
2081 destroy_cm(con);
2082 destroy_con(con);
2083 }
2084 }
2085
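/*
 * Atomically replace the per-CPU cached path pointer with @next, but only
 * if it still points to @sess; returns true if the exchange happened.
 */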
2086 static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
2087 struct rtrs_clt_sess *sess,
2088 struct rtrs_clt_sess *next)
2089 {
2090 struct rtrs_clt_sess **ppcpu_path;
2091
2092 /* Call cmpxchg() without sparse warnings */
2093 ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
2094 return sess == cmpxchg(ppcpu_path, sess, next);
2095 }
2096
2097 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
2098 {
2099 struct rtrs_clt *clt = sess->clt;
2100 struct rtrs_clt_sess *next;
2101 bool wait_for_grace = false;
2102 int cpu;
2103
2104 mutex_lock(&clt->paths_mutex);
2105 list_del_rcu(&sess->s.entry);
2106
2107 /* Make sure everybody observes path removal. */
2108 synchronize_rcu();
2109
2110 /*
2111 * At this point nobody sees @sess in the list, but still we have
2112 * dangling pointer @pcpu_path which _can_ point to @sess. Since
2113 * nobody can observe @sess in the list, we guarantee that IO path
2114 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2115 * to @sess, but can never again become @sess.
2116 */
2117
2118 /*
2119 * Decrement the paths number only after the grace period, because
2120 * the caller of do_each_path() must first observe the list without
2121 * the path and only then the decremented paths number.
2122 *
2123 * Otherwise there can be the following situation:
2124 * o Two paths exist and IO is coming.
2125 * o One path is removed:
2126 *       CPU#0                            CPU#1
2127 *   do_each_path():               rtrs_clt_remove_path_from_arr():
2128 *       path = get_next_path()
2129 *       ^^^                           list_del_rcu(path)
2130 *       [!CONNECTED path]             clt->paths_num--
2131 *                                         ^^^^^^^^^
2132 *   load clt->paths_num from 2 to 1
2133 *             ^^^^^^^^^
2134 *            sees 1
2135 *
2136 * path is observed as !CONNECTED, but do_each_path() loop
2137 * ends, because expression i < clt->paths_num is false.
2138 */
2139 clt->paths_num--;
2140
2141 /*
2142 * Get @next connection from current @sess which is going to be
2143 * removed. If @sess is the last element, then @next is NULL.
2144 */
2145 rcu_read_lock();
2146 next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
2147 typeof(*next), s.entry);
2148 rcu_read_unlock();
2149
2150 /*
2151 * @pcpu paths can still point to the path which is going to be
2152 * removed, so change the pointer manually.
2153 */
2154 for_each_possible_cpu(cpu) {
2155 struct rtrs_clt_sess __rcu **ppcpu_path;
2156
2157 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2158 if (rcu_dereference_protected(*ppcpu_path,
2159 lockdep_is_held(&clt->paths_mutex)) != sess)
2160 /*
2161 * synchronize_rcu() was called just after deleting the
2162 * entry from the list, thus the IO code path cannot
2163 * change the pointer back to the one which is going
2164 * to be removed; we are safe here.
2165 */
2166 continue;
2167
2168 /*
2169 * We race with IO code path, which also changes pointer,
2170 * thus we have to be careful not to overwrite it.
2171 */
2172 if (xchg_sessions(ppcpu_path, sess, next))
2173 /*
2174 * @ppcpu_path was successfully replaced with @next,
2175 * which means that someone could also have picked up
2176 * @sess and be dereferencing it right now, so waiting
2177 * for a grace period is required.
2178 */
2179 wait_for_grace = true;
2180 }
2181 if (wait_for_grace)
2182 synchronize_rcu();
2183
2184 mutex_unlock(&clt->paths_mutex);
2185 }
2186
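/*
 * Counterpart of rtrs_clt_remove_path_from_arr(): link a freshly allocated
 * path into the paths list under paths_mutex. The @addr argument is
 * currently unused in this function.
 */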
2187 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess,
2188 struct rtrs_addr *addr)
2189 {
2190 struct rtrs_clt *clt = sess->clt;
2191
2192 mutex_lock(&clt->paths_mutex);
2193 clt->paths_num++;
2194
2195 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2196 mutex_unlock(&clt->paths_mutex);
2197 }
2198
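/*
 * close_work handler: cancel a pending reconnect, tear down all
 * connections and finally mark the session CLOSED.
 */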
2199 static void rtrs_clt_close_work(struct work_struct *work)
2200 {
2201 struct rtrs_clt_sess *sess;
2202
2203 sess = container_of(work, struct rtrs_clt_sess, close_work);
2204
2205 cancel_delayed_work_sync(&sess->reconnect_dwork);
2206 rtrs_clt_stop_and_destroy_conns(sess);
2207 rtrs_clt_change_state(sess, RTRS_CLT_CLOSED);
2208 }
2209
2210 static int init_conns(struct rtrs_clt_sess *sess)
2211 {
2212 unsigned int cid;
2213 int err;
2214
2215 /*
2216 * On every new set of session connections increase the reconnect
2217 * counter to avoid clashes with previous, not yet closed,
2218 * sessions on the server side.
2219 */
2220 sess->s.recon_cnt++;
2221
2222 /* Establish all RDMA connections */
2223 for (cid = 0; cid < sess->s.con_num; cid++) {
2224 err = create_con(sess, cid);
2225 if (err)
2226 goto destroy;
2227
2228 err = create_cm(to_clt_con(sess->s.con[cid]));
2229 if (err) {
2230 destroy_con(to_clt_con(sess->s.con[cid]));
2231 goto destroy;
2232 }
2233 }
2234 err = alloc_sess_reqs(sess);
2235 if (err)
2236 goto destroy;
2237
2238 rtrs_clt_start_hb(sess);
2239
2240 return 0;
2241
2242 destroy:
2243 while (cid--) {
2244 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
2245
2246 stop_cm(con);
2247 destroy_con_cq_qp(con);
2248 destroy_cm(con);
2249 destroy_con(con);
2250 }
2251 /*
2252 * If we've never taken the async path and got an error, say,
2253 * doing rdma_resolve_addr(), switch to CONNECTING_ERR state
2254 * manually to keep reconnecting.
2255 */
2256 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
2257
2258 return err;
2259 }
2260
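/*
 * Completion of the info request send. On error move the session to
 * CONNECTING_ERR so that the handshake waiter in rtrs_send_sess_info()
 * wakes up and fails.
 */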
2261 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
2262 {
2263 struct rtrs_clt_con *con = cq->cq_context;
2264 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2265 struct rtrs_iu *iu;
2266
2267 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2268 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2269
2270 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2271 rtrs_err(sess->clt, "Sess info request send failed: %s\n",
2272 ib_wc_status_msg(wc->status));
2273 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
2274 return;
2275 }
2276
2277 rtrs_clt_update_wc_stats(con);
2278 }
2279
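/*
 * Parse the server's info response: validate the scatter list and split
 * every remote buffer descriptor into chunk_size pieces, filling one
 * sess->rbufs[] entry (addr + rkey) per queue slot.
 */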
2280 static int process_info_rsp(struct rtrs_clt_sess *sess,
2281 const struct rtrs_msg_info_rsp *msg)
2282 {
2283 unsigned int sg_cnt, total_len;
2284 int i, sgi;
2285
2286 sg_cnt = le16_to_cpu(msg->sg_cnt);
2287 if (unlikely(!sg_cnt))
2288 return -EINVAL;
2289 /*
2290 * Check if IB immediate data size is enough to hold the mem_id and
2291 * the offset inside the memory chunk.
2292 */
2293 if (unlikely((ilog2(sg_cnt - 1) + 1) +
2294 (ilog2(sess->chunk_size - 1) + 1) >
2295 MAX_IMM_PAYL_BITS)) {
2296 rtrs_err(sess->clt,
2297 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
2298 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
2299 return -EINVAL;
2300 }
2301 if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
2302 rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
2303 sg_cnt);
2304 return -EINVAL;
2305 }
2306 total_len = 0;
2307 for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
2308 const struct rtrs_sg_desc *desc = &msg->desc[sgi];
2309 u32 len, rkey;
2310 u64 addr;
2311
2312 addr = le64_to_cpu(desc->addr);
2313 rkey = le32_to_cpu(desc->key);
2314 len = le32_to_cpu(desc->len);
2315
2316 total_len += len;
2317
2318 if (unlikely(!len || (len % sess->chunk_size))) {
2319 rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
2320 len);
2321 return -EINVAL;
2322 }
2323 for ( ; len && i < sess->queue_depth; i++) {
2324 sess->rbufs[i].addr = addr;
2325 sess->rbufs[i].rkey = rkey;
2326
2327 len -= sess->chunk_size;
2328 addr += sess->chunk_size;
2329 }
2330 }
2331 /* Sanity check */
2332 if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
2333 rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
2334 return -EINVAL;
2335 }
2336 if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
2337 rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
2338 return -EINVAL;
2339 }
2340
2341 return 0;
2342 }
2343
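/*
 * Completion of the info response receive: validate the message, fill the
 * rbufs via process_info_rsp(), post the session receive buffers and move
 * the session to CONNECTED, or to CONNECTING_ERR on any failure.
 */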
2344 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
2345 {
2346 struct rtrs_clt_con *con = cq->cq_context;
2347 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
2348 struct rtrs_msg_info_rsp *msg;
2349 enum rtrs_clt_state state;
2350 struct rtrs_iu *iu;
2351 size_t rx_sz;
2352 int err;
2353
2354 state = RTRS_CLT_CONNECTING_ERR;
2355
2356 WARN_ON(con->c.cid);
2357 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2358 if (unlikely(wc->status != IB_WC_SUCCESS)) {
2359 rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
2360 ib_wc_status_msg(wc->status));
2361 goto out;
2362 }
2363 WARN_ON(wc->opcode != IB_WC_RECV);
2364
2365 if (unlikely(wc->byte_len < sizeof(*msg))) {
2366 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2367 wc->byte_len);
2368 goto out;
2369 }
2370 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
2371 iu->size, DMA_FROM_DEVICE);
2372 msg = iu->buf;
2373 if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
2374 rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
2375 le16_to_cpu(msg->type));
2376 goto out;
2377 }
2378 rx_sz = sizeof(*msg);
2379 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
2380 if (unlikely(wc->byte_len < rx_sz)) {
2381 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
2382 wc->byte_len);
2383 goto out;
2384 }
2385 err = process_info_rsp(sess, msg);
2386 if (unlikely(err))
2387 goto out;
2388
2389 err = post_recv_sess(sess);
2390 if (unlikely(err))
2391 goto out;
2392
2393 state = RTRS_CLT_CONNECTED;
2394
2395 out:
2396 rtrs_clt_update_wc_stats(con);
2397 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
2398 rtrs_clt_change_state(sess, state);
2399 }
2400
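/*
 * Handshake on the user connection (cid 0): post the receive for the info
 * response first, then send RTRS_MSG_INFO_REQ with our session name and
 * wait until the session leaves the CONNECTING state.
 */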
2401 static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
2402 {
2403 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
2404 struct rtrs_msg_info_req *msg;
2405 struct rtrs_iu *tx_iu, *rx_iu;
2406 size_t rx_sz;
2407 int err;
2408
2409 rx_sz = sizeof(struct rtrs_msg_info_rsp);
2410 rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH;
2411
2412 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
2413 sess->s.dev->ib_dev, DMA_TO_DEVICE,
2414 rtrs_clt_info_req_done);
2415 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
2416 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
2417 if (unlikely(!tx_iu || !rx_iu)) {
2418 err = -ENOMEM;
2419 goto out;
2420 }
2421 /* Prepare for getting info response */
2422 err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
2423 if (unlikely(err)) {
2424 rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2425 goto out;
2426 }
2427 rx_iu = NULL;
2428
2429 msg = tx_iu->buf;
2430 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
2431 memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
2432
2433 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
2434 tx_iu->size, DMA_TO_DEVICE);
2435
2436 /* Send info request */
2437 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
2438 if (unlikely(err)) {
2439 rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
2440 goto out;
2441 }
2442 tx_iu = NULL;
2443
2444 /* Wait for state change */
2445 wait_event_interruptible_timeout(sess->state_wq,
2446 sess->state != RTRS_CLT_CONNECTING,
2447 msecs_to_jiffies(
2448 RTRS_CONNECT_TIMEOUT_MS));
2449 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
2450 if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
2451 err = -ECONNRESET;
2452 else
2453 err = -ETIMEDOUT;
2454 goto out;
2455 }
2456
2457 out:
2458 if (tx_iu)
2459 rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
2460 if (rx_iu)
2461 rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
2462 if (unlikely(err))
2463 /* If we never took the async path because of allocation problems */
2464 rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING_ERR);
2465
2466 return err;
2467 }
2468
2469 /**
2470 * init_sess() - establishes all session connections and does handshake
2471 * @sess: client session.
2472 * In case of error a full close or reconnect procedure should be taken,
2473 * because reconnect or close async work can already have been started.
2474 */
2475 static int init_sess(struct rtrs_clt_sess *sess)
2476 {
2477 int err;
2478
2479 mutex_lock(&sess->init_mutex);
2480 err = init_conns(sess);
2481 if (err) {
2482 rtrs_err(sess->clt, "init_conns(), err: %d\n", err);
2483 goto out;
2484 }
2485 err = rtrs_send_sess_info(sess);
2486 if (err) {
2487 rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err);
2488 goto out;
2489 }
2490 rtrs_clt_sess_up(sess);
2491 out:
2492 mutex_unlock(&sess->init_mutex);
2493
2494 return err;
2495 }
2496
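/*
 * reconnect_dwork handler: stop and destroy the old connections, back off
 * for RTRS_RECONNECT_BACKOFF ms, then try init_sess() again. On failure
 * the work re-queues itself with a randomized delay; once
 * max_reconnect_attempts is reached the session is closed for good.
 */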
2497 static void rtrs_clt_reconnect_work(struct work_struct *work)
2498 {
2499 struct rtrs_clt_sess *sess;
2500 struct rtrs_clt *clt;
2501 unsigned int delay_ms;
2502 int err;
2503
2504 sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
2505 reconnect_dwork);
2506 clt = sess->clt;
2507
2508 if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
2509 return;
2510
2511 if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
2512 /* Close a session completely if max attempts is reached */
2513 rtrs_clt_close_conns(sess, false);
2514 return;
2515 }
2516 sess->reconnect_attempts++;
2517
2518 /* Stop everything */
2519 rtrs_clt_stop_and_destroy_conns(sess);
2520 msleep(RTRS_RECONNECT_BACKOFF);
2521 if (rtrs_clt_change_state(sess, RTRS_CLT_CONNECTING)) {
2522 err = init_sess(sess);
2523 if (err)
2524 goto reconnect_again;
2525 }
2526
2527 return;
2528
2529 reconnect_again:
2530 if (rtrs_clt_change_state(sess, RTRS_CLT_RECONNECTING)) {
2531 sess->stats->reconnects.fail_cnt++;
2532 delay_ms = clt->reconnect_delay_sec * 1000;
2533 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
2534 msecs_to_jiffies(delay_ms +
2535 prandom_u32() %
2536 RTRS_RECONNECT_SEED));
2537 }
2538 }
2539
2540 static void rtrs_clt_dev_release(struct device *dev)
2541 {
2542 struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
2543
2544 mutex_destroy(&clt->paths_ev_mutex);
2545 mutex_destroy(&clt->paths_mutex);
2546 kfree(clt);
2547 }
2548
2549 static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
2550 u16 port, size_t pdu_sz, void *priv,
2551 void (*link_ev)(void *priv,
2552 enum rtrs_clt_link_ev ev),
2553 unsigned int max_segments,
2554 size_t max_segment_size,
2555 unsigned int reconnect_delay_sec,
2556 unsigned int max_reconnect_attempts)
2557 {
2558 struct rtrs_clt *clt;
2559 int err;
2560
2561 if (!paths_num || paths_num > MAX_PATHS_NUM)
2562 return ERR_PTR(-EINVAL);
2563
2564 if (strlen(sessname) >= sizeof(clt->sessname))
2565 return ERR_PTR(-EINVAL);
2566
2567 clt = kzalloc(sizeof(*clt), GFP_KERNEL);
2568 if (!clt)
2569 return ERR_PTR(-ENOMEM);
2570
2571 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2572 if (!clt->pcpu_path) {
2573 kfree(clt);
2574 return ERR_PTR(-ENOMEM);
2575 }
2576
2577 clt->dev.class = rtrs_clt_dev_class;
2578 clt->dev.release = rtrs_clt_dev_release;
2579 uuid_gen(&clt->paths_uuid);
2580 INIT_LIST_HEAD_RCU(&clt->paths_list);
2581 clt->paths_num = paths_num;
2582 clt->paths_up = MAX_PATHS_NUM;
2583 clt->port = port;
2584 clt->pdu_sz = pdu_sz;
2585 clt->max_segments = max_segments;
2586 clt->max_segment_size = max_segment_size;
2587 clt->reconnect_delay_sec = reconnect_delay_sec;
2588 clt->max_reconnect_attempts = max_reconnect_attempts;
2589 clt->priv = priv;
2590 clt->link_ev = link_ev;
2591 clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2592 strlcpy(clt->sessname, sessname, sizeof(clt->sessname));
2593 init_waitqueue_head(&clt->permits_wait);
2594 mutex_init(&clt->paths_ev_mutex);
2595 mutex_init(&clt->paths_mutex);
2596 device_initialize(&clt->dev);
2597
2598 err = dev_set_name(&clt->dev, "%s", sessname);
2599 if (err)
2600 goto err_put;
2601
2602 /*
2603 * Suppress user space notification until
2604 * sysfs files are created
2605 */
2606 dev_set_uevent_suppress(&clt->dev, true);
2607 err = device_add(&clt->dev);
2608 if (err)
2609 goto err_put;
2610
2611 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2612 if (!clt->kobj_paths) {
2613 err = -ENOMEM;
2614 goto err_del;
2615 }
2616 err = rtrs_clt_create_sysfs_root_files(clt);
2617 if (err) {
2618 kobject_del(clt->kobj_paths);
2619 kobject_put(clt->kobj_paths);
2620 goto err_del;
2621 }
2622 dev_set_uevent_suppress(&clt->dev, false);
2623 kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2624
2625 return clt;
2626 err_del:
2627 device_del(&clt->dev);
2628 err_put:
2629 free_percpu(clt->pcpu_path);
2630 put_device(&clt->dev);
2631 return ERR_PTR(err);
2632 }
2633
2634 static void free_clt(struct rtrs_clt *clt)
2635 {
2636 free_percpu(clt->pcpu_path);
2637
2638 /*
2639 * release callback will free clt and destroy mutexes in last put
2640 */
2641 device_unregister(&clt->dev);
2642 }
2643
2644 /**
2645 * rtrs_clt_open() - Open a session to an RTRS server
2646 * @ops: holds the link event callback and the private pointer.
2647 * @sessname: name of the session
2648 * @paths: Paths to be established defined by their src and dst addresses
2649 * @paths_num: Number of elements in the @paths array
2650 * @port: port to be used by the RTRS session
2651 * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
2652 * @reconnect_delay_sec: time between reconnect tries
2653 * @max_segments: Max. number of segments per IO request
2654 * @max_segment_size: Max. size of one segment
2655 * @max_reconnect_attempts: Number of times to reconnect on error before giving
2656 * up, 0 for disabled, -1 for forever
2657 *
2658 * Starts session establishment with the rtrs_server. The function can block
2659 * up to ~2000ms before it returns.
2660 *
2661 * Return a valid pointer on success, otherwise an error pointer (ERR_PTR).
2662 */
2663 struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
2664 const char *sessname,
2665 const struct rtrs_addr *paths,
2666 size_t paths_num, u16 port,
2667 size_t pdu_sz, u8 reconnect_delay_sec,
2668 u16 max_segments,
2669 size_t max_segment_size,
2670 s16 max_reconnect_attempts)
2671 {
2672 struct rtrs_clt_sess *sess, *tmp;
2673 struct rtrs_clt *clt;
2674 int err, i;
2675
2676 clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
2677 ops->link_ev,
2678 max_segments, max_segment_size, reconnect_delay_sec,
2679 max_reconnect_attempts);
2680 if (IS_ERR(clt)) {
2681 err = PTR_ERR(clt);
2682 goto out;
2683 }
2684 for (i = 0; i < paths_num; i++) {
2685 struct rtrs_clt_sess *sess;
2686
2687 sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
2688 max_segments, max_segment_size);
2689 if (IS_ERR(sess)) {
2690 err = PTR_ERR(sess);
2691 goto close_all_sess;
2692 }
2693 if (!i)
2694 sess->for_new_clt = 1;
2695 list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
2696
2697 err = init_sess(sess);
2698 if (err) {
2699 list_del_rcu(&sess->s.entry);
2700 rtrs_clt_close_conns(sess, true);
2701 free_percpu(sess->stats->pcpu_stats);
2702 kfree(sess->stats);
2703 free_sess(sess);
2704 goto close_all_sess;
2705 }
2706
2707 err = rtrs_clt_create_sess_files(sess);
2708 if (err) {
2709 list_del_rcu(&sess->s.entry);
2710 rtrs_clt_close_conns(sess, true);
2711 free_percpu(sess->stats->pcpu_stats);
2712 kfree(sess->stats);
2713 free_sess(sess);
2714 goto close_all_sess;
2715 }
2716 }
2717 err = alloc_permits(clt);
2718 if (err)
2719 goto close_all_sess;
2720
2721 return clt;
2722
2723 close_all_sess:
2724 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2725 rtrs_clt_destroy_sess_files(sess, NULL);
2726 rtrs_clt_close_conns(sess, true);
2727 kobject_put(&sess->kobj);
2728 }
2729 rtrs_clt_destroy_sysfs_root_files(clt);
2730 rtrs_clt_destroy_sysfs_root_folders(clt);
2731 free_clt(clt);
2732
2733 out:
2734 return ERR_PTR(err);
2735 }
2736 EXPORT_SYMBOL(rtrs_clt_open);
2737
2738 /**
2739 * rtrs_clt_close() - Close a session
2740 * @clt: Session handle. Session is freed upon return.
2741 */
2742 void rtrs_clt_close(struct rtrs_clt *clt)
2743 {
2744 struct rtrs_clt_sess *sess, *tmp;
2745
2746 /* Firstly forbid sysfs access */
2747 rtrs_clt_destroy_sysfs_root_files(clt);
2748 rtrs_clt_destroy_sysfs_root_folders(clt);
2749
2750 /* Now it is safe to iterate over all paths without locks */
2751 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
2752 rtrs_clt_close_conns(sess, true);
2753 rtrs_clt_destroy_sess_files(sess, NULL);
2754 kobject_put(&sess->kobj);
2755 }
2756 free_permits(clt);
2757 free_clt(clt);
2758 }
2759 EXPORT_SYMBOL(rtrs_clt_close);
2760
2761 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
2762 {
2763 enum rtrs_clt_state old_state;
2764 int err = -EBUSY;
2765 bool changed;
2766
2767 changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
2768 &old_state);
2769 if (changed) {
2770 sess->reconnect_attempts = 0;
2771 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
2772 }
2773 if (changed || old_state == RTRS_CLT_RECONNECTING) {
2774 /*
2775 * flush_delayed_work() queues pending work for immediate
2776 * execution, so do the flush if we have queued something
2777 * right now or work is pending.
2778 */
2779 flush_delayed_work(&sess->reconnect_dwork);
2780 err = (READ_ONCE(sess->state) ==
2781 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
2782 }
2783
2784 return err;
2785 }
2786
2787 int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess)
2788 {
2789 rtrs_clt_close_conns(sess, true);
2790
2791 return 0;
2792 }
2793
2794 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
2795 const struct attribute *sysfs_self)
2796 {
2797 enum rtrs_clt_state old_state;
2798 bool changed;
2799
2800 /*
2801 * Continue stopping path till state was changed to DEAD or
2802 * state was observed as DEAD:
2803 * 1. State was changed to DEAD - we were fast and nobody
2804 * invoked rtrs_clt_reconnect(), which can again start
2805 * reconnecting.
2806 * 2. State was observed as DEAD - we have someone in parallel
2807 * removing the path.
2808 */
2809 do {
2810 rtrs_clt_close_conns(sess, true);
2811 changed = rtrs_clt_change_state_get_old(sess,
2812 RTRS_CLT_DEAD,
2813 &old_state);
2814 } while (!changed && old_state != RTRS_CLT_DEAD);
2815
2816 if (likely(changed)) {
2817 rtrs_clt_remove_path_from_arr(sess);
2818 rtrs_clt_destroy_sess_files(sess, sysfs_self);
2819 kobject_put(&sess->kobj);
2820 }
2821
2822 return 0;
2823 }
2824
2825 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
2826 {
2827 clt->max_reconnect_attempts = (unsigned int)value;
2828 }
2829
2830 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
2831 {
2832 return (int)clt->max_reconnect_attempts;
2833 }
2834
2835 /**
2836 * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2837 *
2838 * @dir: READ/WRITE
2839 * @ops: callback function to be called as confirmation, and the pointer.
2840 * @clt: Session
2841 * @permit: Preallocated permit
2842 * @vec: Message that is sent to server together with the request.
2843 * Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
2844 * Since the msg is copied internally it can be allocated on stack.
2845 * @nr: Number of elements in @vec.
2846 * @data_len: length of data sent to/from server
2847 * @sg: Pages to be sent/received to/from server.
2848 * @sg_cnt: Number of elements in the @sg
2849 *
2850 * Return:
2851 * 0: Success
2852 * <0: Error
2853 *
2854 * On dir=READ rtrs client will request a data transfer from Server to client.
2855 * The data that the server will respond with will be stored in @sg when
2856 * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
2857 * On dir=WRITE rtrs client will rdma write data in sg to server side.
2858 */
2859 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
2860 struct rtrs_clt *clt, struct rtrs_permit *permit,
2861 const struct kvec *vec, size_t nr, size_t data_len,
2862 struct scatterlist *sg, unsigned int sg_cnt)
2863 {
2864 struct rtrs_clt_io_req *req;
2865 struct rtrs_clt_sess *sess;
2866
2867 enum dma_data_direction dma_dir;
2868 int err = -ECONNABORTED, i;
2869 size_t usr_len, hdr_len;
2870 struct path_it it;
2871
2872 /* Get kvec length */
2873 for (i = 0, usr_len = 0; i < nr; i++)
2874 usr_len += vec[i].iov_len;
2875
2876 if (dir == READ) {
2877 hdr_len = sizeof(struct rtrs_msg_rdma_read) +
2878 sg_cnt * sizeof(struct rtrs_sg_desc);
2879 dma_dir = DMA_FROM_DEVICE;
2880 } else {
2881 hdr_len = sizeof(struct rtrs_msg_rdma_write);
2882 dma_dir = DMA_TO_DEVICE;
2883 }
2884
2885 rcu_read_lock();
2886 for (path_it_init(&it, clt);
2887 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
2888 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
2889 continue;
2890
2891 if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
2892 rtrs_wrn_rl(sess->clt,
2893 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
2894 dir == READ ? "Read" : "Write",
2895 usr_len, hdr_len, sess->max_hdr_size);
2896 err = -EMSGSIZE;
2897 break;
2898 }
2899 req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
2900 vec, usr_len, sg, sg_cnt, data_len,
2901 dma_dir);
2902 if (dir == READ)
2903 err = rtrs_clt_read_req(req);
2904 else
2905 err = rtrs_clt_write_req(req);
2906 if (unlikely(err)) {
2907 req->in_use = false;
2908 continue;
2909 }
2910 /* Success path */
2911 break;
2912 }
2913 path_it_deinit(&it);
2914 rcu_read_unlock();
2915
2916 return err;
2917 }
2918 EXPORT_SYMBOL(rtrs_clt_request);
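/*
 * Minimal usage sketch (hypothetical caller code; my_io, my_done and
 * my_hdr are made-up names, not part of this file):
 *
 *	struct rtrs_clt_req_ops ops = {
 *		.priv	 = my_io,
 *		.conf_fn = my_done,
 *	};
 *	struct kvec vec = {
 *		.iov_base = &my_hdr,
 *		.iov_len  = sizeof(my_hdr),
 *	};
 *
 *	err = rtrs_clt_request(WRITE, &ops, clt, permit, &vec, 1,
 *			       data_len, sg, sg_cnt);
 */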
2919
2920 /**
2921 * rtrs_clt_query() - queries RTRS session attributes
2922 * @clt: session pointer
2923 * @attr: query results for session attributes.
2924 * Returns:
2925 * 0 on success
2926 * -ECOMM no connection to the server
2927 */
2928 int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
2929 {
2930 if (!rtrs_clt_is_connected(clt))
2931 return -ECOMM;
2932
2933 attr->queue_depth = clt->queue_depth;
2934 attr->max_io_size = clt->max_io_size;
2935 attr->sess_kobj = &clt->dev.kobj;
2936 strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname));
2937
2938 return 0;
2939 }
2940 EXPORT_SYMBOL(rtrs_clt_query);
2941
2942 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
2943 struct rtrs_addr *addr)
2944 {
2945 struct rtrs_clt_sess *sess;
2946 int err;
2947
2948 sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments,
2949 clt->max_segment_size);
2950 if (IS_ERR(sess))
2951 return PTR_ERR(sess);
2952
2953 /*
2954 * It is totally safe to add a path in CONNECTING state: incoming
2955 * IO will never grab it. Also it is very important to add the
2956 * path before init, since init fires the LINK_CONNECTED event.
2957 */
2958 rtrs_clt_add_path_to_arr(sess, addr);
2959
2960 err = init_sess(sess);
2961 if (err)
2962 goto close_sess;
2963
2964 err = rtrs_clt_create_sess_files(sess);
2965 if (err)
2966 goto close_sess;
2967
2968 return 0;
2969
2970 close_sess:
2971 rtrs_clt_remove_path_from_arr(sess);
2972 rtrs_clt_close_conns(sess, true);
2973 free_percpu(sess->stats->pcpu_stats);
2974 kfree(sess->stats);
2975 free_sess(sess);
2976
2977 return err;
2978 }
2979
2980 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
2981 {
2982 if (!(dev->ib_dev->attrs.device_cap_flags &
2983 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
2984 pr_err("Memory registrations not supported.\n");
2985 return -ENOTSUPP;
2986 }
2987
2988 return 0;
2989 }
2990
2991 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
2992 .init = rtrs_clt_ib_dev_init
2993 };
2994
2995 static int __init rtrs_client_init(void)
2996 {
2997 rtrs_rdma_dev_pd_init(0, &dev_pd);
2998
2999 rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client");
3000 if (IS_ERR(rtrs_clt_dev_class)) {
3001 pr_err("Failed to create rtrs-client dev class\n");
3002 return PTR_ERR(rtrs_clt_dev_class);
3003 }
3004 rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
3005 if (!rtrs_wq) {
3006 class_destroy(rtrs_clt_dev_class);
3007 return -ENOMEM;
3008 }
3009
3010 return 0;
3011 }
3012
3013 static void __exit rtrs_client_exit(void)
3014 {
3015 destroy_workqueue(rtrs_wq);
3016 class_destroy(rtrs_clt_dev_class);
3017 rtrs_rdma_dev_pd_deinit(&dev_pd);
3018 }
3019
3020 module_init(rtrs_client_init);
3021 module_exit(rtrs_client_exit);
3022