1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * RDMA Transport Layer
4  *
5  * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6  * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7  * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8  */
9 
10 #undef pr_fmt
11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
12 
13 #include <linux/module.h>
14 #include <linux/rculist.h>
15 #include <linux/random.h>
16 
17 #include "rtrs-clt.h"
18 #include "rtrs-log.h"
19 #include "rtrs-clt-trace.h"
20 
21 #define RTRS_CONNECT_TIMEOUT_MS 30000
22 /*
23  * Wait a bit before trying to reconnect after a failure
24  * in order to give the server time to finish its cleanup, which
25  * otherwise leads to "false positive" failed reconnect attempts
26  */
27 #define RTRS_RECONNECT_BACKOFF 1000
28 /*
29  * Wait for additional random time between 0 and 8 seconds
30  * before starting to reconnect to avoid clients reconnecting
31  * all at once in case of a major network outage
32  */
33 #define RTRS_RECONNECT_SEED 8
34 
35 #define FIRST_CONN 0x01
36 /* limit to 128 * 4k = 512k max IO */
37 #define RTRS_MAX_SEGMENTS          128
38 
39 MODULE_DESCRIPTION("RDMA Transport Client");
40 MODULE_LICENSE("GPL");
41 
42 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
43 static struct rtrs_rdma_dev_pd dev_pd = {
44 	.ops = &dev_pd_ops
45 };
46 
47 static struct workqueue_struct *rtrs_wq;
48 static const struct class rtrs_clt_dev_class = {
49 	.name = "rtrs-client",
50 };
51 
52 static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
53 {
54 	struct rtrs_clt_path *clt_path;
55 	bool connected = false;
56 
57 	rcu_read_lock();
58 	list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
59 		if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
60 			connected = true;
61 			break;
62 		}
63 	rcu_read_unlock();
64 
65 	return connected;
66 }
67 
68 static struct rtrs_permit *
69 __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
70 {
71 	size_t max_depth = clt->queue_depth;
72 	struct rtrs_permit *permit;
73 	int bit;
74 
75 	/*
76 	 * Adapted from null_blk get_tag(). Callers from different cpus may
77 	 * grab the same bit, since find_first_zero_bit is not atomic.
78 	 * But then the test_and_set_bit_lock will fail for all the
79 	 * callers but one, so that they will loop again.
80 	 * This way an explicit spinlock is not required.
81 	 */
82 	do {
83 		bit = find_first_zero_bit(clt->permits_map, max_depth);
84 		if (bit >= max_depth)
85 			return NULL;
86 	} while (test_and_set_bit_lock(bit, clt->permits_map));
87 
88 	permit = get_permit(clt, bit);
89 	WARN_ON(permit->mem_id != bit);
90 	permit->cpu_id = raw_smp_processor_id();
91 	permit->con_type = con_type;
92 
93 	return permit;
94 }
95 
96 static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
97 				      struct rtrs_permit *permit)
98 {
99 	clear_bit_unlock(permit->mem_id, clt->permits_map);
100 }
101 
102 /**
103  * rtrs_clt_get_permit() - allocates permit for future RDMA operation
104  * @clt:	Current session
105  * @con_type:	Type of connection to use with the permit
106  * @can_wait:	Wait type
107  *
108  * Description:
109  *    Allocates a permit for a subsequent RDMA operation.  The permit is
110  *    used to preallocate all resources and to propagate memory pressure
111  *    up the stack earlier.
112  *
113  * Context:
114  *    Can sleep if @can_wait == RTRS_PERMIT_WAIT
115  */
116 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
117 					  enum rtrs_clt_con_type con_type,
118 					  enum wait_type can_wait)
119 {
120 	struct rtrs_permit *permit;
121 	DEFINE_WAIT(wait);
122 
123 	permit = __rtrs_get_permit(clt, con_type);
124 	if (permit || !can_wait)
125 		return permit;
126 
127 	do {
128 		prepare_to_wait(&clt->permits_wait, &wait,
129 				TASK_UNINTERRUPTIBLE);
130 		permit = __rtrs_get_permit(clt, con_type);
131 		if (permit)
132 			break;
133 
134 		io_schedule();
135 	} while (1);
136 
137 	finish_wait(&clt->permits_wait, &wait);
138 
139 	return permit;
140 }
141 EXPORT_SYMBOL(rtrs_clt_get_permit);
142 
143 /**
144  * rtrs_clt_put_permit() - puts allocated permit
145  * @clt:	Current session
146  * @permit:	Permit to be freed
147  *
148  * Context:
149  *    Does not matter
150  */
151 void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
152 			 struct rtrs_permit *permit)
153 {
154 	if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
155 		return;
156 
157 	__rtrs_put_permit(clt, permit);
158 
159 	/*
160 	 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
161 	 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping
162 	 * it must have added itself to &clt->permits_wait before
163 	 * __rtrs_put_permit() finished.
164 	 * Hence it is safe to guard wake_up() with a waitqueue_active() test.
165 	 */
166 	if (waitqueue_active(&clt->permits_wait))
167 		wake_up(&clt->permits_wait);
168 }
169 EXPORT_SYMBOL(rtrs_clt_put_permit);
170 
171 /**
172  * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
173  * @clt_path: client path pointer
174  * @permit: permit for the allocation of the RDMA buffer
175  * Note:
176  *     IO connections start from 1.
177  *     Connection 0 is reserved for user messages.
178  */
179 static
180 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
181 					    struct rtrs_permit *permit)
182 {
183 	int id = 0;
184 
185 	if (permit->con_type == RTRS_IO_CON)
186 		id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
187 
188 	return to_clt_con(clt_path->s.con[id]);
189 }
190 
191 /**
192  * rtrs_clt_change_state() - change the session state through session state
193  * machine.
194  *
195  * @clt_path: client path to change the state of.
196  * @new_state: state to change to.
197  *
198  * Returns true if the session state was changed to the new state, otherwise returns false.
199  *
200  * Locks:
201  * state_wq lock must be held.
202  */
203 static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
204 				     enum rtrs_clt_state new_state)
205 {
206 	enum rtrs_clt_state old_state;
207 	bool changed = false;
208 
209 	lockdep_assert_held(&clt_path->state_wq.lock);
210 
211 	old_state = clt_path->state;
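	/*
	 * Allowed transitions: RECONNECTING -> CONNECTING;
	 * {CONNECTED, CONNECTING_ERR, CLOSED} -> RECONNECTING;
	 * CONNECTING -> {CONNECTED, CONNECTING_ERR};
	 * {CONNECTING, CONNECTING_ERR, RECONNECTING, CONNECTED} -> CLOSING;
	 * CLOSING -> CLOSED; CLOSED -> DEAD.  Anything else is rejected.
	 */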
212 	switch (new_state) {
213 	case RTRS_CLT_CONNECTING:
214 		switch (old_state) {
215 		case RTRS_CLT_RECONNECTING:
216 			changed = true;
217 			fallthrough;
218 		default:
219 			break;
220 		}
221 		break;
222 	case RTRS_CLT_RECONNECTING:
223 		switch (old_state) {
224 		case RTRS_CLT_CONNECTED:
225 		case RTRS_CLT_CONNECTING_ERR:
226 		case RTRS_CLT_CLOSED:
227 			changed = true;
228 			fallthrough;
229 		default:
230 			break;
231 		}
232 		break;
233 	case RTRS_CLT_CONNECTED:
234 		switch (old_state) {
235 		case RTRS_CLT_CONNECTING:
236 			changed = true;
237 			fallthrough;
238 		default:
239 			break;
240 		}
241 		break;
242 	case RTRS_CLT_CONNECTING_ERR:
243 		switch (old_state) {
244 		case RTRS_CLT_CONNECTING:
245 			changed = true;
246 			fallthrough;
247 		default:
248 			break;
249 		}
250 		break;
251 	case RTRS_CLT_CLOSING:
252 		switch (old_state) {
253 		case RTRS_CLT_CONNECTING:
254 		case RTRS_CLT_CONNECTING_ERR:
255 		case RTRS_CLT_RECONNECTING:
256 		case RTRS_CLT_CONNECTED:
257 			changed = true;
258 			fallthrough;
259 		default:
260 			break;
261 		}
262 		break;
263 	case RTRS_CLT_CLOSED:
264 		switch (old_state) {
265 		case RTRS_CLT_CLOSING:
266 			changed = true;
267 			fallthrough;
268 		default:
269 			break;
270 		}
271 		break;
272 	case RTRS_CLT_DEAD:
273 		switch (old_state) {
274 		case RTRS_CLT_CLOSED:
275 			changed = true;
276 			fallthrough;
277 		default:
278 			break;
279 		}
280 		break;
281 	default:
282 		break;
283 	}
284 	if (changed) {
285 		clt_path->state = new_state;
286 		wake_up_locked(&clt_path->state_wq);
287 	}
288 
289 	return changed;
290 }
291 
292 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
293 					   enum rtrs_clt_state old_state,
294 					   enum rtrs_clt_state new_state)
295 {
296 	bool changed = false;
297 
298 	spin_lock_irq(&clt_path->state_wq.lock);
299 	if (clt_path->state == old_state)
300 		changed = rtrs_clt_change_state(clt_path, new_state);
301 	spin_unlock_irq(&clt_path->state_wq.lock);
302 
303 	return changed;
304 }
305 
306 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
307 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
308 {
309 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
310 
311 	trace_rtrs_rdma_error_recovery(clt_path);
312 
313 	if (rtrs_clt_change_state_from_to(clt_path,
314 					   RTRS_CLT_CONNECTED,
315 					   RTRS_CLT_RECONNECTING)) {
316 		queue_work(rtrs_wq, &clt_path->err_recovery_work);
317 	} else {
318 		/*
319 		 * Error can happen just on establishing new connection,
320 		 * so notify waiter with error state, waiter is responsible
321 		 * for cleaning the rest and reconnect if needed.
322 		 */
323 		rtrs_clt_change_state_from_to(clt_path,
324 					       RTRS_CLT_CONNECTING,
325 					       RTRS_CLT_CONNECTING_ERR);
326 	}
327 }
328 
329 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
330 {
331 	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
332 
333 	if (wc->status != IB_WC_SUCCESS) {
334 		rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
335 			  ib_wc_status_msg(wc->status));
336 		rtrs_rdma_error_recovery(con);
337 	}
338 }
339 
340 static struct ib_cqe fast_reg_cqe = {
341 	.done = rtrs_clt_fast_reg_done
342 };
343 
344 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
345 			      bool notify, bool can_wait);
346 
347 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
348 {
349 	struct rtrs_clt_io_req *req =
350 		container_of(wc->wr_cqe, typeof(*req), inv_cqe);
351 	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
352 
353 	if (wc->status != IB_WC_SUCCESS) {
354 		rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
355 			  ib_wc_status_msg(wc->status));
356 		rtrs_rdma_error_recovery(con);
357 	}
358 	req->need_inv = false;
359 	if (req->need_inv_comp)
360 		complete(&req->inv_comp);
361 	else
362 		/* Complete request from INV callback */
363 		complete_rdma_req(req, req->inv_errno, true, false);
364 }
365 
366 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
367 {
368 	struct rtrs_clt_con *con = req->con;
369 	struct ib_send_wr wr = {
370 		.opcode		    = IB_WR_LOCAL_INV,
371 		.wr_cqe		    = &req->inv_cqe,
372 		.send_flags	    = IB_SEND_SIGNALED,
373 		.ex.invalidate_rkey = req->mr->rkey,
374 	};
375 	req->inv_cqe.done = rtrs_clt_inv_rkey_done;
376 
377 	return ib_post_send(con->c.qp, &wr, NULL);
378 }
379 
380 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
381 			      bool notify, bool can_wait)
382 {
383 	struct rtrs_clt_con *con = req->con;
384 	struct rtrs_clt_path *clt_path;
385 	int err;
386 
387 	if (!req->in_use)
388 		return;
389 	if (WARN_ON(!req->con))
390 		return;
391 	clt_path = to_clt_path(con->c.path);
392 
393 	if (req->sg_cnt) {
394 		if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
395 			/*
396 			 * We are here to invalidate read requests
397 			 * ourselves.  In normal scenario server should
398 			 * send INV for all read requests, but
399 			 * we are here, thus two things could happen:
400 			 *
401 			 *    1.  this is failover, when errno != 0
402 			 *        and can_wait == 1,
403 			 *
404 			 *    2.  something totally bad happened and
405 			 *        server forgot to send INV, so we
406 			 *        should do that ourselves.
407 			 */
408 
409 			if (can_wait) {
410 				req->need_inv_comp = true;
411 			} else {
412 				/* This should be IO path, so always notify */
413 				WARN_ON(!notify);
414 				/* Save errno for INV callback */
415 				req->inv_errno = errno;
416 			}
417 
418 			refcount_inc(&req->ref);
419 			err = rtrs_inv_rkey(req);
420 			if (err) {
421 				rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
422 					  req->mr->rkey, err);
423 			} else if (can_wait) {
424 				wait_for_completion(&req->inv_comp);
425 			} else {
426 				/*
427 				 * Something went wrong, so request will be
428 				 * completed from INV callback.
429 				 */
430 				WARN_ON_ONCE(1);
431 
432 				return;
433 			}
434 			if (!refcount_dec_and_test(&req->ref))
435 				return;
436 		}
437 		ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
438 				req->sg_cnt, req->dir);
439 	}
440 	if (!refcount_dec_and_test(&req->ref))
441 		return;
442 	if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
443 		atomic_dec(&clt_path->stats->inflight);
444 
445 	req->in_use = false;
446 	req->con = NULL;
447 
448 	if (errno) {
449 		rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
450 			    errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
451 			    clt_path->hca_port, notify);
452 	}
453 
454 	if (notify)
455 		req->conf(req->priv, errno);
456 }
457 
458 static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
459 				struct rtrs_clt_io_req *req,
460 				struct rtrs_rbuf *rbuf, u32 off,
461 				u32 imm, struct ib_send_wr *wr)
462 {
463 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
464 	enum ib_send_flags flags;
465 	struct ib_sge sge;
466 
467 	if (!req->sg_size) {
468 		rtrs_wrn(con->c.path,
469 			 "Doing RDMA Write failed, no data supplied\n");
470 		return -EINVAL;
471 	}
472 
473 	/* user data and user message in the first list element */
474 	sge.addr   = req->iu->dma_addr;
475 	sge.length = req->sg_size;
476 	sge.lkey   = clt_path->s.dev->ib_pd->local_dma_lkey;
477 
478 	/*
479 	 * From time to time we have to post signalled sends,
480 	 * or send queue will fill up and only QP reset can help.
481 	 */
482 	flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
483 			0 : IB_SEND_SIGNALED;
484 
485 	ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
486 				      req->iu->dma_addr,
487 				      req->sg_size, DMA_TO_DEVICE);
488 
489 	return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
490 					    rbuf->rkey, rbuf->addr + off,
491 					    imm, flags, wr, NULL);
492 }
493 
494 static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
495 			   s16 errno, bool w_inval)
496 {
497 	struct rtrs_clt_io_req *req;
498 
499 	if (WARN_ON(msg_id >= clt_path->queue_depth))
500 		return;
501 
502 	req = &clt_path->reqs[msg_id];
503 	/* Drop need_inv if the server responded with a send-with-invalidation */
504 	req->need_inv &= !w_inval;
505 	complete_rdma_req(req, errno, true, false);
506 }
507 
508 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
509 {
510 	struct rtrs_iu *iu;
511 	int err;
512 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
513 
514 	WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
515 	iu = container_of(wc->wr_cqe, struct rtrs_iu,
516 			  cqe);
517 	err = rtrs_iu_post_recv(&con->c, iu);
518 	if (err) {
519 		rtrs_err(con->c.path, "post iu failed %d\n", err);
520 		rtrs_rdma_error_recovery(con);
521 	}
522 }
523 
524 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
525 {
526 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
527 	struct rtrs_msg_rkey_rsp *msg;
528 	u32 imm_type, imm_payload;
529 	bool w_inval = false;
530 	struct rtrs_iu *iu;
531 	u32 buf_id;
532 	int err;
533 
534 	WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
535 
536 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
537 
538 	if (wc->byte_len < sizeof(*msg)) {
539 		rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
540 			  wc->byte_len);
541 		goto out;
542 	}
543 	ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
544 				   iu->size, DMA_FROM_DEVICE);
545 	msg = iu->buf;
546 	if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
547 		rtrs_err(clt_path->clt,
548 			  "rkey response is malformed: type %d\n",
549 			  le16_to_cpu(msg->type));
550 		goto out;
551 	}
552 	buf_id = le16_to_cpu(msg->buf_id);
553 	if (WARN_ON(buf_id >= clt_path->queue_depth))
554 		goto out;
555 
556 	rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
557 	if (imm_type == RTRS_IO_RSP_IMM ||
558 	    imm_type == RTRS_IO_RSP_W_INV_IMM) {
559 		u32 msg_id;
560 
561 		w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
562 		rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
563 
564 		if (WARN_ON(buf_id != msg_id))
565 			goto out;
566 		clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
567 		process_io_rsp(clt_path, msg_id, err, w_inval);
568 	}
569 	ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
570 				      iu->size, DMA_FROM_DEVICE);
571 	return rtrs_clt_recv_done(con, wc);
572 out:
573 	rtrs_rdma_error_recovery(con);
574 }
575 
576 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
577 
578 static struct ib_cqe io_comp_cqe = {
579 	.done = rtrs_clt_rdma_done
580 };
581 
582 /*
583  * Post x2 empty WRs: first is for this RDMA with IMM,
584  * second is for RECV with INV, which happened earlier.
585  */
586 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
587 {
588 	struct ib_recv_wr wr_arr[2], *wr;
589 	int i;
590 
591 	memset(wr_arr, 0, sizeof(wr_arr));
592 	for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
593 		wr = &wr_arr[i];
594 		wr->wr_cqe  = cqe;
595 		if (i)
596 			/* Chain backwards */
597 			wr->next = &wr_arr[i - 1];
598 	}
599 
600 	return ib_post_recv(con->qp, wr, NULL);
601 }
602 
603 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
604 {
605 	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
606 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
607 	u32 imm_type, imm_payload;
608 	bool w_inval = false;
609 	int err;
610 
611 	if (wc->status != IB_WC_SUCCESS) {
612 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
613 			rtrs_err(clt_path->clt, "RDMA failed: %s\n",
614 				  ib_wc_status_msg(wc->status));
615 			rtrs_rdma_error_recovery(con);
616 		}
617 		return;
618 	}
619 	rtrs_clt_update_wc_stats(con);
620 
621 	switch (wc->opcode) {
622 	case IB_WC_RECV_RDMA_WITH_IMM:
623 		/*
624 		 * post_recv() RDMA write completions of IO reqs (read/write)
625 		 * and hb
626 		 */
627 		if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
628 			return;
629 		rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
630 			       &imm_type, &imm_payload);
631 		if (imm_type == RTRS_IO_RSP_IMM ||
632 		    imm_type == RTRS_IO_RSP_W_INV_IMM) {
633 			u32 msg_id;
634 
635 			w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
636 			rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
637 
638 			process_io_rsp(clt_path, msg_id, err, w_inval);
639 		} else if (imm_type == RTRS_HB_MSG_IMM) {
640 			WARN_ON(con->c.cid);
641 			rtrs_send_hb_ack(&clt_path->s);
642 			if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
643 				return  rtrs_clt_recv_done(con, wc);
644 		} else if (imm_type == RTRS_HB_ACK_IMM) {
645 			WARN_ON(con->c.cid);
646 			clt_path->s.hb_missed_cnt = 0;
647 			clt_path->s.hb_cur_latency =
648 				ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
649 			if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
650 				return  rtrs_clt_recv_done(con, wc);
651 		} else {
652 			rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
653 				  imm_type);
654 		}
655 		if (w_inval)
656 			/*
657 			 * Post x2 empty WRs: first is for this RDMA with IMM,
658 			 * second is for RECV with INV, which happened earlier.
659 			 */
660 			err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
661 		else
662 			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
663 		if (err) {
664 			rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
665 				  err);
666 			rtrs_rdma_error_recovery(con);
667 		}
668 		break;
669 	case IB_WC_RECV:
670 		/*
671 		 * Key invalidations from server side
672 		 */
673 		WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
674 			  wc->wc_flags & IB_WC_WITH_IMM));
675 		WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
676 		if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
677 			if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
678 				return  rtrs_clt_recv_done(con, wc);
679 
680 			return  rtrs_clt_rkey_rsp_done(con, wc);
681 		}
682 		break;
683 	case IB_WC_RDMA_WRITE:
684 		/*
685 		 * post_send() RDMA write completions of IO reqs (read/write)
686 		 * and hb.
687 		 */
688 		break;
689 
690 	default:
691 		rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
692 		return;
693 	}
694 }
695 
696 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
697 {
698 	int err, i;
699 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
700 
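	/*
	 * With RTRS_MSG_NEW_RKEY_F the server replies with rkey response
	 * messages, so real IUs must be posted; otherwise empty receives
	 * are enough, since the completions carry only immediate data.
	 */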
701 	for (i = 0; i < q_size; i++) {
702 		if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
703 			struct rtrs_iu *iu = &con->rsp_ius[i];
704 
705 			err = rtrs_iu_post_recv(&con->c, iu);
706 		} else {
707 			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
708 		}
709 		if (err)
710 			return err;
711 	}
712 
713 	return 0;
714 }
715 
716 static int post_recv_path(struct rtrs_clt_path *clt_path)
717 {
718 	size_t q_size = 0;
719 	int err, cid;
720 
721 	for (cid = 0; cid < clt_path->s.con_num; cid++) {
722 		if (cid == 0)
723 			q_size = SERVICE_CON_QUEUE_DEPTH;
724 		else
725 			q_size = clt_path->queue_depth;
726 
727 		/*
728 		 * x2 for RDMA read responses + FR key invalidations,
729 		 * RDMA writes do not require any FR registrations.
730 		 */
731 		q_size *= 2;
732 
733 		err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
734 		if (err) {
735 			rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
736 				 err);
737 			return err;
738 		}
739 	}
740 
741 	return 0;
742 }
743 
744 struct path_it {
745 	int i;
746 	struct list_head skip_list;
747 	struct rtrs_clt_sess *clt;
748 	struct rtrs_clt_path *(*next_path)(struct path_it *it);
749 };
750 
751 /*
752  * rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
753  * @head:	the head for the list.
754  * @clt_path:	The element to take the next clt_path from.
755  *
756  * The next clt path is returned in round-robin fashion, i.e. the head is
757  * skipped, but if the list is observed to be empty, NULL is returned.
758  *
759  * This function may safely run concurrently with the _rcu list-mutation
760  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
761  */
762 static inline struct rtrs_clt_path *
763 rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
764 {
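	/*
	 * If @clt_path is the last entry, the first lookup returns NULL and
	 * the second one wraps around via the cached ->next pointer to the
	 * first entry of the list (or NULL if the list became empty).
	 */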
765 	return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
766 				     list_next_or_null_rcu(head,
767 							   READ_ONCE((&clt_path->s.entry)->next),
768 							   typeof(*clt_path), s.entry);
769 }
770 
771 /**
772  * get_next_path_rr() - Returns path in round-robin fashion.
773  * @it:	the path pointer
774  *
775  * Related to @MP_POLICY_RR
776  *
777  * Locks:
778  *    rcu_read_lock() must be held.
779  */
780 static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
781 {
782 	struct rtrs_clt_path __rcu **ppcpu_path;
783 	struct rtrs_clt_path *path;
784 	struct rtrs_clt_sess *clt;
785 
786 	clt = it->clt;
787 
788 	/*
789 	 * Here we use two RCU objects: @paths_list and @pcpu_path
790 	 * pointer.  See rtrs_clt_remove_path_from_arr() for details
791 	 * how that is handled.
792 	 */
793 
794 	ppcpu_path = this_cpu_ptr(clt->pcpu_path);
795 	path = rcu_dereference(*ppcpu_path);
796 	if (!path)
797 		path = list_first_or_null_rcu(&clt->paths_list,
798 					      typeof(*path), s.entry);
799 	else
800 		path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
801 
802 	rcu_assign_pointer(*ppcpu_path, path);
803 
804 	return path;
805 }
806 
807 /**
808  * get_next_path_min_inflight() - Returns path with minimal inflight count.
809  * @it:	the path pointer
810  *
811  * Related to @MP_POLICY_MIN_INFLIGHT
812  *
813  * Locks:
814  *    rcu_read_lock() must be held.
815  */
816 static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
817 {
818 	struct rtrs_clt_path *min_path = NULL;
819 	struct rtrs_clt_sess *clt = it->clt;
820 	struct rtrs_clt_path *clt_path;
821 	int min_inflight = INT_MAX;
822 	int inflight;
823 
824 	list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
825 		if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
826 			continue;
827 
828 		if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
829 			continue;
830 
831 		inflight = atomic_read(&clt_path->stats->inflight);
832 
833 		if (inflight < min_inflight) {
834 			min_inflight = inflight;
835 			min_path = clt_path;
836 		}
837 	}
838 
839 	/*
840 	 * add the path to the skip list, so that next time we can get
841 	 * a different one
842 	 */
843 	if (min_path)
844 		list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
845 
846 	return min_path;
847 }
848 
849 /**
850  * get_next_path_min_latency() - Returns path with minimal latency.
851  * @it:	the path pointer
852  *
853  * Return: a path with the lowest latency or NULL if all paths are tried
854  *
855  * Locks:
856  *    rcu_read_lock() must be held.
857  *
858  * Related to @MP_POLICY_MIN_LATENCY
859  *
860  * This DOES skip already-tried paths.
861  * A skip list records paths that have been tried but failed.
862  * The minimum-latency path is tried first, then the second minimum-latency
863  * path, and so on. Finally NULL is returned once all paths have been tried.
864  * Therefore the caller MUST check whether the returned path is NULL and
865  * trigger the IO error in that case.
866  */
867 static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
868 {
869 	struct rtrs_clt_path *min_path = NULL;
870 	struct rtrs_clt_sess *clt = it->clt;
871 	struct rtrs_clt_path *clt_path;
872 	ktime_t min_latency = KTIME_MAX;
873 	ktime_t latency;
874 
875 	list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
876 		if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
877 			continue;
878 
879 		if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
880 			continue;
881 
882 		latency = clt_path->s.hb_cur_latency;
883 
884 		if (latency < min_latency) {
885 			min_latency = latency;
886 			min_path = clt_path;
887 		}
888 	}
889 
890 	/*
891 	 * add the path to the skip list, so that next time we can get
892 	 * a different one
893 	 */
894 	if (min_path)
895 		list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
896 
897 	return min_path;
898 }
899 
900 static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
901 {
902 	INIT_LIST_HEAD(&it->skip_list);
903 	it->clt = clt;
904 	it->i = 0;
905 
906 	if (clt->mp_policy == MP_POLICY_RR)
907 		it->next_path = get_next_path_rr;
908 	else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
909 		it->next_path = get_next_path_min_inflight;
910 	else
911 		it->next_path = get_next_path_min_latency;
912 }
913 
914 static inline void path_it_deinit(struct path_it *it)
915 {
916 	struct list_head *skip, *tmp;
917 	/*
918 	 * The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
919 	 * We need to remove paths from it, so that next IO can insert
920 	 * paths (->mp_skip_entry) into a skip_list again.
921 	 */
922 	list_for_each_safe(skip, tmp, &it->skip_list)
923 		list_del_init(skip);
924 }
925 
926 /**
927  * rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
928  * about an inflight IO.
929  * The user buffer holding user control message (not data) is copied into
930  * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
931  * also hold the control message of rtrs.
932  * @req: an io request holding information about IO.
933  * @clt_path: client path
934  * @conf: confirmation callback function to notify the upper layer.
935  * @permit: permit for allocation of RDMA remote buffer
936  * @priv: private pointer
937  * @vec: kernel vector containing control message
938  * @usr_len: length of the user message
939  * @sg: scatter list for IO data
940  * @sg_cnt: number of scatter list entries
941  * @data_len: length of the IO data
942  * @dir: direction of the IO.
943  */
944 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
945 			      struct rtrs_clt_path *clt_path,
946 			      void (*conf)(void *priv, int errno),
947 			      struct rtrs_permit *permit, void *priv,
948 			      const struct kvec *vec, size_t usr_len,
949 			      struct scatterlist *sg, size_t sg_cnt,
950 			      size_t data_len, int dir)
951 {
952 	struct iov_iter iter;
953 	size_t len;
954 
955 	req->permit = permit;
956 	req->in_use = true;
957 	req->usr_len = usr_len;
958 	req->data_len = data_len;
959 	req->sglist = sg;
960 	req->sg_cnt = sg_cnt;
961 	req->priv = priv;
962 	req->dir = dir;
963 	req->con = rtrs_permit_to_clt_con(clt_path, permit);
964 	req->conf = conf;
965 	req->need_inv = false;
966 	req->need_inv_comp = false;
967 	req->inv_errno = 0;
968 	refcount_set(&req->ref, 1);
969 	req->mp_policy = clt_path->clt->mp_policy;
970 
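	/*
	 * Copy the caller's control message into the iu buffer; the rtrs
	 * header is placed right after it by rtrs_clt_write_req() and
	 * rtrs_clt_read_req().
	 */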
971 	iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
972 	len = _copy_from_iter(req->iu->buf, usr_len, &iter);
973 	WARN_ON(len != usr_len);
974 
975 	reinit_completion(&req->inv_comp);
976 }
977 
978 static struct rtrs_clt_io_req *
979 rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
980 		 void (*conf)(void *priv, int errno),
981 		 struct rtrs_permit *permit, void *priv,
982 		 const struct kvec *vec, size_t usr_len,
983 		 struct scatterlist *sg, size_t sg_cnt,
984 		 size_t data_len, int dir)
985 {
986 	struct rtrs_clt_io_req *req;
987 
988 	req = &clt_path->reqs[permit->mem_id];
989 	rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
990 			   sg, sg_cnt, data_len, dir);
991 	return req;
992 }
993 
994 static struct rtrs_clt_io_req *
995 rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
996 		       struct rtrs_clt_io_req *fail_req)
997 {
998 	struct rtrs_clt_io_req *req;
999 	struct kvec vec = {
1000 		.iov_base = fail_req->iu->buf,
1001 		.iov_len  = fail_req->usr_len
1002 	};
1003 
1004 	req = &alive_path->reqs[fail_req->permit->mem_id];
1005 	rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
1006 			   fail_req->priv, &vec, fail_req->usr_len,
1007 			   fail_req->sglist, fail_req->sg_cnt,
1008 			   fail_req->data_len, fail_req->dir);
1009 	return req;
1010 }
1011 
1012 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
1013 				   struct rtrs_clt_io_req *req,
1014 				   struct rtrs_rbuf *rbuf, bool fr_en,
1015 				   u32 count, u32 size, u32 imm,
1016 				   struct ib_send_wr *wr,
1017 				   struct ib_send_wr *tail)
1018 {
1019 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1020 	struct ib_sge *sge = req->sge;
1021 	enum ib_send_flags flags;
1022 	struct scatterlist *sg;
1023 	size_t num_sge;
1024 	int i;
1025 	struct ib_send_wr *ptail = NULL;
1026 
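	/*
	 * With fast registration a single sge covers the whole registered MR
	 * and a second sge carries the rtrs header; the caller's tail WR
	 * (the local invalidate) is chained after the write.  Otherwise each
	 * mapped SG entry gets its own sge plus one for the header.
	 */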
1027 	if (fr_en) {
1028 		i = 0;
1029 		sge[i].addr   = req->mr->iova;
1030 		sge[i].length = req->mr->length;
1031 		sge[i].lkey   = req->mr->lkey;
1032 		i++;
1033 		num_sge = 2;
1034 		ptail = tail;
1035 	} else {
1036 		for_each_sg(req->sglist, sg, count, i) {
1037 			sge[i].addr   = sg_dma_address(sg);
1038 			sge[i].length = sg_dma_len(sg);
1039 			sge[i].lkey   = clt_path->s.dev->ib_pd->local_dma_lkey;
1040 		}
1041 		num_sge = 1 + count;
1042 	}
1043 	sge[i].addr   = req->iu->dma_addr;
1044 	sge[i].length = size;
1045 	sge[i].lkey   = clt_path->s.dev->ib_pd->local_dma_lkey;
1046 
1047 	/*
1048 	 * From time to time we have to post signalled sends,
1049 	 * or send queue will fill up and only QP reset can help.
1050 	 */
1051 	flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
1052 			0 : IB_SEND_SIGNALED;
1053 
1054 	ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
1055 				      req->iu->dma_addr,
1056 				      size, DMA_TO_DEVICE);
1057 
1058 	return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
1059 					    rbuf->rkey, rbuf->addr, imm,
1060 					    flags, wr, ptail);
1061 }
1062 
1063 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
1064 {
1065 	int nr;
1066 
1067 	/* Align the MR to a 4K page size to match the block virt boundary */
1068 	nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
1069 	if (nr != count)
1070 		return nr < 0 ? nr : -EINVAL;
1071 	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1072 
1073 	return nr;
1074 }
1075 
1076 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
1077 {
1078 	struct rtrs_clt_con *con = req->con;
1079 	struct rtrs_path *s = con->c.path;
1080 	struct rtrs_clt_path *clt_path = to_clt_path(s);
1081 	struct rtrs_msg_rdma_write *msg;
1082 
1083 	struct rtrs_rbuf *rbuf;
1084 	int ret, count = 0;
1085 	u32 imm, buf_id;
1086 	struct ib_reg_wr rwr;
1087 	struct ib_send_wr inv_wr;
1088 	struct ib_send_wr *wr = NULL;
1089 	bool fr_en = false;
1090 
1091 	const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1092 
1093 	if (tsize > clt_path->chunk_size) {
1094 		rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
1095 			  tsize, clt_path->chunk_size);
1096 		return -EMSGSIZE;
1097 	}
1098 	if (req->sg_cnt) {
1099 		count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
1100 				      req->sg_cnt, req->dir);
1101 		if (!count) {
1102 			rtrs_wrn(s, "Write request failed, map failed\n");
1103 			return -EINVAL;
1104 		}
1105 	}
1106 	/* put rtrs msg after sg and user message */
1107 	msg = req->iu->buf + req->usr_len;
1108 	msg->type = cpu_to_le16(RTRS_MSG_WRITE);
1109 	msg->usr_len = cpu_to_le16(req->usr_len);
1110 
1111 	/* rtrs message on server side will be after user data and message */
1112 	imm = req->permit->mem_off + req->data_len + req->usr_len;
1113 	imm = rtrs_to_io_req_imm(imm);
1114 	buf_id = req->permit->mem_id;
1115 	req->sg_size = tsize;
1116 	rbuf = &clt_path->rbufs[buf_id];
1117 
1118 	if (count) {
1119 		ret = rtrs_map_sg_fr(req, count);
1120 		if (ret < 0) {
1121 			rtrs_err_rl(s,
1122 				    "Write request failed, failed to map fast reg. data, err: %d\n",
1123 				    ret);
1124 			ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1125 					req->sg_cnt, req->dir);
1126 			return ret;
1127 		}
1128 		inv_wr = (struct ib_send_wr) {
1129 			.opcode		    = IB_WR_LOCAL_INV,
1130 			.wr_cqe		    = &req->inv_cqe,
1131 			.send_flags	    = IB_SEND_SIGNALED,
1132 			.ex.invalidate_rkey = req->mr->rkey,
1133 		};
1134 		req->inv_cqe.done = rtrs_clt_inv_rkey_done;
1135 		rwr = (struct ib_reg_wr) {
1136 			.wr.opcode = IB_WR_REG_MR,
1137 			.wr.wr_cqe = &fast_reg_cqe,
1138 			.mr = req->mr,
1139 			.key = req->mr->rkey,
1140 			.access = (IB_ACCESS_LOCAL_WRITE),
1141 		};
1142 		wr = &rwr.wr;
1143 		fr_en = true;
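		/*
		 * Extra reference for the chained LOCAL_INV completion
		 * (see rtrs_clt_inv_rkey_done()), which also ends up in
		 * complete_rdma_req().
		 */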
1144 		refcount_inc(&req->ref);
1145 	}
1146 	/*
1147 	 * Update stats now; after the request has been successfully sent it
1148 	 * is no longer safe to touch it.
1149 	 */
1150 	rtrs_clt_update_all_stats(req, WRITE);
1151 
1152 	ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
1153 				      req->usr_len + sizeof(*msg),
1154 				      imm, wr, &inv_wr);
1155 	if (ret) {
1156 		rtrs_err_rl(s,
1157 			    "Write request failed: error=%d path=%s [%s:%u]\n",
1158 			    ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
1159 			    clt_path->hca_port);
1160 		if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
1161 			atomic_dec(&clt_path->stats->inflight);
1162 		if (req->sg_cnt)
1163 			ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
1164 					req->sg_cnt, req->dir);
1165 	}
1166 
1167 	return ret;
1168 }
1169 
1170 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
1171 {
1172 	struct rtrs_clt_con *con = req->con;
1173 	struct rtrs_path *s = con->c.path;
1174 	struct rtrs_clt_path *clt_path = to_clt_path(s);
1175 	struct rtrs_msg_rdma_read *msg;
1176 	struct rtrs_ib_dev *dev = clt_path->s.dev;
1177 
1178 	struct ib_reg_wr rwr;
1179 	struct ib_send_wr *wr = NULL;
1180 
1181 	int ret, count = 0;
1182 	u32 imm, buf_id;
1183 
1184 	const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
1185 
1186 	if (tsize > clt_path->chunk_size) {
1187 		rtrs_wrn(s,
1188 			  "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
1189 			  tsize, clt_path->chunk_size);
1190 		return -EMSGSIZE;
1191 	}
1192 
1193 	if (req->sg_cnt) {
1194 		count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1195 				      req->dir);
1196 		if (!count) {
1197 			rtrs_wrn(s,
1198 				  "Read request failed, dma map failed\n");
1199 			return -EINVAL;
1200 		}
1201 	}
1202 	/* put our message into req->iu->buf after the user message */
1203 	msg = req->iu->buf + req->usr_len;
1204 	msg->type = cpu_to_le16(RTRS_MSG_READ);
1205 	msg->usr_len = cpu_to_le16(req->usr_len);
1206 
1207 	if (count) {
1208 		ret = rtrs_map_sg_fr(req, count);
1209 		if (ret < 0) {
1210 			rtrs_err_rl(s,
1211 				     "Read request failed, failed to map fast reg. data, err: %d\n",
1212 				     ret);
1213 			ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
1214 					req->dir);
1215 			return ret;
1216 		}
1217 		rwr = (struct ib_reg_wr) {
1218 			.wr.opcode = IB_WR_REG_MR,
1219 			.wr.wr_cqe = &fast_reg_cqe,
1220 			.mr = req->mr,
1221 			.key = req->mr->rkey,
1222 			.access = (IB_ACCESS_LOCAL_WRITE |
1223 				   IB_ACCESS_REMOTE_WRITE),
1224 		};
1225 		wr = &rwr.wr;
1226 
1227 		msg->sg_cnt = cpu_to_le16(1);
1228 		msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
1229 
1230 		msg->desc[0].addr = cpu_to_le64(req->mr->iova);
1231 		msg->desc[0].key = cpu_to_le32(req->mr->rkey);
1232 		msg->desc[0].len = cpu_to_le32(req->mr->length);
1233 
1234 		/* Further invalidation is required */
1235 		req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
1236 
1237 	} else {
1238 		msg->sg_cnt = 0;
1239 		msg->flags = 0;
1240 	}
1241 	/*
1242 	 * rtrs message will be after the space reserved for disk data and
1243 	 * user message
1244 	 */
1245 	imm = req->permit->mem_off + req->data_len + req->usr_len;
1246 	imm = rtrs_to_io_req_imm(imm);
1247 	buf_id = req->permit->mem_id;
1248 
1249 	req->sg_size  = sizeof(*msg);
1250 	req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
1251 	req->sg_size += req->usr_len;
1252 
1253 	/*
1254 	 * Update stats now; after the request has been successfully sent it
1255 	 * is no longer safe to touch it.
1256 	 */
1257 	rtrs_clt_update_all_stats(req, READ);
1258 
1259 	ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
1260 				   req->data_len, imm, wr);
1261 	if (ret) {
1262 		rtrs_err_rl(s,
1263 			    "Read request failed: error=%d path=%s [%s:%u]\n",
1264 			    ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
1265 			    clt_path->hca_port);
1266 		if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
1267 			atomic_dec(&clt_path->stats->inflight);
1268 		req->need_inv = false;
1269 		if (req->sg_cnt)
1270 			ib_dma_unmap_sg(dev->ib_dev, req->sglist,
1271 					req->sg_cnt, req->dir);
1272 	}
1273 
1274 	return ret;
1275 }
1276 
1277 /**
1278  * rtrs_clt_failover_req() - Try to find an active path for a failed request
1279  * @clt: clt context
1280  * @fail_req: a failed io request.
1281  */
1282 static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
1283 				 struct rtrs_clt_io_req *fail_req)
1284 {
1285 	struct rtrs_clt_path *alive_path;
1286 	struct rtrs_clt_io_req *req;
1287 	int err = -ECONNABORTED;
1288 	struct path_it it;
1289 
1290 	rcu_read_lock();
1291 	for (path_it_init(&it, clt);
1292 	     (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
1293 	     it.i++) {
1294 		if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
1295 			continue;
1296 		req = rtrs_clt_get_copy_req(alive_path, fail_req);
1297 		if (req->dir == DMA_TO_DEVICE)
1298 			err = rtrs_clt_write_req(req);
1299 		else
1300 			err = rtrs_clt_read_req(req);
1301 		if (err) {
1302 			req->in_use = false;
1303 			continue;
1304 		}
1305 		/* Success path */
1306 		rtrs_clt_inc_failover_cnt(alive_path->stats);
1307 		break;
1308 	}
1309 	path_it_deinit(&it);
1310 	rcu_read_unlock();
1311 
1312 	return err;
1313 }
1314 
1315 static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
1316 {
1317 	struct rtrs_clt_sess *clt = clt_path->clt;
1318 	struct rtrs_clt_io_req *req;
1319 	int i, err;
1320 
1321 	if (!clt_path->reqs)
1322 		return;
1323 	for (i = 0; i < clt_path->queue_depth; ++i) {
1324 		req = &clt_path->reqs[i];
1325 		if (!req->in_use)
1326 			continue;
1327 
1328 		/*
1329 		 * Safely (without notification) complete the failed request.
1330 		 * After completion this request is still usable and can
1331 		 * be failed over to another path.
1332 		 */
1333 		complete_rdma_req(req, -ECONNABORTED, false, true);
1334 
1335 		err = rtrs_clt_failover_req(clt, req);
1336 		if (err)
1337 			/* Failover failed, notify anyway */
1338 			req->conf(req->priv, err);
1339 	}
1340 }
1341 
1342 static void free_path_reqs(struct rtrs_clt_path *clt_path)
1343 {
1344 	struct rtrs_clt_io_req *req;
1345 	int i;
1346 
1347 	if (!clt_path->reqs)
1348 		return;
1349 	for (i = 0; i < clt_path->queue_depth; ++i) {
1350 		req = &clt_path->reqs[i];
1351 		if (req->mr)
1352 			ib_dereg_mr(req->mr);
1353 		kfree(req->sge);
1354 		rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
1355 	}
1356 	kfree(clt_path->reqs);
1357 	clt_path->reqs = NULL;
1358 }
1359 
1360 static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
1361 {
1362 	struct rtrs_clt_io_req *req;
1363 	int i, err = -ENOMEM;
1364 
1365 	clt_path->reqs = kcalloc(clt_path->queue_depth,
1366 				 sizeof(*clt_path->reqs),
1367 				 GFP_KERNEL);
1368 	if (!clt_path->reqs)
1369 		return -ENOMEM;
1370 
1371 	for (i = 0; i < clt_path->queue_depth; ++i) {
1372 		req = &clt_path->reqs[i];
1373 		req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
1374 					 clt_path->s.dev->ib_dev,
1375 					 DMA_TO_DEVICE,
1376 					 rtrs_clt_rdma_done);
1377 		if (!req->iu)
1378 			goto out;
1379 
1380 		req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
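		/*
		 * Two sges are enough per request: one for the fast
		 * registered data MR (when there is data) and one for the
		 * header iu, see rtrs_post_rdma_write_sg().
		 */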
1381 		if (!req->sge)
1382 			goto out;
1383 
1384 		req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
1385 				      IB_MR_TYPE_MEM_REG,
1386 				      clt_path->max_pages_per_mr);
1387 		if (IS_ERR(req->mr)) {
1388 			err = PTR_ERR(req->mr);
1389 			req->mr = NULL;
1390 			pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
1391 			       clt_path->max_pages_per_mr);
1392 			goto out;
1393 		}
1394 
1395 		init_completion(&req->inv_comp);
1396 	}
1397 
1398 	return 0;
1399 
1400 out:
1401 	free_path_reqs(clt_path);
1402 
1403 	return err;
1404 }
1405 
1406 static int alloc_permits(struct rtrs_clt_sess *clt)
1407 {
1408 	unsigned int chunk_bits;
1409 	int err, i;
1410 
1411 	clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
1412 	if (!clt->permits_map) {
1413 		err = -ENOMEM;
1414 		goto out_err;
1415 	}
1416 	clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1417 	if (!clt->permits) {
1418 		err = -ENOMEM;
1419 		goto err_map;
1420 	}
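	/*
	 * mem_off places the chunk index in the upper bits of the immediate
	 * payload; the lower bits later carry the offset of the rtrs message
	 * within the chunk, see rtrs_clt_write_req() and rtrs_clt_read_req().
	 */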
1421 	chunk_bits = ilog2(clt->queue_depth - 1) + 1;
1422 	for (i = 0; i < clt->queue_depth; i++) {
1423 		struct rtrs_permit *permit;
1424 
1425 		permit = get_permit(clt, i);
1426 		permit->mem_id = i;
1427 		permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
1428 	}
1429 
1430 	return 0;
1431 
1432 err_map:
1433 	bitmap_free(clt->permits_map);
1434 	clt->permits_map = NULL;
1435 out_err:
1436 	return err;
1437 }
1438 
1439 static void free_permits(struct rtrs_clt_sess *clt)
1440 {
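	/* Wait until all outstanding permits have been put back before freeing the pool. */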
1441 	if (clt->permits_map)
1442 		wait_event(clt->permits_wait,
1443 			   bitmap_empty(clt->permits_map, clt->queue_depth));
1444 
1445 	bitmap_free(clt->permits_map);
1446 	clt->permits_map = NULL;
1447 	kfree(clt->permits);
1448 	clt->permits = NULL;
1449 }
1450 
1451 static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
1452 {
1453 	struct ib_device *ib_dev;
1454 	u64 max_pages_per_mr;
1455 	int mr_page_shift;
1456 
1457 	ib_dev = clt_path->s.dev->ib_dev;
1458 
1459 	/*
1460 	 * Use the smallest page size supported by the HCA, down to a
1461 	 * minimum of 4096 bytes. We're unlikely to build large sglists
1462 	 * out of smaller entries.
1463 	 */
1464 	mr_page_shift      = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
1465 	max_pages_per_mr   = ib_dev->attrs.max_mr_size;
1466 	do_div(max_pages_per_mr, (1ull << mr_page_shift));
1467 	clt_path->max_pages_per_mr =
1468 		min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
1469 		     ib_dev->attrs.max_fast_reg_page_list_len);
1470 	clt_path->clt->max_segments =
1471 		min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
1472 }
1473 
1474 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
1475 					   enum rtrs_clt_state new_state,
1476 					   enum rtrs_clt_state *old_state)
1477 {
1478 	bool changed;
1479 
1480 	spin_lock_irq(&clt_path->state_wq.lock);
1481 	if (old_state)
1482 		*old_state = clt_path->state;
1483 	changed = rtrs_clt_change_state(clt_path, new_state);
1484 	spin_unlock_irq(&clt_path->state_wq.lock);
1485 
1486 	return changed;
1487 }
1488 
1489 static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
1490 {
1491 	struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
1492 
1493 	rtrs_rdma_error_recovery(con);
1494 }
1495 
1496 static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
1497 {
1498 	rtrs_init_hb(&clt_path->s, &io_comp_cqe,
1499 		      RTRS_HB_INTERVAL_MS,
1500 		      RTRS_HB_MISSED_MAX,
1501 		      rtrs_clt_hb_err_handler,
1502 		      rtrs_wq);
1503 }
1504 
1505 static void rtrs_clt_reconnect_work(struct work_struct *work);
1506 static void rtrs_clt_close_work(struct work_struct *work);
1507 
1508 static void rtrs_clt_err_recovery_work(struct work_struct *work)
1509 {
1510 	struct rtrs_clt_path *clt_path;
1511 	struct rtrs_clt_sess *clt;
1512 	int delay_ms;
1513 
1514 	clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
1515 	clt = clt_path->clt;
1516 	delay_ms = clt->reconnect_delay_sec * 1000;
1517 	rtrs_clt_stop_and_destroy_conns(clt_path);
1518 	queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
1519 			   msecs_to_jiffies(delay_ms +
1520 					    get_random_u32_below(RTRS_RECONNECT_SEED)));
1521 }
1522 
1523 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
1524 					const struct rtrs_addr *path,
1525 					size_t con_num, u32 nr_poll_queues)
1526 {
1527 	struct rtrs_clt_path *clt_path;
1528 	int err = -ENOMEM;
1529 	int cpu;
1530 	size_t total_con;
1531 
1532 	clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
1533 	if (!clt_path)
1534 		goto err;
1535 
1536 	/*
1537 	 * irqmode and poll
1538 	 * +1: Extra connection for user messages
1539 	 */
1540 	total_con = con_num + nr_poll_queues + 1;
1541 	clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
1542 				  GFP_KERNEL);
1543 	if (!clt_path->s.con)
1544 		goto err_free_path;
1545 
1546 	clt_path->s.con_num = total_con;
1547 	clt_path->s.irq_con_num = con_num + 1;
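	/*
	 * The first irq_con_num connections (user connection + con_num IO
	 * connections) use IRQ-driven completions; the remaining
	 * nr_poll_queues connections are polled directly, see
	 * create_con_cq_qp().
	 */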
1548 
1549 	clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
1550 	if (!clt_path->stats)
1551 		goto err_free_con;
1552 
1553 	mutex_init(&clt_path->init_mutex);
1554 	uuid_gen(&clt_path->s.uuid);
1555 	memcpy(&clt_path->s.dst_addr, path->dst,
1556 	       rdma_addr_size((struct sockaddr *)path->dst));
1557 
1558 	 * rdma_resolve_addr() passes src_addr to cma_bind_addr(), which
1559 	 * checks that the sa_family is non-zero. If the user passed
1560 	 * src_addr=NULL, sess->src_addr will contain only zeros, which is fine.
1561 	 * the sess->src_addr will contain only zeros, which is then fine.
1562 	 */
1563 	if (path->src)
1564 		memcpy(&clt_path->s.src_addr, path->src,
1565 		       rdma_addr_size((struct sockaddr *)path->src));
1566 	strscpy(clt_path->s.sessname, clt->sessname,
1567 		sizeof(clt_path->s.sessname));
1568 	clt_path->clt = clt;
1569 	clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
1570 	init_waitqueue_head(&clt_path->state_wq);
1571 	clt_path->state = RTRS_CLT_CONNECTING;
1572 	atomic_set(&clt_path->connected_cnt, 0);
1573 	INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
1574 	INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
1575 	INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
1576 	rtrs_clt_init_hb(clt_path);
1577 
1578 	clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
1579 	if (!clt_path->mp_skip_entry)
1580 		goto err_free_stats;
1581 
1582 	for_each_possible_cpu(cpu)
1583 		INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
1584 
1585 	err = rtrs_clt_init_stats(clt_path->stats);
1586 	if (err)
1587 		goto err_free_percpu;
1588 
1589 	return clt_path;
1590 
1591 err_free_percpu:
1592 	free_percpu(clt_path->mp_skip_entry);
1593 err_free_stats:
1594 	kfree(clt_path->stats);
1595 err_free_con:
1596 	kfree(clt_path->s.con);
1597 err_free_path:
1598 	kfree(clt_path);
1599 err:
1600 	return ERR_PTR(err);
1601 }
1602 
1603 void free_path(struct rtrs_clt_path *clt_path)
1604 {
1605 	free_percpu(clt_path->mp_skip_entry);
1606 	mutex_destroy(&clt_path->init_mutex);
1607 	kfree(clt_path->s.con);
1608 	kfree(clt_path->rbufs);
1609 	kfree(clt_path);
1610 }
1611 
1612 static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
1613 {
1614 	struct rtrs_clt_con *con;
1615 
1616 	con = kzalloc(sizeof(*con), GFP_KERNEL);
1617 	if (!con)
1618 		return -ENOMEM;
1619 
1620 	/* Map first two connections to the first CPU */
1621 	con->cpu  = (cid ? cid - 1 : 0) % nr_cpu_ids;
1622 	con->c.cid = cid;
1623 	con->c.path = &clt_path->s;
1624 	/* Align with srv, init as 1 */
1625 	atomic_set(&con->c.wr_cnt, 1);
1626 	mutex_init(&con->con_mutex);
1627 
1628 	clt_path->s.con[cid] = &con->c;
1629 
1630 	return 0;
1631 }
1632 
1633 static void destroy_con(struct rtrs_clt_con *con)
1634 {
1635 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1636 
1637 	clt_path->s.con[con->c.cid] = NULL;
1638 	mutex_destroy(&con->con_mutex);
1639 	kfree(con);
1640 }
1641 
1642 static int create_con_cq_qp(struct rtrs_clt_con *con)
1643 {
1644 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1645 	u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
1646 	int err, cq_vector;
1647 	struct rtrs_msg_rkey_rsp *rsp;
1648 
1649 	lockdep_assert_held(&con->con_mutex);
1650 	if (con->c.cid == 0) {
1651 		max_send_sge = 1;
1652 		/* We must be the first here */
1653 		if (WARN_ON(clt_path->s.dev))
1654 			return -EINVAL;
1655 
1656 		/*
1657 		 * The whole session uses the device from the user connection.
1658 		 * Be careful not to close the user connection before the ib dev
1659 		 * is gracefully put.
1660 		 */
1661 		clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
1662 						       &dev_pd);
1663 		if (!clt_path->s.dev) {
1664 			rtrs_wrn(clt_path->clt,
1665 				  "rtrs_ib_dev_find_or_add(): no memory\n");
1666 			return -ENOMEM;
1667 		}
1668 		clt_path->s.dev_ref = 1;
1669 		query_fast_reg_mode(clt_path);
1670 		wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1671 		/*
1672 		 * Two (request + registration) completions for send.
1673 		 * Two for recv if always_invalidate is set on the server,
1674 		 * or one for recv otherwise.
1675 		 * + 2 for drain and heartbeat,
1676 		 * in case the qp gets into an error state.
1677 		 */
1678 		max_send_wr =
1679 			min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
1680 		max_recv_wr = max_send_wr;
1681 	} else {
1682 		/*
1683 		 * Here we assume that session members are correctly set.
1684 		 * This is always true if user connection (cid == 0) is
1685 		 * established first.
1686 		 */
1687 		if (WARN_ON(!clt_path->s.dev))
1688 			return -EINVAL;
1689 		if (WARN_ON(!clt_path->queue_depth))
1690 			return -EINVAL;
1691 
1692 		wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
1693 		/* Shared between connections */
1694 		clt_path->s.dev_ref++;
1695 		max_send_wr = min_t(int, wr_limit,
1696 			      /* QD * (REQ + RSP + FR REGS or INVS) + drain */
1697 			      clt_path->queue_depth * 4 + 1);
1698 		max_recv_wr = min_t(int, wr_limit,
1699 			      clt_path->queue_depth * 3 + 1);
1700 		max_send_sge = 2;
1701 	}
1702 	atomic_set(&con->c.sq_wr_avail, max_send_wr);
1703 	cq_num = max_send_wr + max_recv_wr;
1704 	/* alloc iu to recv new rkey reply when server reports flags set */
1705 	if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
1706 		con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
1707 					      GFP_KERNEL,
1708 					      clt_path->s.dev->ib_dev,
1709 					      DMA_FROM_DEVICE,
1710 					      rtrs_clt_rdma_done);
1711 		if (!con->rsp_ius)
1712 			return -ENOMEM;
1713 		con->queue_num = cq_num;
1714 	}
1715 	cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
1716 	if (con->c.cid >= clt_path->s.irq_con_num)
1717 		err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1718 					cq_vector, cq_num, max_send_wr,
1719 					max_recv_wr, IB_POLL_DIRECT);
1720 	else
1721 		err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
1722 					cq_vector, cq_num, max_send_wr,
1723 					max_recv_wr, IB_POLL_SOFTIRQ);
1724 	/*
1725 	 * In case of error we do not bother to clean previous allocations,
1726 	 * since destroy_con_cq_qp() must be called.
1727 	 */
1728 	return err;
1729 }
1730 
1731 static void destroy_con_cq_qp(struct rtrs_clt_con *con)
1732 {
1733 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1734 
1735 	/*
1736 	 * Be careful here: destroy_con_cq_qp() can be called even if
1737 	 * create_con_cq_qp() failed, see comments there.
1738 	 */
1739 	lockdep_assert_held(&con->con_mutex);
1740 	rtrs_cq_qp_destroy(&con->c);
1741 	if (con->rsp_ius) {
1742 		rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
1743 			     con->queue_num);
1744 		con->rsp_ius = NULL;
1745 		con->queue_num = 0;
1746 	}
1747 	if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
1748 		rtrs_ib_dev_put(clt_path->s.dev);
1749 		clt_path->s.dev = NULL;
1750 	}
1751 }
1752 
1753 static void stop_cm(struct rtrs_clt_con *con)
1754 {
1755 	rdma_disconnect(con->c.cm_id);
1756 	if (con->c.qp)
1757 		ib_drain_qp(con->c.qp);
1758 }
1759 
1760 static void destroy_cm(struct rtrs_clt_con *con)
1761 {
1762 	rdma_destroy_id(con->c.cm_id);
1763 	con->c.cm_id = NULL;
1764 }
1765 
1766 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
1767 {
1768 	struct rtrs_path *s = con->c.path;
1769 	int err;
1770 
1771 	mutex_lock(&con->con_mutex);
1772 	err = create_con_cq_qp(con);
1773 	mutex_unlock(&con->con_mutex);
1774 	if (err) {
1775 		rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
1776 		return err;
1777 	}
1778 	err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
1779 	if (err)
1780 		rtrs_err(s, "Resolving route failed, err: %d\n", err);
1781 
1782 	return err;
1783 }
1784 
1785 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
1786 {
1787 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1788 	struct rtrs_clt_sess *clt = clt_path->clt;
1789 	struct rtrs_msg_conn_req msg;
1790 	struct rdma_conn_param param;
1791 
1792 	int err;
1793 
1794 	param = (struct rdma_conn_param) {
1795 		.retry_count = 7,
1796 		.rnr_retry_count = 7,
1797 		.private_data = &msg,
1798 		.private_data_len = sizeof(msg),
1799 	};
1800 
1801 	msg = (struct rtrs_msg_conn_req) {
1802 		.magic = cpu_to_le16(RTRS_MAGIC),
1803 		.version = cpu_to_le16(RTRS_PROTO_VER),
1804 		.cid = cpu_to_le16(con->c.cid),
1805 		.cid_num = cpu_to_le16(clt_path->s.con_num),
1806 		.recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
1807 	};
1808 	msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
1809 	uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
1810 	uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1811 
1812 	err = rdma_connect_locked(con->c.cm_id, &param);
1813 	if (err)
1814 		rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
1815 
1816 	return err;
1817 }
1818 
1819 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
1820 				       struct rdma_cm_event *ev)
1821 {
1822 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1823 	struct rtrs_clt_sess *clt = clt_path->clt;
1824 	const struct rtrs_msg_conn_rsp *msg;
1825 	u16 version, queue_depth;
1826 	int errno;
1827 	u8 len;
1828 
1829 	msg = ev->param.conn.private_data;
1830 	len = ev->param.conn.private_data_len;
1831 	if (len < sizeof(*msg)) {
1832 		rtrs_err(clt, "Invalid RTRS connection response\n");
1833 		return -ECONNRESET;
1834 	}
1835 	if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
1836 		rtrs_err(clt, "Invalid RTRS magic\n");
1837 		return -ECONNRESET;
1838 	}
1839 	version = le16_to_cpu(msg->version);
1840 	if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
1841 		rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1842 			  version >> 8, RTRS_PROTO_VER_MAJOR);
1843 		return -ECONNRESET;
1844 	}
1845 	errno = le16_to_cpu(msg->errno);
1846 	if (errno) {
1847 		rtrs_err(clt, "Invalid RTRS message: errno %d\n",
1848 			  errno);
1849 		return -ECONNRESET;
1850 	}
1851 	if (con->c.cid == 0) {
1852 		queue_depth = le16_to_cpu(msg->queue_depth);
1853 
1854 		if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
1855 			rtrs_err(clt, "Error: queue depth changed\n");
1856 
1857 			/*
1858 			 * Stop any more reconnection attempts
1859 			 */
1860 			clt_path->reconnect_attempts = -1;
1861 			rtrs_err(clt,
1862 				"Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
1863 			return -ECONNRESET;
1864 		}
1865 
1866 		if (!clt_path->rbufs) {
1867 			clt_path->rbufs = kcalloc(queue_depth,
1868 						  sizeof(*clt_path->rbufs),
1869 						  GFP_KERNEL);
1870 			if (!clt_path->rbufs)
1871 				return -ENOMEM;
1872 		}
1873 		clt_path->queue_depth = queue_depth;
1874 		clt_path->s.signal_interval = min_not_zero(queue_depth,
1875 						(unsigned short) SERVICE_CON_QUEUE_DEPTH);
1876 		clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
1877 		clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
1878 		clt_path->flags = le32_to_cpu(msg->flags);
1879 		clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
1880 
1881 		/*
1882 		 * Global IO size is always a minimum.
1883 		 * If during a reconnection the server sends us a slightly
1884 		 * higher value, the client does not care and keeps the
1885 		 * cached minimum.
1886 		 * Since several sessions (paths) can be re-establishing
1887 		 * connections in parallel, take the lock.
1888 		 */
1889 		mutex_lock(&clt->paths_mutex);
1890 		clt->queue_depth = clt_path->queue_depth;
1891 		clt->max_io_size = min_not_zero(clt_path->max_io_size,
1892 						clt->max_io_size);
1893 		mutex_unlock(&clt->paths_mutex);
1894 
1895 		/*
1896 		 * Cache the hca_port and hca_name for sysfs
1897 		 */
1898 		clt_path->hca_port = con->c.cm_id->port_num;
1899 		scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
1900 			  "%s", clt_path->s.dev->ib_dev->name);
1901 		clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
1902 		/* set for_new_clt, to allow future reconnect on any path */
1903 		clt_path->for_new_clt = 1;
1904 	}
1905 
1906 	return 0;
1907 }
1908 
1909 static inline void flag_success_on_conn(struct rtrs_clt_con *con)
1910 {
1911 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
1912 
1913 	atomic_inc(&clt_path->connected_cnt);
1914 	con->cm_err = 1;
1915 }
1916 
1917 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
1918 				    struct rdma_cm_event *ev)
1919 {
1920 	struct rtrs_path *s = con->c.path;
1921 	const struct rtrs_msg_conn_rsp *msg;
1922 	const char *rej_msg;
1923 	int status, errno;
1924 	u8 data_len;
1925 
1926 	status = ev->status;
1927 	rej_msg = rdma_reject_msg(con->c.cm_id, status);
1928 	msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
1929 
1930 	if (msg && data_len >= sizeof(*msg)) {
1931 		errno = (int16_t)le16_to_cpu(msg->errno);
1932 		if (errno == -EBUSY)
1933 			rtrs_err(s,
1934 				  "Previous session still exists on the server, please reconnect later\n");
1935 		else
1936 			rtrs_err(s,
1937 				  "Connect rejected: status %d (%s), rtrs errno %d\n",
1938 				  status, rej_msg, errno);
1939 	} else {
1940 		rtrs_err(s,
1941 			  "Connect rejected but with malformed message: status %d (%s)\n",
1942 			  status, rej_msg);
1943 	}
1944 
1945 	return -ECONNRESET;
1946 }
1947 
1948 void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
1949 {
1950 	trace_rtrs_clt_close_conns(clt_path);
1951 
1952 	if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
1953 		queue_work(rtrs_wq, &clt_path->close_work);
1954 	if (wait)
1955 		flush_work(&clt_path->close_work);
1956 }
1957 
1958 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
1959 {
1960 	if (con->cm_err == 1) {
1961 		struct rtrs_clt_path *clt_path;
1962 
1963 		clt_path = to_clt_path(con->c.path);
1964 		if (atomic_dec_and_test(&clt_path->connected_cnt))
1965 
1966 			wake_up(&clt_path->state_wq);
1967 	}
1968 	con->cm_err = cm_err;
1969 }
1970 
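/*
 * RDMA CM event handler: drives address/route resolution and connection
 * establishment, and converts failure events into a cm_err that triggers
 * the error recovery path.  Always returns 0 so that the CM id is not
 * destroyed from within the callback.
 */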
1971 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
1972 				     struct rdma_cm_event *ev)
1973 {
1974 	struct rtrs_clt_con *con = cm_id->context;
1975 	struct rtrs_path *s = con->c.path;
1976 	struct rtrs_clt_path *clt_path = to_clt_path(s);
1977 	int cm_err = 0;
1978 
1979 	switch (ev->event) {
1980 	case RDMA_CM_EVENT_ADDR_RESOLVED:
1981 		cm_err = rtrs_rdma_addr_resolved(con);
1982 		break;
1983 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
1984 		cm_err = rtrs_rdma_route_resolved(con);
1985 		break;
1986 	case RDMA_CM_EVENT_ESTABLISHED:
1987 		cm_err = rtrs_rdma_conn_established(con, ev);
1988 		if (!cm_err) {
1989 			/*
1990 			 * Report success and wake up. Here we abuse state_wq,
1991 			 * i.e. wake up without state change, but we set cm_err.
1992 			 */
1993 			flag_success_on_conn(con);
1994 			wake_up(&clt_path->state_wq);
1995 			return 0;
1996 		}
1997 		break;
1998 	case RDMA_CM_EVENT_REJECTED:
1999 		cm_err = rtrs_rdma_conn_rejected(con, ev);
2000 		break;
2001 	case RDMA_CM_EVENT_DISCONNECTED:
2002 		/* No message for disconnecting */
2003 		cm_err = -ECONNRESET;
2004 		break;
2005 	case RDMA_CM_EVENT_CONNECT_ERROR:
2006 	case RDMA_CM_EVENT_UNREACHABLE:
2007 	case RDMA_CM_EVENT_ADDR_CHANGE:
2008 	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
2009 		rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
2010 			 rdma_event_msg(ev->event), ev->status);
2011 		cm_err = -ECONNRESET;
2012 		break;
2013 	case RDMA_CM_EVENT_ADDR_ERROR:
2014 	case RDMA_CM_EVENT_ROUTE_ERROR:
2015 		rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
2016 			 rdma_event_msg(ev->event), ev->status);
2017 		cm_err = -EHOSTUNREACH;
2018 		break;
2019 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
2020 		/*
2021 		 * Device removal is a special case.  Queue close and return 0.
2022 		 */
2023 		rtrs_clt_close_conns(clt_path, false);
2024 		return 0;
2025 	default:
2026 		rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
2027 			 rdma_event_msg(ev->event), ev->status);
2028 		cm_err = -ECONNRESET;
2029 		break;
2030 	}
2031 
2032 	if (cm_err) {
2033 		/*
2034 		 * A cm error makes sense only while a connection is being
2035 		 * established; in other cases we rely on the normal reconnect procedure.
2036 		 */
2037 		flag_error_on_conn(con, cm_err);
2038 		rtrs_rdma_error_recovery(con);
2039 	}
2040 
2041 	return 0;
2042 }
2043 
2044 /* The caller should do the cleanup in case of error */
2045 static int create_cm(struct rtrs_clt_con *con)
2046 {
2047 	struct rtrs_path *s = con->c.path;
2048 	struct rtrs_clt_path *clt_path = to_clt_path(s);
2049 	struct rdma_cm_id *cm_id;
2050 	int err;
2051 
2052 	cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
2053 			       clt_path->s.dst_addr.ss_family == AF_IB ?
2054 			       RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
2055 	if (IS_ERR(cm_id)) {
2056 		err = PTR_ERR(cm_id);
2057 		rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
2058 
2059 		return err;
2060 	}
2061 	con->c.cm_id = cm_id;
2062 	con->cm_err = 0;
2063 	/* allow the port to be reused */
2064 	err = rdma_set_reuseaddr(cm_id, 1);
2065 	if (err != 0) {
2066 		rtrs_err(s, "Set address reuse failed, err: %d\n", err);
2067 		return err;
2068 	}
2069 	err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
2070 				(struct sockaddr *)&clt_path->s.dst_addr,
2071 				RTRS_CONNECT_TIMEOUT_MS);
2072 	if (err) {
2073 		rtrs_err(s, "Failed to resolve address, err: %d\n", err);
2074 		return err;
2075 	}
2076 	/*
2077 	 * Combine connection status and session events. This is needed
2078 	 * to wait for two possible cases: cm_err has something meaningful
2079 	 * or session state was really changed to error by device removal.
2080 	 */
2081 	err = wait_event_interruptible_timeout(
2082 			clt_path->state_wq,
2083 			con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
2084 			msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2085 	if (err == 0 || err == -ERESTARTSYS) {
2086 		if (err == 0)
2087 			err = -ETIMEDOUT;
2088 		/* Timedout or interrupted */
2089 		return err;
2090 	}
2091 	if (con->cm_err < 0)
2092 		return con->cm_err;
2093 	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
2094 		/* Device removal */
2095 		return -ECONNABORTED;
2096 
2097 	return 0;
2098 }
2099 
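/*
 * Mark the path as established and notify the upper layer (link_ev)
 * with RECONNECTED when the first path of the session comes (back) up.
 */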
2100 static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
2101 {
2102 	struct rtrs_clt_sess *clt = clt_path->clt;
2103 	int up;
2104 
2105 	/*
2106 	 * We can fire RECONNECTED event only when all paths were
2107 	 * connected on rtrs_clt_open(), then each was disconnected
2108 	 * and the first one connected again.  That's why this nasty
2109 	 * game with counter value.
2110 	 */
2111 
2112 	mutex_lock(&clt->paths_ev_mutex);
2113 	up = ++clt->paths_up;
2114 	/*
2115 	 * Here it is safe to access paths num directly since up counter
2116 	 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
2117 	 * in progress, thus paths removals are impossible.
2118 	 */
2119 	if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
2120 		clt->paths_up = clt->paths_num;
2121 	else if (up == 1)
2122 		clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
2123 	mutex_unlock(&clt->paths_ev_mutex);
2124 
2125 	/* Mark session as established */
2126 	clt_path->established = true;
2127 	clt_path->reconnect_attempts = 0;
2128 	clt_path->stats->reconnects.successful_cnt++;
2129 }
2130 
2131 static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
2132 {
2133 	struct rtrs_clt_sess *clt = clt_path->clt;
2134 
2135 	if (!clt_path->established)
2136 		return;
2137 
2138 	clt_path->established = false;
2139 	mutex_lock(&clt->paths_ev_mutex);
2140 	WARN_ON(!clt->paths_up);
2141 	if (--clt->paths_up == 0)
2142 		clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2143 	mutex_unlock(&clt->paths_ev_mutex);
2144 }
2145 
2146 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
2147 {
2148 	struct rtrs_clt_con *con;
2149 	unsigned int cid;
2150 
2151 	WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
2152 
2153 	/*
2154 	 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
2155 	 * exactly in between.  Start destroying after it finishes.
2156 	 */
2157 	mutex_lock(&clt_path->init_mutex);
2158 	mutex_unlock(&clt_path->init_mutex);
2159 
2160 	/*
2161 	 * All IO paths must observe !CONNECTED state before we
2162 	 * free everything.
2163 	 */
2164 	synchronize_rcu();
2165 
2166 	rtrs_stop_hb(&clt_path->s);
2167 
2168 	/*
2169 	 * The order is utterly crucial: first disconnect and complete all
2170 	 * rdma requests with error (thus setting in_use=false for requests),
2171 	 * then fail outstanding requests by checking in_use for each, and
2172 	 * eventually notify the upper layer about the session disconnection.
2173 	 */
2174 
2175 	for (cid = 0; cid < clt_path->s.con_num; cid++) {
2176 		if (!clt_path->s.con[cid])
2177 			break;
2178 		con = to_clt_con(clt_path->s.con[cid]);
2179 		stop_cm(con);
2180 	}
2181 	fail_all_outstanding_reqs(clt_path);
2182 	free_path_reqs(clt_path);
2183 	rtrs_clt_path_down(clt_path);
2184 
2185 	/*
2186 	 * Wait for graceful shutdown, namely when peer side invokes
2187 	 * rdma_disconnect(). 'connected_cnt' is decremented only on
2188 	 * CM events, thus if the other side has crashed and hb has detected
2189 	 * something is wrong, we will be stuck here for exactly the timeout,
2190 	 * since CM does not fire anything.  That is fine, we are not in a
2191 	 * hurry.
2192 	 */
2193 	wait_event_timeout(clt_path->state_wq,
2194 			   !atomic_read(&clt_path->connected_cnt),
2195 			   msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
2196 
2197 	for (cid = 0; cid < clt_path->s.con_num; cid++) {
2198 		if (!clt_path->s.con[cid])
2199 			break;
2200 		con = to_clt_con(clt_path->s.con[cid]);
2201 		mutex_lock(&con->con_mutex);
2202 		destroy_con_cq_qp(con);
2203 		mutex_unlock(&con->con_mutex);
2204 		destroy_cm(con);
2205 		destroy_con(con);
2206 	}
2207 }
2208 
2209 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
2210 {
2211 	struct rtrs_clt_sess *clt = clt_path->clt;
2212 	struct rtrs_clt_path *next;
2213 	bool wait_for_grace = false;
2214 	int cpu;
2215 
2216 	mutex_lock(&clt->paths_mutex);
2217 	list_del_rcu(&clt_path->s.entry);
2218 
2219 	/* Make sure everybody observes path removal. */
2220 	synchronize_rcu();
2221 
2222 	/*
2223 	 * At this point nobody sees @sess in the list, but still we have
2224 	 * dangling pointer @pcpu_path which _can_ point to @sess.  Since
2225 	 * nobody can observe @sess in the list, we guarantee that IO path
2226 	 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
2227 	 * to @sess, but can never again become @sess.
2228 	 */
2229 
2230 	/*
2231 	 * Decrement paths number only after grace period, because
2232 	 * caller of do_each_path() must firstly observe list without
2233 	 * path and only then decremented paths number.
2234 	 *
2235 	 * Otherwise there can be the following situation:
2236 	 *    o Two paths exist and IO is coming.
2237 	 *    o One path is removed:
2238 	 *      CPU#0                          CPU#1
2239 	 *      do_each_path():                rtrs_clt_remove_path_from_arr():
2240 	 *          path = get_next_path()
2241 	 *          ^^^                            list_del_rcu(path)
2242 	 *          [!CONNECTED path]              clt->paths_num--
2243 	 *                                              ^^^^^^^^^
2244 	 *          load clt->paths_num                 from 2 to 1
2245 	 *                    ^^^^^^^^^
2246 	 *                    sees 1
2247 	 *
2248 	 *      path is observed as !CONNECTED, but do_each_path() loop
2249 	 *      ends, because expression i < clt->paths_num is false.
2250 	 */
2251 	clt->paths_num--;
2252 
2253 	/*
2254 	 * Get @next connection from current @sess which is going to be
2255 	 * removed.  If @sess is the last element, then @next is NULL.
2256 	 */
2257 	rcu_read_lock();
2258 	next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
2259 	rcu_read_unlock();
2260 
2261 	/*
2262 	 * @pcpu paths can still point to the path which is going to be
2263 	 * removed, so change the pointer manually.
2264 	 */
2265 	for_each_possible_cpu(cpu) {
2266 		struct rtrs_clt_path __rcu **ppcpu_path;
2267 
2268 		ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2269 		if (rcu_dereference_protected(*ppcpu_path,
2270 			lockdep_is_held(&clt->paths_mutex)) != clt_path)
2271 			/*
2272 			 * synchronize_rcu() was called just after deleting
2273 			 * entry from the list, thus IO code path cannot
2274 			 * change pointer back to the pointer which is going
2275 			 * to be removed, we are safe here.
2276 			 */
2277 			continue;
2278 
2279 		/*
2280 		 * We race with IO code path, which also changes pointer,
2281 		 * thus we have to be careful not to overwrite it.
2282 		 */
2283 		if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
2284 				next))
2285 			/*
2286 			 * @ppcpu_path was successfully replaced with @next,
2287 			 * which means that someone could also have picked up
2288 			 * @sess and be dereferencing it right now, so waiting
2289 			 * for a grace period is required.
2290 			 */
2291 			wait_for_grace = true;
2292 	}
2293 	if (wait_for_grace)
2294 		synchronize_rcu();
2295 
2296 	mutex_unlock(&clt->paths_mutex);
2297 }
2298 
2299 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
2300 {
2301 	struct rtrs_clt_sess *clt = clt_path->clt;
2302 
2303 	mutex_lock(&clt->paths_mutex);
2304 	clt->paths_num++;
2305 
2306 	list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2307 	mutex_unlock(&clt->paths_mutex);
2308 }
2309 
2310 static void rtrs_clt_close_work(struct work_struct *work)
2311 {
2312 	struct rtrs_clt_path *clt_path;
2313 
2314 	clt_path = container_of(work, struct rtrs_clt_path, close_work);
2315 
2316 	cancel_work_sync(&clt_path->err_recovery_work);
2317 	cancel_delayed_work_sync(&clt_path->reconnect_dwork);
2318 	rtrs_clt_stop_and_destroy_conns(clt_path);
2319 	rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
2320 }
2321 
2322 static int init_conns(struct rtrs_clt_path *clt_path)
2323 {
2324 	unsigned int cid;
2325 	int err, i;
2326 
2327 	/*
2328 	 * On every new set of session connections increase the reconnect
2329 	 * counter to avoid clashes with previous sessions that are not
2330 	 * yet closed on the server side.
2331 	 */
2332 	clt_path->s.recon_cnt++;
2333 
2334 	/* Establish all RDMA connections  */
2335 	for (cid = 0; cid < clt_path->s.con_num; cid++) {
2336 		err = create_con(clt_path, cid);
2337 		if (err)
2338 			goto destroy;
2339 
2340 		err = create_cm(to_clt_con(clt_path->s.con[cid]));
2341 		if (err)
2342 			goto destroy;
2343 	}
2344 	err = alloc_path_reqs(clt_path);
2345 	if (err)
2346 		goto destroy;
2347 
2348 	return 0;
2349 
2350 destroy:
2351 	/* Make sure we do the cleanup in the order the connections were created */
2352 	for (i = 0; i <= cid; i++) {
2353 		struct rtrs_clt_con *con;
2354 
2355 		if (!clt_path->s.con[i])
2356 			break;
2357 
2358 		con = to_clt_con(clt_path->s.con[i]);
2359 		if (con->c.cm_id) {
2360 			stop_cm(con);
2361 			mutex_lock(&con->con_mutex);
2362 			destroy_con_cq_qp(con);
2363 			mutex_unlock(&con->con_mutex);
2364 			destroy_cm(con);
2365 		}
2366 		destroy_con(con);
2367 	}
2368 	/*
2369 	 * If we've never taken the async path and got an error, say,
2370 	 * doing rdma_resolve_addr(), switch to CONNECTING_ERR state
2371 	 * manually to keep reconnecting.
2372 	 */
2373 	rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2374 
2375 	return err;
2376 }
2377 
2378 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
2379 {
2380 	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2381 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2382 	struct rtrs_iu *iu;
2383 
2384 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2385 	rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2386 
2387 	if (wc->status != IB_WC_SUCCESS) {
2388 		rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
2389 			  ib_wc_status_msg(wc->status));
2390 		rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
2391 		return;
2392 	}
2393 
2394 	rtrs_clt_update_wc_stats(con);
2395 }
2396 
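/*
 * Parse the server's info response: each sg descriptor covers a run of
 * contiguous chunks, so split it into per-chunk rbuf entries (addr, rkey)
 * until all queue_depth buffers are mapped, and sanity-check the totals.
 */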
2397 static int process_info_rsp(struct rtrs_clt_path *clt_path,
2398 			    const struct rtrs_msg_info_rsp *msg)
2399 {
2400 	unsigned int sg_cnt, total_len;
2401 	int i, sgi;
2402 
2403 	sg_cnt = le16_to_cpu(msg->sg_cnt);
2404 	if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
2405 		rtrs_err(clt_path->clt,
2406 			  "Incorrect sg_cnt %d, is not multiple\n",
2407 			  sg_cnt);
2408 		return -EINVAL;
2409 	}
2410 
2411 	/*
2412 	 * Check if IB immediate data size is enough to hold the mem_id and
2413 	 * the offset inside the memory chunk.
2414 	 */
2415 	if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
2416 	    MAX_IMM_PAYL_BITS) {
2417 		rtrs_err(clt_path->clt,
2418 			  "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
2419 			  MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
2420 		return -EINVAL;
2421 	}
2422 	total_len = 0;
2423 	for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
2424 		const struct rtrs_sg_desc *desc = &msg->desc[sgi];
2425 		u32 len, rkey;
2426 		u64 addr;
2427 
2428 		addr = le64_to_cpu(desc->addr);
2429 		rkey = le32_to_cpu(desc->key);
2430 		len  = le32_to_cpu(desc->len);
2431 
2432 		total_len += len;
2433 
2434 		if (!len || (len % clt_path->chunk_size)) {
2435 			rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
2436 				  sgi,
2437 				  len);
2438 			return -EINVAL;
2439 		}
2440 		for ( ; len && i < clt_path->queue_depth; i++) {
2441 			clt_path->rbufs[i].addr = addr;
2442 			clt_path->rbufs[i].rkey = rkey;
2443 
2444 			len  -= clt_path->chunk_size;
2445 			addr += clt_path->chunk_size;
2446 		}
2447 	}
2448 	/* Sanity check */
2449 	if (sgi != sg_cnt || i != clt_path->queue_depth) {
2450 		rtrs_err(clt_path->clt,
2451 			 "Incorrect sg vector, not fully mapped\n");
2452 		return -EINVAL;
2453 	}
2454 	if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
2455 		rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
2456 		return -EINVAL;
2457 	}
2458 
2459 	return 0;
2460 }
2461 
2462 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
2463 {
2464 	struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
2465 	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
2466 	struct rtrs_msg_info_rsp *msg;
2467 	enum rtrs_clt_state state;
2468 	struct rtrs_iu *iu;
2469 	size_t rx_sz;
2470 	int err;
2471 
2472 	state = RTRS_CLT_CONNECTING_ERR;
2473 
2474 	WARN_ON(con->c.cid);
2475 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
2476 	if (wc->status != IB_WC_SUCCESS) {
2477 		rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
2478 			  ib_wc_status_msg(wc->status));
2479 		goto out;
2480 	}
2481 	WARN_ON(wc->opcode != IB_WC_RECV);
2482 
2483 	if (wc->byte_len < sizeof(*msg)) {
2484 		rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2485 			  wc->byte_len);
2486 		goto out;
2487 	}
2488 	ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
2489 				   iu->size, DMA_FROM_DEVICE);
2490 	msg = iu->buf;
2491 	if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
2492 		rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
2493 			  le16_to_cpu(msg->type));
2494 		goto out;
2495 	}
2496 	rx_sz  = sizeof(*msg);
2497 	rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
2498 	if (wc->byte_len < rx_sz) {
2499 		rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2500 			  wc->byte_len);
2501 		goto out;
2502 	}
2503 	err = process_info_rsp(clt_path, msg);
2504 	if (err)
2505 		goto out;
2506 
2507 	err = post_recv_path(clt_path);
2508 	if (err)
2509 		goto out;
2510 
2511 	state = RTRS_CLT_CONNECTED;
2512 
2513 out:
2514 	rtrs_clt_update_wc_stats(con);
2515 	rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
2516 	rtrs_clt_change_state_get_old(clt_path, state, NULL);
2517 }
2518 
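/*
 * Perform the info handshake on the user connection: post a receive for
 * the info response, send the info request carrying the path name, then
 * wait for the path state to leave CONNECTING (moved by the completion
 * handlers above).
 */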
2519 static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
2520 {
2521 	struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
2522 	struct rtrs_msg_info_req *msg;
2523 	struct rtrs_iu *tx_iu, *rx_iu;
2524 	size_t rx_sz;
2525 	int err;
2526 
2527 	rx_sz  = sizeof(struct rtrs_msg_info_rsp);
2528 	rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
2529 
2530 	tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
2531 			       clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
2532 			       rtrs_clt_info_req_done);
2533 	rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
2534 			       DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
2535 	if (!tx_iu || !rx_iu) {
2536 		err = -ENOMEM;
2537 		goto out;
2538 	}
2539 	/* Prepare for getting info response */
2540 	err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
2541 	if (err) {
2542 		rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2543 		goto out;
2544 	}
2545 	rx_iu = NULL;
2546 
2547 	msg = tx_iu->buf;
2548 	msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
2549 	memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
2550 
2551 	ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
2552 				      tx_iu->dma_addr,
2553 				      tx_iu->size, DMA_TO_DEVICE);
2554 
2555 	/* Send info request */
2556 	err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
2557 	if (err) {
2558 		rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
2559 		goto out;
2560 	}
2561 	tx_iu = NULL;
2562 
2563 	/* Wait for state change */
2564 	wait_event_interruptible_timeout(clt_path->state_wq,
2565 					 clt_path->state != RTRS_CLT_CONNECTING,
2566 					 msecs_to_jiffies(
2567 						 RTRS_CONNECT_TIMEOUT_MS));
2568 	if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
2569 		if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
2570 			err = -ECONNRESET;
2571 		else
2572 			err = -ETIMEDOUT;
2573 	}
2574 
2575 out:
2576 	if (tx_iu)
2577 		rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
2578 	if (rx_iu)
2579 		rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
2580 	if (err)
2581 		/* If we've never taken the async path, e.g. because of allocation problems */
2582 		rtrs_clt_change_state_get_old(clt_path,
2583 					      RTRS_CLT_CONNECTING_ERR, NULL);
2584 
2585 	return err;
2586 }
2587 
2588 /**
2589  * init_path() - establishes all path connections and does handshake
2590  * @clt_path: client path.
2591  * In case of error a full close or reconnect procedure should be taken,
2592  * because reconnect or close async works can already be started.
2593  */
2594 static int init_path(struct rtrs_clt_path *clt_path)
2595 {
2596 	int err;
2597 	char str[NAME_MAX];
2598 	struct rtrs_addr path = {
2599 		.src = &clt_path->s.src_addr,
2600 		.dst = &clt_path->s.dst_addr,
2601 	};
2602 
2603 	rtrs_addr_to_str(&path, str, sizeof(str));
2604 
2605 	mutex_lock(&clt_path->init_mutex);
2606 	err = init_conns(clt_path);
2607 	if (err) {
2608 		rtrs_err(clt_path->clt,
2609 			 "init_conns() failed: err=%d path=%s [%s:%u]\n", err,
2610 			 str, clt_path->hca_name, clt_path->hca_port);
2611 		goto out;
2612 	}
2613 	err = rtrs_send_path_info(clt_path);
2614 	if (err) {
2615 		rtrs_err(clt_path->clt,
2616 			 "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
2617 			 err, str, clt_path->hca_name, clt_path->hca_port);
2618 		goto out;
2619 	}
2620 	rtrs_clt_path_up(clt_path);
2621 	rtrs_start_hb(&clt_path->s);
2622 out:
2623 	mutex_unlock(&clt_path->init_mutex);
2624 
2625 	return err;
2626 }
2627 
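/*
 * Delayed work that retries path establishment after an error, backing
 * off by RTRS_RECONNECT_BACKOFF and giving up (closing the path) once
 * max_reconnect_attempts is reached.
 */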
2628 static void rtrs_clt_reconnect_work(struct work_struct *work)
2629 {
2630 	struct rtrs_clt_path *clt_path;
2631 	struct rtrs_clt_sess *clt;
2632 	int err;
2633 
2634 	clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
2635 				reconnect_dwork);
2636 	clt = clt_path->clt;
2637 
2638 	trace_rtrs_clt_reconnect_work(clt_path);
2639 
2640 	if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
2641 		return;
2642 
2643 	if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
2644 		/* Close a path completely if max attempts is reached */
2645 		rtrs_clt_close_conns(clt_path, false);
2646 		return;
2647 	}
2648 	clt_path->reconnect_attempts++;
2649 
2650 	msleep(RTRS_RECONNECT_BACKOFF);
2651 	if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
2652 		err = init_path(clt_path);
2653 		if (err)
2654 			goto reconnect_again;
2655 	}
2656 
2657 	return;
2658 
2659 reconnect_again:
2660 	if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
2661 		clt_path->stats->reconnects.fail_cnt++;
2662 		queue_work(rtrs_wq, &clt_path->err_recovery_work);
2663 	}
2664 }
2665 
2666 static void rtrs_clt_dev_release(struct device *dev)
2667 {
2668 	struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
2669 						 dev);
2670 
2671 	mutex_destroy(&clt->paths_ev_mutex);
2672 	mutex_destroy(&clt->paths_mutex);
2673 	kfree(clt);
2674 }
2675 
2676 static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
2677 				  u16 port, size_t pdu_sz, void *priv,
2678 				  void	(*link_ev)(void *priv,
2679 						   enum rtrs_clt_link_ev ev),
2680 				  unsigned int reconnect_delay_sec,
2681 				  unsigned int max_reconnect_attempts)
2682 {
2683 	struct rtrs_clt_sess *clt;
2684 	int err;
2685 
2686 	if (!paths_num || paths_num > MAX_PATHS_NUM)
2687 		return ERR_PTR(-EINVAL);
2688 
2689 	if (strlen(sessname) >= sizeof(clt->sessname))
2690 		return ERR_PTR(-EINVAL);
2691 
2692 	clt = kzalloc(sizeof(*clt), GFP_KERNEL);
2693 	if (!clt)
2694 		return ERR_PTR(-ENOMEM);
2695 
2696 	clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2697 	if (!clt->pcpu_path) {
2698 		kfree(clt);
2699 		return ERR_PTR(-ENOMEM);
2700 	}
2701 
2702 	clt->dev.class = &rtrs_clt_dev_class;
2703 	clt->dev.release = rtrs_clt_dev_release;
2704 	uuid_gen(&clt->paths_uuid);
2705 	INIT_LIST_HEAD_RCU(&clt->paths_list);
2706 	clt->paths_num = paths_num;
2707 	clt->paths_up = MAX_PATHS_NUM;
2708 	clt->port = port;
2709 	clt->pdu_sz = pdu_sz;
2710 	clt->max_segments = RTRS_MAX_SEGMENTS;
2711 	clt->reconnect_delay_sec = reconnect_delay_sec;
2712 	clt->max_reconnect_attempts = max_reconnect_attempts;
2713 	clt->priv = priv;
2714 	clt->link_ev = link_ev;
2715 	clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2716 	strscpy(clt->sessname, sessname, sizeof(clt->sessname));
2717 	init_waitqueue_head(&clt->permits_wait);
2718 	mutex_init(&clt->paths_ev_mutex);
2719 	mutex_init(&clt->paths_mutex);
2720 	device_initialize(&clt->dev);
2721 
2722 	err = dev_set_name(&clt->dev, "%s", sessname);
2723 	if (err)
2724 		goto err_put;
2725 
2726 	/*
2727 	 * Suppress user space notification until
2728 	 * sysfs files are created
2729 	 */
2730 	dev_set_uevent_suppress(&clt->dev, true);
2731 	err = device_add(&clt->dev);
2732 	if (err)
2733 		goto err_put;
2734 
2735 	clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2736 	if (!clt->kobj_paths) {
2737 		err = -ENOMEM;
2738 		goto err_del;
2739 	}
2740 	err = rtrs_clt_create_sysfs_root_files(clt);
2741 	if (err) {
2742 		kobject_del(clt->kobj_paths);
2743 		kobject_put(clt->kobj_paths);
2744 		goto err_del;
2745 	}
2746 	dev_set_uevent_suppress(&clt->dev, false);
2747 	kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2748 
2749 	return clt;
2750 err_del:
2751 	device_del(&clt->dev);
2752 err_put:
2753 	free_percpu(clt->pcpu_path);
2754 	put_device(&clt->dev);
2755 	return ERR_PTR(err);
2756 }
2757 
2758 static void free_clt(struct rtrs_clt_sess *clt)
2759 {
2760 	free_percpu(clt->pcpu_path);
2761 
2762 	/*
2763 	 * release callback will free clt and destroy mutexes in last put
2764 	 */
2765 	device_unregister(&clt->dev);
2766 }
2767 
2768 /**
2769  * rtrs_clt_open() - Open a session to an RTRS server
2770  * @ops: holds the link event callback and the private pointer.
2771  * @pathname: name of the path to an RTRS server
2772  * @paths: Paths to be established defined by their src and dst addresses
2773  * @paths_num: Number of elements in the @paths array
2774  * @port: port to be used by the RTRS session
2775  * @pdu_sz: Size of extra payload which can be accessed after permit allocation.
2776  * @reconnect_delay_sec: time between reconnect tries
2777  * @max_reconnect_attempts: Number of times to reconnect on error before giving
2778  *			    up, 0 for disabled, -1 for forever
2779  * @nr_poll_queues: number of polling mode connection using IB_POLL_DIRECT flag
2780  *
2781  * Starts session establishment with the rtrs_server. The function can block
2782  * up to ~2000ms before it returns.
2783  *
2784  * Return a valid pointer on success otherwise PTR_ERR.
2785  */
2786 struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
2787 				 const char *pathname,
2788 				 const struct rtrs_addr *paths,
2789 				 size_t paths_num, u16 port,
2790 				 size_t pdu_sz, u8 reconnect_delay_sec,
2791 				 s16 max_reconnect_attempts, u32 nr_poll_queues)
2792 {
2793 	struct rtrs_clt_path *clt_path, *tmp;
2794 	struct rtrs_clt_sess *clt;
2795 	int err, i;
2796 
2797 	if (strchr(pathname, '/') || strchr(pathname, '.')) {
2798 		pr_err("pathname cannot contain / and .\n");
2799 		err = -EINVAL;
2800 		goto out;
2801 	}
2802 
2803 	clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
2804 			ops->link_ev,
2805 			reconnect_delay_sec,
2806 			max_reconnect_attempts);
2807 	if (IS_ERR(clt)) {
2808 		err = PTR_ERR(clt);
2809 		goto out;
2810 	}
2811 	for (i = 0; i < paths_num; i++) {
2812 		struct rtrs_clt_path *clt_path;
2813 
2814 		clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
2815 				  nr_poll_queues);
2816 		if (IS_ERR(clt_path)) {
2817 			err = PTR_ERR(clt_path);
2818 			goto close_all_path;
2819 		}
2820 		if (!i)
2821 			clt_path->for_new_clt = 1;
2822 		list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2823 
2824 		err = init_path(clt_path);
2825 		if (err) {
2826 			list_del_rcu(&clt_path->s.entry);
2827 			rtrs_clt_close_conns(clt_path, true);
2828 			free_percpu(clt_path->stats->pcpu_stats);
2829 			kfree(clt_path->stats);
2830 			free_path(clt_path);
2831 			goto close_all_path;
2832 		}
2833 
2834 		err = rtrs_clt_create_path_files(clt_path);
2835 		if (err) {
2836 			list_del_rcu(&clt_path->s.entry);
2837 			rtrs_clt_close_conns(clt_path, true);
2838 			free_percpu(clt_path->stats->pcpu_stats);
2839 			kfree(clt_path->stats);
2840 			free_path(clt_path);
2841 			goto close_all_path;
2842 		}
2843 	}
2844 	err = alloc_permits(clt);
2845 	if (err)
2846 		goto close_all_path;
2847 
2848 	return clt;
2849 
2850 close_all_path:
2851 	list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2852 		rtrs_clt_destroy_path_files(clt_path, NULL);
2853 		rtrs_clt_close_conns(clt_path, true);
2854 		kobject_put(&clt_path->kobj);
2855 	}
2856 	rtrs_clt_destroy_sysfs_root(clt);
2857 	free_clt(clt);
2858 
2859 out:
2860 	return ERR_PTR(err);
2861 }
2862 EXPORT_SYMBOL(rtrs_clt_open);
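
/*
 * Illustrative only: a minimal sketch of how a ULP might open a session,
 * assuming the declarations from rtrs.h (struct rtrs_clt_ops, struct
 * rtrs_addr and the link event callback) as used by the rnbd client.
 * my_link_ev, my_priv, dst_storage, my_iu and the numeric values are
 * placeholders, not part of this file.
 *
 *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev) { ... }
 *
 *	struct rtrs_clt_ops ops = { .priv = my_priv, .link_ev = my_link_ev };
 *	struct rtrs_addr paths[] = { { .src = NULL, .dst = &dst_storage } };
 *	struct rtrs_clt_sess *sess;
 *
 *	sess = rtrs_clt_open(&ops, "my_session", paths, ARRAY_SIZE(paths),
 *			     1234, sizeof(struct my_iu), 30, 5, 0);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */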
2863 
2864 /**
2865  * rtrs_clt_close() - Close a session
2866  * @clt: Session handle. Session is freed upon return.
2867  */
2868 void rtrs_clt_close(struct rtrs_clt_sess *clt)
2869 {
2870 	struct rtrs_clt_path *clt_path, *tmp;
2871 
2872 	/* Firstly forbid sysfs access */
2873 	rtrs_clt_destroy_sysfs_root(clt);
2874 
2875 	/* Now it is safe to iterate over all paths without locks */
2876 	list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2877 		rtrs_clt_close_conns(clt_path, true);
2878 		rtrs_clt_destroy_path_files(clt_path, NULL);
2879 		kobject_put(&clt_path->kobj);
2880 	}
2881 	free_permits(clt);
2882 	free_clt(clt);
2883 }
2884 EXPORT_SYMBOL(rtrs_clt_close);
2885 
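/*
 * Used by the sysfs reconnect interface: force the path into RECONNECTING,
 * tear the connections down and kick the reconnect work immediately, then
 * report whether the path came back CONNECTED.
 */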
2886 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
2887 {
2888 	enum rtrs_clt_state old_state;
2889 	int err = -EBUSY;
2890 	bool changed;
2891 
2892 	changed = rtrs_clt_change_state_get_old(clt_path,
2893 						 RTRS_CLT_RECONNECTING,
2894 						 &old_state);
2895 	if (changed) {
2896 		clt_path->reconnect_attempts = 0;
2897 		rtrs_clt_stop_and_destroy_conns(clt_path);
2898 		queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
2899 	}
2900 	if (changed || old_state == RTRS_CLT_RECONNECTING) {
2901 		/*
2902 		 * flush_delayed_work() queues pending work for immediate
2903 		 * execution, so do the flush if we have queued something
2904 		 * right now or work is pending.
2905 		 */
2906 		flush_delayed_work(&clt_path->reconnect_dwork);
2907 		err = (READ_ONCE(clt_path->state) ==
2908 		       RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
2909 	}
2910 
2911 	return err;
2912 }
2913 
2914 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
2915 				     const struct attribute *sysfs_self)
2916 {
2917 	enum rtrs_clt_state old_state;
2918 	bool changed;
2919 
2920 	/*
2921 	 * Keep stopping the path until its state is changed to DEAD or
2922 	 * observed as DEAD:
2923 	 * 1. State was changed to DEAD - we were fast and nobody
2924 	 *    invoked rtrs_clt_reconnect(), which can again start
2925 	 *    reconnecting.
2926 	 * 2. State was observed as DEAD - we have someone in parallel
2927 	 *    removing the path.
2928 	 */
2929 	do {
2930 		rtrs_clt_close_conns(clt_path, true);
2931 		changed = rtrs_clt_change_state_get_old(clt_path,
2932 							RTRS_CLT_DEAD,
2933 							&old_state);
2934 	} while (!changed && old_state != RTRS_CLT_DEAD);
2935 
2936 	if (changed) {
2937 		rtrs_clt_remove_path_from_arr(clt_path);
2938 		rtrs_clt_destroy_path_files(clt_path, sysfs_self);
2939 		kobject_put(&clt_path->kobj);
2940 	}
2941 
2942 	return 0;
2943 }
2944 
2945 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
2946 {
2947 	clt->max_reconnect_attempts = (unsigned int)value;
2948 }
2949 
2950 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
2951 {
2952 	return (int)clt->max_reconnect_attempts;
2953 }
2954 
2955 /**
2956  * rtrs_clt_request() - Request data transfer to/from server via RDMA.
2957  *
2958  * @dir:	READ/WRITE
2959  * @ops:	callback function to be called as confirmation, and the pointer.
2960  * @clt:	Session
2961  * @permit:	Preallocated permit
2962  * @vec:	Message that is sent to server together with the request.
2963  *		Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
2964  *		Since the msg is copied internally it can be allocated on stack.
2965  * @nr:		Number of elements in @vec.
2966  * @data_len:	length of data sent to/from server
2967  * @sg:		Pages to be sent/received to/from server.
2968  * @sg_cnt:	Number of elements in the @sg
2969  *
2970  * Return:
2971  * 0:		Success
2972  * <0:		Error
2973  *
2974  * On dir=READ rtrs client will request a data transfer from Server to client.
2975  * The data that the server will respond with will be stored in @sg when
2976  * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
2977  * On dir=WRITE rtrs client will rdma write data in sg to server side.
2978  */
2979 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
2980 		     struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
2981 		     const struct kvec *vec, size_t nr, size_t data_len,
2982 		     struct scatterlist *sg, unsigned int sg_cnt)
2983 {
2984 	struct rtrs_clt_io_req *req;
2985 	struct rtrs_clt_path *clt_path;
2986 
2987 	enum dma_data_direction dma_dir;
2988 	int err = -ECONNABORTED, i;
2989 	size_t usr_len, hdr_len;
2990 	struct path_it it;
2991 
2992 	/* Get kvec length */
2993 	for (i = 0, usr_len = 0; i < nr; i++)
2994 		usr_len += vec[i].iov_len;
2995 
2996 	if (dir == READ) {
2997 		hdr_len = sizeof(struct rtrs_msg_rdma_read) +
2998 			  sg_cnt * sizeof(struct rtrs_sg_desc);
2999 		dma_dir = DMA_FROM_DEVICE;
3000 	} else {
3001 		hdr_len = sizeof(struct rtrs_msg_rdma_write);
3002 		dma_dir = DMA_TO_DEVICE;
3003 	}
3004 
3005 	rcu_read_lock();
3006 	for (path_it_init(&it, clt);
3007 	     (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3008 		if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3009 			continue;
3010 
3011 		if (usr_len + hdr_len > clt_path->max_hdr_size) {
3012 			rtrs_wrn_rl(clt_path->clt,
3013 				     "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
3014 				     dir == READ ? "Read" : "Write",
3015 				     usr_len, hdr_len, clt_path->max_hdr_size);
3016 			err = -EMSGSIZE;
3017 			break;
3018 		}
3019 		req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
3020 				       vec, usr_len, sg, sg_cnt, data_len,
3021 				       dma_dir);
3022 		if (dir == READ)
3023 			err = rtrs_clt_read_req(req);
3024 		else
3025 			err = rtrs_clt_write_req(req);
3026 		if (err) {
3027 			req->in_use = false;
3028 			continue;
3029 		}
3030 		/* Success path */
3031 		break;
3032 	}
3033 	path_it_deinit(&it);
3034 	rcu_read_unlock();
3035 
3036 	return err;
3037 }
3038 EXPORT_SYMBOL(rtrs_clt_request);
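
/*
 * Illustrative only: a rough sketch of issuing a write request, assuming
 * the permit API declared in rtrs.h (rtrs_clt_get_permit()/
 * rtrs_clt_put_permit(), RTRS_IO_CON, RTRS_PERMIT_WAIT) and a caller
 * supplied confirmation callback; my_conf, my_priv, msg, sg, sg_cnt and
 * data_len are placeholders.
 *
 *	struct rtrs_clt_req_ops req_ops = { .priv = my_priv, .conf_fn = my_conf };
 *	struct kvec vec = { .iov_base = &msg, .iov_len = sizeof(msg) };
 *	struct rtrs_permit *permit;
 *	int err;
 *
 *	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
 *	err = rtrs_clt_request(WRITE, &req_ops, clt, permit, &vec, 1,
 *			       data_len, sg, sg_cnt);
 *	if (err)
 *		rtrs_clt_put_permit(clt, permit);
 *	otherwise the permit is released from the confirmation callback path.
 */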
3039 
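/*
 * Directly poll the CQ of IO connection @index + 1 on any connected path;
 * used for the nr_poll_queues connections created with IB_POLL_DIRECT.
 * Returns the number of processed completions, or -1 if no path is
 * connected.
 */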
3040 int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
3041 {
3042 	/* If there is no path, return -1 so the block layer does not try again */
3043 	int cnt = -1;
3044 	struct rtrs_con *con;
3045 	struct rtrs_clt_path *clt_path;
3046 	struct path_it it;
3047 
3048 	rcu_read_lock();
3049 	for (path_it_init(&it, clt);
3050 	     (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3051 		if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
3052 			continue;
3053 
3054 		con = clt_path->s.con[index + 1];
3055 		cnt = ib_process_cq_direct(con->cq, -1);
3056 		if (cnt)
3057 			break;
3058 	}
3059 	path_it_deinit(&it);
3060 	rcu_read_unlock();
3061 
3062 	return cnt;
3063 }
3064 EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
3065 
3066 /**
3067  * rtrs_clt_query() - queries RTRS session attributes
3068  * @clt: session pointer
3069  * @attr: query results for session attributes.
3070  * Returns:
3071  *    0 on success
3072  *    -ECOMM		no connection to the server
3073  */
3074 int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
3075 {
3076 	if (!rtrs_clt_is_connected(clt))
3077 		return -ECOMM;
3078 
3079 	attr->queue_depth      = clt->queue_depth;
3080 	attr->max_segments     = clt->max_segments;
3081 	/* Cap max_io_size to min of remote buffer size and the fr pages */
3082 	attr->max_io_size = min_t(int, clt->max_io_size,
3083 				  clt->max_segments * SZ_4K);
3084 
3085 	return 0;
3086 }
3087 EXPORT_SYMBOL(rtrs_clt_query);
3088 
3089 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
3090 				     struct rtrs_addr *addr)
3091 {
3092 	struct rtrs_clt_path *clt_path;
3093 	int err;
3094 
3095 	clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
3096 	if (IS_ERR(clt_path))
3097 		return PTR_ERR(clt_path);
3098 
3099 	mutex_lock(&clt->paths_mutex);
3100 	if (clt->paths_num == 0) {
3101 		/*
3102 		 * When all the paths are removed for a session,
3103 		 * the addition of the first path is like a new session for
3104 		 * the storage server
3105 		 */
3106 		clt_path->for_new_clt = 1;
3107 	}
3108 
3109 	mutex_unlock(&clt->paths_mutex);
3110 
3111 	/*
3112 	 * It is totally safe to add a path in CONNECTING state: incoming
3113 	 * IO will never grab it.  Also it is very important to add the
3114 	 * path before init, since init fires the LINK_CONNECTED event.
3115 	 */
3116 	rtrs_clt_add_path_to_arr(clt_path);
3117 
3118 	err = init_path(clt_path);
3119 	if (err)
3120 		goto close_path;
3121 
3122 	err = rtrs_clt_create_path_files(clt_path);
3123 	if (err)
3124 		goto close_path;
3125 
3126 	return 0;
3127 
3128 close_path:
3129 	rtrs_clt_remove_path_from_arr(clt_path);
3130 	rtrs_clt_close_conns(clt_path, true);
3131 	free_percpu(clt_path->stats->pcpu_stats);
3132 	kfree(clt_path->stats);
3133 	free_path(clt_path);
3134 
3135 	return err;
3136 }
3137 
3138 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
3139 {
3140 	if (!(dev->ib_dev->attrs.device_cap_flags &
3141 	      IB_DEVICE_MEM_MGT_EXTENSIONS)) {
3142 		pr_err("Memory registrations not supported.\n");
3143 		return -ENOTSUPP;
3144 	}
3145 
3146 	return 0;
3147 }
3148 
3149 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
3150 	.init = rtrs_clt_ib_dev_init
3151 };
3152 
3153 static int __init rtrs_client_init(void)
3154 {
3155 	int ret = 0;
3156 
3157 	rtrs_rdma_dev_pd_init(0, &dev_pd);
3158 	ret = class_register(&rtrs_clt_dev_class);
3159 	if (ret) {
3160 		pr_err("Failed to create rtrs-client dev class\n");
3161 		return ret;
3162 	}
3163 	rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
3164 	if (!rtrs_wq) {
3165 		class_unregister(&rtrs_clt_dev_class);
3166 		return -ENOMEM;
3167 	}
3168 
3169 	return 0;
3170 }
3171 
3172 static void __exit rtrs_client_exit(void)
3173 {
3174 	destroy_workqueue(rtrs_wq);
3175 	class_unregister(&rtrs_clt_dev_class);
3176 	rtrs_rdma_dev_pd_deinit(&dev_pd);
3177 }
3178 
3179 module_init(rtrs_client_init);
3180 module_exit(rtrs_client_exit);
3181