// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2017, Microsoft Corporation.
 *
 *   Author(s): Long Li <longli@microsoft.com>
 */
#include <linux/module.h>
#include <linux/highmem.h>
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
#include "smb2proto.h"

static struct smbd_response *get_empty_queue_buffer(
		struct smbd_connection *info);
static struct smbd_response *get_receive_buffer(
		struct smbd_connection *info);
static void put_receive_buffer(
		struct smbd_connection *info,
		struct smbd_response *response);
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
static void destroy_receive_buffers(struct smbd_connection *info);

static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response);
static void enqueue_reassembly(
		struct smbd_connection *info,
		struct smbd_response *response, int data_length);
static struct smbd_response *_get_first_reassembly(
		struct smbd_connection *info);

static int smbd_post_recv(
		struct smbd_connection *info,
		struct smbd_response *response);

static int smbd_post_send_empty(struct smbd_connection *info);
static int smbd_post_send_data(
		struct smbd_connection *info,
		struct kvec *iov, int n_vec, int remaining_data_length);
static int smbd_post_send_page(struct smbd_connection *info,
		struct page *page, unsigned long offset,
		size_t size, int remaining_data_length);

static void destroy_mr_list(struct smbd_connection *info);
static int allocate_mr_list(struct smbd_connection *info);

/* SMBD version number */
#define SMBD_V1	0x0100

/* Port numbers for SMBD transport */
#define SMB_PORT	445
#define SMBD_PORT	5445

/* Address lookup and resolve timeout in ms */
#define RDMA_RESOLVE_TIMEOUT	5000

/* SMBD negotiation timeout in seconds */
#define SMBD_NEGOTIATE_TIMEOUT	120

/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
#define SMBD_MIN_RECEIVE_SIZE		128
#define SMBD_MIN_FRAGMENTED_SIZE	131072

/*
 * Default maximum number of RDMA read/write outstanding on this connection
 * This value may be decreased during QP creation, based on hardware limits
 */
#define SMBD_CM_RESPONDER_RESOURCES	32

/* Maximum number of retries on data transfer operations */
#define SMBD_CM_RETRY			6
/* No need to retry on Receiver Not Ready since SMBD manages credits */
#define SMBD_CM_RNR_RETRY		0

/*
 * User configurable initial values per SMBD transport connection
 * as defined in [MS-SMBD] 3.1.1.1
 * These may change after SMBD negotiation
 */
/* The local peer's maximum number of credits to grant to the peer */
int smbd_receive_credit_max = 255;

/* The number of credits the remote peer requests of the local peer */
int smbd_send_credit_target = 255;

/* The maximum single-message size that can be sent to the remote peer */
int smbd_max_send_size = 1364;

/*  The maximum fragmented upper-layer payload receive size supported */
int smbd_max_fragmented_recv_size = 1024 * 1024;

/*  The maximum single-message size which can be received */
int smbd_max_receive_size = 8192;

/* The timeout to initiate send of a keepalive message on idle */
int smbd_keep_alive_interval = 120;

/*
 * User configurable initial values for RDMA transport
 * The actual values used may be lower and are limited to hardware capabilities
 */
/* Default maximum number of SGEs in a RDMA write/read */
int smbd_max_frmr_depth = 2048;

/* If payload is smaller than this many bytes, use RDMA send/recv, not read/write */
int rdma_readwrite_threshold = 4096;

/* Transport logging functions
 * Logging is defined as classes. They can be OR'ed to define the actual
 * logging level via the module parameter smbd_logging_class,
 * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
 * log_rdma_event()
 */
#define LOG_OUTGOING			0x1
#define LOG_INCOMING			0x2
#define LOG_READ			0x4
#define LOG_WRITE			0x8
#define LOG_RDMA_SEND			0x10
#define LOG_RDMA_RECV			0x20
#define LOG_KEEP_ALIVE			0x40
#define LOG_RDMA_EVENT			0x80
#define LOG_RDMA_MR			0x100
static unsigned int smbd_logging_class;
module_param(smbd_logging_class, uint, 0644);
MODULE_PARM_DESC(smbd_logging_class,
	"Logging class for SMBD transport 0x0 to 0x100");

#define ERR		0x0
#define INFO		0x1
static unsigned int smbd_logging_level = ERR;
module_param(smbd_logging_level, uint, 0644);
MODULE_PARM_DESC(smbd_logging_level,
	"Logging level for SMBD transport, 0 (default): error, 1: info");

#define log_rdma(level, class, fmt, args...)				\
do {									\
	if (level <= smbd_logging_level || class & smbd_logging_class)	\
		cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
} while (0)

#define log_outgoing(level, fmt, args...) \
		log_rdma(level, LOG_OUTGOING, fmt, ##args)
#define log_incoming(level, fmt, args...) \
		log_rdma(level, LOG_INCOMING, fmt, ##args)
#define log_read(level, fmt, args...)	log_rdma(level, LOG_READ, fmt, ##args)
#define log_write(level, fmt, args...)	log_rdma(level, LOG_WRITE, fmt, ##args)
#define log_rdma_send(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
#define log_rdma_recv(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
#define log_keep_alive(level, fmt, args...) \
		log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
#define log_rdma_event(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
#define log_rdma_mr(level, fmt, args...) \
		log_rdma(level, LOG_RDMA_MR, fmt, ##args)

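/*
 * Disconnect is deferred to a work item: smbd_disconnect_rdma_connection()
 * can be called from completion handler context, where it is presumably
 * not safe to call rdma_disconnect() directly, so the actual disconnect
 * runs from the connection's workqueue instead.
 */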
static void smbd_disconnect_rdma_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, disconnect_work);

	if (info->transport_status == SMBD_CONNECTED) {
		info->transport_status = SMBD_DISCONNECTING;
		rdma_disconnect(info->id);
	}
}

static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
{
	queue_work(info->workqueue, &info->disconnect_work);
}

/* Upcall from RDMA CM */
static int smbd_conn_upcall(
		struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct smbd_connection *info = id->context;

	log_rdma_event(INFO, "event=%d status=%d\n",
		event->event, event->status);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		info->ri_rc = 0;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ADDR_ERROR:
		info->ri_rc = -EHOSTUNREACH;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ROUTE_ERROR:
		info->ri_rc = -ENETUNREACH;
		complete(&info->ri_done);
		break;

	case RDMA_CM_EVENT_ESTABLISHED:
		log_rdma_event(INFO, "connected event=%d\n", event->event);
		info->transport_status = SMBD_CONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED:
		/* This happens when we fail the negotiation */
		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
			info->transport_status = SMBD_DISCONNECTED;
			wake_up(&info->conn_wait);
			break;
		}

		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->disconn_wait);
		wake_up_interruptible(&info->wait_reassembly_queue);
		wake_up_interruptible_all(&info->wait_send_queue);
		break;

	default:
		break;
	}

	return 0;
}

/* Upcall from RDMA QP */
static void
smbd_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct smbd_connection *info = context;

	log_rdma_event(ERR, "%s on device %s info %p\n",
		ib_event_msg(event->event), event->device->name, info);

	switch (event->event) {
	case IB_EVENT_CQ_ERR:
	case IB_EVENT_QP_FATAL:
		smbd_disconnect_rdma_connection(info);
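		/* fall through */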

	default:
		break;
	}
}

static inline void *smbd_request_payload(struct smbd_request *request)
{
	return (void *)request->packet;
}

static inline void *smbd_response_payload(struct smbd_response *response)
{
	return (void *)response->packet;
}

/* Called when a RDMA send is done */
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	int i;
	struct smbd_request *request =
		container_of(wc->wr_cqe, struct smbd_request, cqe);

	log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
		request, wc->status);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(request->info);
	}

	for (i = 0; i < request->num_sge; i++)
		ib_dma_unmap_single(request->info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);

	if (request->has_payload) {
		if (atomic_dec_and_test(&request->info->send_payload_pending))
			wake_up(&request->info->wait_send_payload_pending);
	} else {
		if (atomic_dec_and_test(&request->info->send_pending))
			wake_up(&request->info->wait_send_pending);
	}

	mempool_free(request, request->info->request_mempool);
}

static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
{
	log_rdma_event(INFO, "resp message min_version %u max_version %u "
		"negotiated_version %u credits_requested %u "
		"credits_granted %u status %u max_readwrite_size %u "
		"preferred_send_size %u max_receive_size %u "
		"max_fragmented_size %u\n",
		resp->min_version, resp->max_version, resp->negotiated_version,
		resp->credits_requested, resp->credits_granted, resp->status,
		resp->max_readwrite_size, resp->preferred_send_size,
		resp->max_receive_size, resp->max_fragmented_size);
}

/*
 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
 * response, packet_length: the negotiation response message
 * return value: true if negotiation is a success, false if failed
 */
static bool process_negotiation_response(
		struct smbd_response *response, int packet_length)
{
	struct smbd_connection *info = response->info;
	struct smbd_negotiate_resp *packet = smbd_response_payload(response);

	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
		log_rdma_event(ERR,
			"error: packet_length=%d\n", packet_length);
		return false;
	}

	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
		log_rdma_event(ERR, "error: negotiated_version=%x\n",
			le16_to_cpu(packet->negotiated_version));
		return false;
	}
	info->protocol = le16_to_cpu(packet->negotiated_version);

	if (packet->credits_requested == 0) {
		log_rdma_event(ERR, "error: credits_requested==0\n");
		return false;
	}
	info->receive_credit_target = le16_to_cpu(packet->credits_requested);

	if (packet->credits_granted == 0) {
		log_rdma_event(ERR, "error: credits_granted==0\n");
		return false;
	}
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

	atomic_set(&info->receive_credits, 0);

	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
			le32_to_cpu(packet->preferred_send_size));
		return false;
	}
	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
		log_rdma_event(ERR, "error: max_receive_size=%d\n",
			le32_to_cpu(packet->max_receive_size));
		return false;
	}
	info->max_send_size = min_t(int, info->max_send_size,
					le32_to_cpu(packet->max_receive_size));

	if (le32_to_cpu(packet->max_fragmented_size) <
			SMBD_MIN_FRAGMENTED_SIZE) {
		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
			le32_to_cpu(packet->max_fragmented_size));
		return false;
	}
	info->max_fragmented_send_size =
		le32_to_cpu(packet->max_fragmented_size);
	info->rdma_readwrite_threshold =
		rdma_readwrite_threshold > info->max_fragmented_send_size ?
		info->max_fragmented_send_size :
		rdma_readwrite_threshold;

	info->max_readwrite_size = min_t(u32,
			le32_to_cpu(packet->max_readwrite_size),
			info->max_frmr_depth * PAGE_SIZE);
	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;

	return true;
}

/*
 * Check and schedule to send an immediate packet
 * This is used to extend credits to remote peer to keep the transport busy
 */
static void check_and_send_immediate(struct smbd_connection *info)
{
	if (info->transport_status != SMBD_CONNECTED)
		return;

	info->send_immediate = true;

	/*
	 * Promptly send a packet if our peer is running low on receive
	 * credits
	 */
	if (atomic_read(&info->receive_credits) <
		info->receive_credit_target - 1)
		queue_delayed_work(
			info->workqueue, &info->send_immediate_work, 0);
}

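/*
 * Workqueue handler: repost as many receive buffers as possible, taking
 * them first from the receive queue and then from the empty packet queue,
 * and account the successfully posted receives as new credits to offer
 * to the peer.
 */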
static void smbd_post_send_credits(struct work_struct *work)
{
	int ret = 0;
	int use_receive_queue = 1;
	int rc;
	struct smbd_response *response;
	struct smbd_connection *info =
		container_of(work, struct smbd_connection,
			post_send_credits_work);

	if (info->transport_status != SMBD_CONNECTED) {
		wake_up(&info->wait_receive_queues);
		return;
	}

	if (info->receive_credit_target >
		atomic_read(&info->receive_credits)) {
		while (true) {
			if (use_receive_queue)
				response = get_receive_buffer(info);
			else
				response = get_empty_queue_buffer(info);
			if (!response) {
				/* now switch to the empty packet queue */
				if (use_receive_queue) {
					use_receive_queue = 0;
					continue;
				} else
					break;
			}

			response->type = SMBD_TRANSFER_DATA;
			response->first_segment = false;
			rc = smbd_post_recv(info, response);
			if (rc) {
				log_rdma_recv(ERR,
					"post_recv failed rc=%d\n", rc);
				put_receive_buffer(info, response);
				break;
			}

			ret++;
		}
	}

	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);

	atomic_add(ret, &info->receive_credits);

	/* Check if we can post new receive and grant credits to peer */
	check_and_send_immediate(info);
}

static void smbd_recv_done_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, recv_done_work);

	/*
	 * We may have new send credits granted from remote peer
	 * If any sender is blocked on lack of credits, unblock it
	 */
	if (atomic_read(&info->send_credits))
		wake_up_interruptible(&info->wait_send_queue);

	/*
	 * Check if we need to send something to remote peer to
	 * grant more credits or respond to KEEP_ALIVE packet
	 */
	check_and_send_immediate(info);
}

/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_data_transfer *data_transfer;
	struct smbd_response *response =
		container_of(wc->wr_cqe, struct smbd_response, cqe);
	struct smbd_connection *info = response->info;
	int data_length = 0;

	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
		      "byte_len=%d pkey_index=%x\n",
		response, response->type, wc->status, wc->opcode,
		wc->byte_len, wc->pkey_index);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(info);
		goto error;
	}

	ib_dma_sync_single_for_cpu(
		wc->qp->device,
		response->sge.addr,
		response->sge.length,
		DMA_FROM_DEVICE);

	switch (response->type) {
	/* SMBD negotiation response */
	case SMBD_NEGOTIATE_RESP:
		dump_smbd_negotiate_resp(smbd_response_payload(response));
		info->full_packet_received = true;
		info->negotiate_done =
			process_negotiation_response(response, wc->byte_len);
		complete(&info->negotiate_completion);
		break;

	/* SMBD data transfer packet */
	case SMBD_TRANSFER_DATA:
		data_transfer = smbd_response_payload(response);
		data_length = le32_to_cpu(data_transfer->data_length);

		/*
		 * If this is a packet with a data payload, place the data in
		 * the reassembly queue and wake up the reading thread
		 */
		if (data_length) {
			if (info->full_packet_received)
				response->first_segment = true;

			if (le32_to_cpu(data_transfer->remaining_data_length))
				info->full_packet_received = false;
			else
				info->full_packet_received = true;

			enqueue_reassembly(
				info,
				response,
				data_length);
		} else
			put_empty_packet(info, response);

		if (data_length)
			wake_up_interruptible(&info->wait_reassembly_queue);

		atomic_dec(&info->receive_credits);
		info->receive_credit_target =
			le16_to_cpu(data_transfer->credits_requested);
		atomic_add(le16_to_cpu(data_transfer->credits_granted),
			&info->send_credits);

		log_incoming(INFO, "data flags %d data_offset %d "
			"data_length %d remaining_data_length %d\n",
			le16_to_cpu(data_transfer->flags),
			le32_to_cpu(data_transfer->data_offset),
			le32_to_cpu(data_transfer->data_length),
			le32_to_cpu(data_transfer->remaining_data_length));

		/* Send a KEEP_ALIVE response right away if requested */
		info->keep_alive_requested = KEEP_ALIVE_NONE;
		if (le16_to_cpu(data_transfer->flags) &
				SMB_DIRECT_RESPONSE_REQUESTED) {
			info->keep_alive_requested = KEEP_ALIVE_PENDING;
		}

		queue_work(info->workqueue, &info->recv_done_work);
		return;

	default:
		log_rdma_recv(ERR,
			"unexpected response type=%d\n", response->type);
	}

error:
	put_receive_buffer(info, response);
}

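/*
 * Create an RDMA CM ID for this connection and resolve the destination
 * address and route, waiting up to RDMA_RESOLVE_TIMEOUT ms for each step
 * to complete.
 */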
static struct rdma_cm_id *smbd_create_id(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	struct rdma_cm_id *id;
	int rc;
	__be16 *sport;

	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
		RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
		return id;
	}

	if (dstaddr->sa_family == AF_INET6)
		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
	else
		sport = &((struct sockaddr_in *)dstaddr)->sin_port;

	*sport = htons(port);

	init_completion(&info->ri_done);
	info->ri_rc = -ETIMEDOUT;

	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
		RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
		goto out;
	}

	info->ri_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
		goto out;
	}
	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
	if (rc) {
		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
		goto out;
	}

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Test if FRWR (Fast Registration Work Requests) is supported on the device
 * This implementation requires FRWR on RDMA read/write
 * return value: true if it is supported
 */
static bool frwr_is_supported(struct ib_device_attr *attrs)
{
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}

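/*
 * Open the RDMA "interface adapter" for this connection: create the CM ID,
 * verify the device supports FRWR, cap the FRMR depth at what the device
 * reports, pick the MR type and allocate the protection domain.
 */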
static int smbd_ia_open(
		struct smbd_connection *info,
		struct sockaddr *dstaddr, int port)
{
	int rc;

	info->id = smbd_create_id(info, dstaddr, port);
	if (IS_ERR(info->id)) {
		rc = PTR_ERR(info->id);
		goto out1;
	}

	if (!frwr_is_supported(&info->id->device->attrs)) {
		log_rdma_event(ERR,
			"Fast Registration Work Requests "
			"(FRWR) is not supported\n");
		log_rdma_event(ERR,
			"Device capability flags = %llx "
			"max_fast_reg_page_list_len = %u\n",
			info->id->device->attrs.device_cap_flags,
			info->id->device->attrs.max_fast_reg_page_list_len);
		rc = -EPROTONOSUPPORT;
		goto out2;
	}
	info->max_frmr_depth = min_t(int,
		smbd_max_frmr_depth,
		info->id->device->attrs.max_fast_reg_page_list_len);
	info->mr_type = IB_MR_TYPE_MEM_REG;
	if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		info->mr_type = IB_MR_TYPE_SG_GAPS;

	info->pd = ib_alloc_pd(info->id->device, 0);
	if (IS_ERR(info->pd)) {
		rc = PTR_ERR(info->pd);
		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	return 0;

out2:
	rdma_destroy_id(info->id);
	info->id = NULL;

out1:
	return rc;
}

/*
 * Send a negotiation request message to the peer
 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
 * After negotiation, the transport is connected and ready for
 * carrying upper layer SMB payload
 */
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
	struct ib_send_wr send_wr;
	int rc = -ENOMEM;
	struct smbd_request *request;
	struct smbd_negotiate_req *packet;

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request)
		return rc;

	request->info = info;

	packet = smbd_request_payload(request);
	packet->min_version = cpu_to_le16(SMBD_V1);
	packet->max_version = cpu_to_le16(SMBD_V1);
	packet->reserved = 0;
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
	packet->max_fragmented_size =
		cpu_to_le32(info->max_fragmented_recv_size);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(
				info->id->device, (void *)packet,
				sizeof(*packet), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		goto dma_mapping_failed;
	}

	request->sge[0].length = sizeof(*packet);
	request->sge[0].lkey = info->pd->local_dma_lkey;

	ib_dma_sync_single_for_device(
		info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
		request->sge[0].addr,
		request->sge[0].length, request->sge[0].lkey);

	request->has_payload = false;
	atomic_inc(&info->send_pending);
	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (!rc)
		return 0;

	/* if we reach here, post send failed */
	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
	atomic_dec(&info->send_pending);
	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	smbd_disconnect_rdma_connection(info);

dma_mapping_failed:
	mempool_free(request, info->request_mempool);
	return rc;
}

/*
 * Extend the credits to remote peer
 * This implements [MS-SMBD] 3.1.5.9
 * The idea is that we should extend credits to remote peer as quickly as
 * it's allowed, to maintain data flow. We allocate as much receive
 * buffer as possible, and extend the receive credits to remote peer
 * return value: the new credits being granted.
 */
static int manage_credits_prior_sending(struct smbd_connection *info)
{
	int new_credits;

	spin_lock(&info->lock_new_credits_offered);
	new_credits = info->new_credits_offered;
	info->new_credits_offered = 0;
	spin_unlock(&info->lock_new_credits_offered);

	return new_credits;
}

/*
 * Check if we need to send a KEEP_ALIVE message
 * The idle connection timer triggers a KEEP_ALIVE message when it expires
 * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have the peer
 * send back a response.
 * return value:
 * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
 * 0 otherwise
 */
static int manage_keep_alive_before_sending(struct smbd_connection *info)
{
	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
		info->keep_alive_requested = KEEP_ALIVE_SENT;
		return 1;
	}
	return 0;
}

/*
 * Build and prepare the SMBD packet header
 * This function waits for available send credits and builds an SMBD packet
 * header. The caller then optionally appends payload to the packet after
 * the header
 * input values
 * size: the size of the payload
 * remaining_data_length: remaining data to send if this is part of a
 * fragmented packet
 * output values
 * request_out: the request allocated from this function
 * return values: 0 on success, otherwise actual error code returned
 */
static int smbd_create_header(struct smbd_connection *info,
		int size, int remaining_data_length,
		struct smbd_request **request_out)
{
	struct smbd_request *request;
	struct smbd_data_transfer *packet;
	int header_length;
	int rc;

	/* Wait for send credits. A SMBD packet needs one credit */
	rc = wait_event_interruptible(info->wait_send_queue,
		atomic_read(&info->send_credits) > 0 ||
		info->transport_status != SMBD_CONNECTED);
	if (rc)
		return rc;

	if (info->transport_status != SMBD_CONNECTED) {
		log_outgoing(ERR, "disconnected not sending\n");
		return -EAGAIN;
	}
	atomic_dec(&info->send_credits);

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request) {
		rc = -ENOMEM;
		goto err;
	}

	request->info = info;

	/* Fill in the packet header */
	packet = smbd_request_payload(request);
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->credits_granted =
		cpu_to_le16(manage_credits_prior_sending(info));
	info->send_immediate = false;

	packet->flags = 0;
	if (manage_keep_alive_before_sending(info))
		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);

	packet->reserved = 0;
	if (!size)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(size);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

	log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
		"data_offset=%d data_length=%d remaining_data_length=%d\n",
		le16_to_cpu(packet->credits_requested),
		le16_to_cpu(packet->credits_granted),
		le32_to_cpu(packet->data_offset),
		le32_to_cpu(packet->data_length),
		le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	header_length = sizeof(struct smbd_data_transfer);
	/* If this is a packet without payload, don't send padding */
	if (!size)
		header_length = offsetof(struct smbd_data_transfer, padding);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(info->id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		mempool_free(request, info->request_mempool);
		rc = -EIO;
		goto err;
	}

	request->sge[0].length = header_length;
	request->sge[0].lkey = info->pd->local_dma_lkey;

	*request_out = request;
	return 0;

err:
	atomic_inc(&info->send_credits);
	return rc;
}

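/*
 * Undo smbd_create_header(): unmap the header from DMA, free the request
 * and return the send credit taken when the header was created.
 */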
static void smbd_destroy_header(struct smbd_connection *info,
		struct smbd_request *request)
{
	ib_dma_unmap_single(info->id->device,
			    request->sge[0].addr,
			    request->sge[0].length,
			    DMA_TO_DEVICE);
	mempool_free(request, info->request_mempool);
	atomic_inc(&info->send_credits);
}

/* Post the send request */
static int smbd_post_send(struct smbd_connection *info,
		struct smbd_request *request, bool has_payload)
{
	struct ib_send_wr send_wr;
	int rc, i;

	for (i = 0; i < request->num_sge; i++) {
		log_rdma_send(INFO,
			"rdma_request sge[%d] addr=%llu length=%u\n",
			i, request->sge[i].addr, request->sge[i].length);
		ib_dma_sync_single_for_device(
			info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);
	}

	request->cqe.done = send_done;

	send_wr.next = NULL;
	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	if (has_payload) {
		request->has_payload = true;
		atomic_inc(&info->send_payload_pending);
	} else {
		request->has_payload = false;
		atomic_inc(&info->send_pending);
	}

	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (rc) {
		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
		if (has_payload) {
			if (atomic_dec_and_test(&info->send_payload_pending))
				wake_up(&info->wait_send_payload_pending);
		} else {
			if (atomic_dec_and_test(&info->send_pending))
				wake_up(&info->wait_send_pending);
		}
		smbd_disconnect_rdma_connection(info);
		rc = -EAGAIN;
	} else
		/* Reset timer for idle connection after packet is sent */
		mod_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);

	return rc;
}

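/*
 * Send an SMBD packet built from a scatterlist: sge[0] carries the header
 * created by smbd_create_header(), and each scatterlist entry is DMA-mapped
 * into an additional sge. On a mapping failure, everything mapped so far is
 * unmapped and the header is destroyed.
 */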
static int smbd_post_send_sgl(struct smbd_connection *info,
	struct scatterlist *sgl, int data_length, int remaining_data_length)
{
	int num_sgs;
	int i, rc;
	struct smbd_request *request;
	struct scatterlist *sg;

	rc = smbd_create_header(
		info, data_length, remaining_data_length, &request);
	if (rc)
		return rc;

	num_sgs = sgl ? sg_nents(sgl) : 0;
	for_each_sg(sgl, sg, num_sgs, i) {
		request->sge[i+1].addr =
			ib_dma_map_page(info->id->device, sg_page(sg),
			       sg->offset, sg->length, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(
				info->id->device, request->sge[i+1].addr)) {
			rc = -EIO;
			request->sge[i+1].addr = 0;
			goto dma_mapping_failure;
		}
		request->sge[i+1].length = sg->length;
		request->sge[i+1].lkey = info->pd->local_dma_lkey;
		request->num_sge++;
	}

	rc = smbd_post_send(info, request, data_length);
	if (!rc)
		return 0;

dma_mapping_failure:
	for (i = 1; i < request->num_sge; i++)
		if (request->sge[i].addr)
			ib_dma_unmap_single(info->id->device,
					    request->sge[i].addr,
					    request->sge[i].length,
					    DMA_TO_DEVICE);
	smbd_destroy_header(info, request);
	return rc;
}

/*
 * Send a page
 * page: the page to send
 * offset: offset in the page to send
 * size: length in the page to send
 * remaining_data_length: remaining data to send in this payload
 */
static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
		unsigned long offset, size_t size, int remaining_data_length)
{
	struct scatterlist sgl;

	sg_init_table(&sgl, 1);
	sg_set_page(&sgl, page, size, offset);

	return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
}

/*
 * Send an empty message
 * An empty message is used to extend credits to the peer for keepalive
 * while there is no upper layer payload to send at the time
 */
static int smbd_post_send_empty(struct smbd_connection *info)
{
	info->count_send_empty++;
	return smbd_post_send_sgl(info, NULL, 0, 0);
}

/*
 * Send a data buffer
 * iov: the iov array describing the data buffers
 * n_vec: number of entries in the iov array
 * remaining_data_length: remaining data to send following this packet
 * in a segmented SMBD packet
 */
static int smbd_post_send_data(
	struct smbd_connection *info, struct kvec *iov, int n_vec,
	int remaining_data_length)
{
	int i;
	u32 data_length = 0;
	struct scatterlist sgl[SMBDIRECT_MAX_SGE];

	if (n_vec > SMBDIRECT_MAX_SGE) {
		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
		return -EINVAL;
	}

	sg_init_table(sgl, n_vec);
	for (i = 0; i < n_vec; i++) {
		data_length += iov[i].iov_len;
		sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
	}

	return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
}

/*
 * Post a receive request to the transport
 * The remote peer can only send data when a receive request is posted
 * The interaction is controlled by send/receive credit system
 */
static int smbd_post_recv(
		struct smbd_connection *info, struct smbd_response *response)
{
	struct ib_recv_wr recv_wr;
	int rc = -EIO;

	response->sge.addr = ib_dma_map_single(
				info->id->device, response->packet,
				info->max_receive_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
		return rc;

	response->sge.length = info->max_receive_size;
	response->sge.lkey = info->pd->local_dma_lkey;

	response->cqe.done = recv_done;

	recv_wr.wr_cqe = &response->cqe;
	recv_wr.next = NULL;
	recv_wr.sg_list = &response->sge;
	recv_wr.num_sge = 1;

	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
	if (rc) {
		ib_dma_unmap_single(info->id->device, response->sge.addr,
				    response->sge.length, DMA_FROM_DEVICE);
		smbd_disconnect_rdma_connection(info);
		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
	}

	return rc;
}

/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
static int smbd_negotiate(struct smbd_connection *info)
{
	int rc;
	struct smbd_response *response = get_receive_buffer(info);

	response->type = SMBD_NEGOTIATE_RESP;
	rc = smbd_post_recv(info, response);
	log_rdma_event(INFO,
		"smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
		"iov.lkey=%x\n",
		rc, response->sge.addr,
		response->sge.length, response->sge.lkey);
	if (rc)
		return rc;

	init_completion(&info->negotiate_completion);
	info->negotiate_done = false;
	rc = smbd_post_send_negotiate_req(info);
	if (rc)
		return rc;

	rc = wait_for_completion_interruptible_timeout(
		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);

	if (info->negotiate_done)
		return 0;

	if (rc == 0)
		rc = -ETIMEDOUT;
	else if (rc == -ERESTARTSYS)
		rc = -EINTR;
	else
		rc = -ENOTCONN;

	return rc;
}

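/*
 * Return a buffer that carried no payload to the empty packet queue, then
 * kick the credit-posting work so it can be reposted as a receive.
 */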
static void put_empty_packet(
		struct smbd_connection *info, struct smbd_response *response)
{
	spin_lock(&info->empty_packet_queue_lock);
	list_add_tail(&response->list, &info->empty_packet_queue);
	info->count_empty_packet_queue++;
	spin_unlock(&info->empty_packet_queue_lock);

	queue_work(info->workqueue, &info->post_send_credits_work);
}

/*
 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
 * This is a queue for reassembling upper layer payload and presenting it to
 * the upper layer. All incoming payload goes to the reassembly queue,
 * regardless of whether reassembly is required. The upper layer code reads
 * from the queue for all incoming payloads.
 * Put a received packet to the reassembly queue
 * response: the packet received
 * data_length: the size of payload in this packet
 */
static void enqueue_reassembly(
	struct smbd_connection *info,
	struct smbd_response *response,
	int data_length)
{
	spin_lock(&info->reassembly_queue_lock);
	list_add_tail(&response->list, &info->reassembly_queue);
	info->reassembly_queue_length++;
	/*
	 * Make sure reassembly_data_length is updated after list and
	 * reassembly_queue_length are updated. On the dequeue side
	 * reassembly_data_length is checked without a lock to determine
	 * if reassembly_queue_length and list is up to date
	 */
	virt_wmb();
	info->reassembly_data_length += data_length;
	spin_unlock(&info->reassembly_queue_lock);
	info->count_reassembly_queue++;
	info->count_enqueue_reassembly_queue++;
}

/*
 * Get the first entry at the front of reassembly queue
 * Caller is responsible for locking
 * return value: the first entry if any, NULL if queue is empty
 */
static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;

	if (!list_empty(&info->reassembly_queue)) {
		ret = list_first_entry(
			&info->reassembly_queue,
			struct smbd_response, list);
	}
	return ret;
}

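/*
 * Take a buffer from the empty packet queue, if any. These are receive
 * buffers that carried no upper-layer payload and can be reposted without
 * going through the reassembly queue.
 */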
static struct smbd_response *get_empty_queue_buffer(
		struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
	if (!list_empty(&info->empty_packet_queue)) {
		ret = list_first_entry(
			&info->empty_packet_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_empty_packet_queue--;
	}
	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);

	return ret;
}

/*
 * Get a receive buffer
 * For each remote send, we need to post a receive. The receive buffers are
 * pre-allocated in advance.
 * return value: the receive buffer, NULL if none is available
 */
static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
{
	struct smbd_response *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	if (!list_empty(&info->receive_queue)) {
		ret = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_receive_queue--;
		info->count_get_receive_buffer++;
	}
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	return ret;
}

/*
 * Return a receive buffer
 * Upon returning of a receive buffer, we can post new receive and extend
 * more receive credits to remote peer. This is done immediately after a
 * receive buffer is returned.
 */
static void put_receive_buffer(
	struct smbd_connection *info, struct smbd_response *response)
{
	unsigned long flags;

	ib_dma_unmap_single(info->id->device, response->sge.addr,
		response->sge.length, DMA_FROM_DEVICE);

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	list_add_tail(&response->list, &info->receive_queue);
	info->count_receive_queue++;
	info->count_put_receive_buffer++;
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	queue_work(info->workqueue, &info->post_send_credits_work);
}

/* Preallocate all receive buffers on transport establishment */
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
{
	int i;
	struct smbd_response *response;

	INIT_LIST_HEAD(&info->reassembly_queue);
	spin_lock_init(&info->reassembly_queue_lock);
	info->reassembly_data_length = 0;
	info->reassembly_queue_length = 0;

	INIT_LIST_HEAD(&info->receive_queue);
	spin_lock_init(&info->receive_queue_lock);
	info->count_receive_queue = 0;

	INIT_LIST_HEAD(&info->empty_packet_queue);
	spin_lock_init(&info->empty_packet_queue_lock);
	info->count_empty_packet_queue = 0;

	init_waitqueue_head(&info->wait_receive_queues);

	for (i = 0; i < num_buf; i++) {
		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
		if (!response)
			goto allocate_failed;

		response->info = info;
		list_add_tail(&response->list, &info->receive_queue);
		info->count_receive_queue++;
	}

	return 0;

allocate_failed:
	while (!list_empty(&info->receive_queue)) {
		response = list_first_entry(
				&info->receive_queue,
				struct smbd_response, list);
		list_del(&response->list);
		info->count_receive_queue--;

		mempool_free(response, info->response_mempool);
	}
	return -ENOMEM;
}

static void destroy_receive_buffers(struct smbd_connection *info)
{
	struct smbd_response *response;

	while ((response = get_receive_buffer(info)))
		mempool_free(response, info->response_mempool);

	while ((response = get_empty_queue_buffer(info)))
		mempool_free(response, info->response_mempool);
}

/*
 * Check and send an immediate or keepalive packet
 * The conditions to send those packets are defined in [MS-SMBD] 3.1.1.1
 * Connection.KeepaliveRequested and Connection.SendImmediate
 * The idea is to extend credits to server as soon as they become available
 */
static void send_immediate_work(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					send_immediate_work.work);

	if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
	    info->send_immediate) {
		log_keep_alive(INFO, "send an empty message\n");
		smbd_post_send_empty(info);
	}
}

/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
static void idle_connection_timer(struct work_struct *work)
{
	struct smbd_connection *info = container_of(
					work, struct smbd_connection,
					idle_timer_work.work);

	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
		log_keep_alive(ERR,
			"error status info->keep_alive_requested=%d\n",
			info->keep_alive_requested);
		smbd_disconnect_rdma_connection(info);
		return;
	}

	log_keep_alive(INFO, "about to send an empty idle message\n");
	smbd_post_send_empty(info);

	/* Setup the next idle timeout work */
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);
}

/*
 * Destroy the transport and related RDMA and memory resources
 * Need to go through all the pending counters and make sure no one is using
 * the transport while it is destroyed
 */
void smbd_destroy(struct TCP_Server_Info *server)
{
	struct smbd_connection *info = server->smbd_conn;
	struct smbd_response *response;
	unsigned long flags;

	if (!info) {
		log_rdma_event(INFO, "rdma session already destroyed\n");
		return;
	}

	log_rdma_event(INFO, "destroying rdma session\n");
	if (info->transport_status != SMBD_DISCONNECTED) {
		rdma_disconnect(server->smbd_conn->id);
		log_rdma_event(INFO, "wait for transport being disconnected\n");
		wait_event_interruptible(
			info->disconn_wait,
			info->transport_status == SMBD_DISCONNECTED);
	}

	log_rdma_event(INFO, "destroying qp\n");
	ib_drain_qp(info->id->qp);
	rdma_destroy_qp(info->id);

	log_rdma_event(INFO, "cancelling idle timer\n");
	cancel_delayed_work_sync(&info->idle_timer_work);
	log_rdma_event(INFO, "cancelling send immediate work\n");
	cancel_delayed_work_sync(&info->send_immediate_work);

	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
	wait_event(info->wait_send_pending,
		atomic_read(&info->send_pending) == 0);
	wait_event(info->wait_send_payload_pending,
		atomic_read(&info->send_payload_pending) == 0);

	/* It's not possible for upper layer to get to reassembly */
	log_rdma_event(INFO, "drain the reassembly queue\n");
	do {
		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
		response = _get_first_reassembly(info);
		if (response) {
			list_del(&response->list);
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
			put_receive_buffer(info, response);
		} else
			spin_unlock_irqrestore(
				&info->reassembly_queue_lock, flags);
	} while (response);
	info->reassembly_data_length = 0;

	log_rdma_event(INFO, "free receive buffers\n");
	wait_event(info->wait_receive_queues,
		info->count_receive_queue + info->count_empty_packet_queue
			== info->receive_credit_max);
	destroy_receive_buffers(info);

	/*
	 * For performance reasons, memory registration and deregistration
	 * are not locked by srv_mutex. It is possible some processes are
	 * blocked on transport srv_mutex while holding memory registration.
	 * Release the transport srv_mutex to allow them to hit the failure
	 * path when sending data, and then release memory registrations.
	 */
	log_rdma_event(INFO, "freeing mr list\n");
	wake_up_interruptible_all(&info->wait_mr);
	while (atomic_read(&info->mr_used_count)) {
		mutex_unlock(&server->srv_mutex);
		msleep(1000);
		mutex_lock(&server->srv_mutex);
	}
	destroy_mr_list(info);

	ib_free_cq(info->send_cq);
	ib_free_cq(info->recv_cq);
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

	/* free mempools */
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);

	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);

	info->transport_status = SMBD_DESTROYED;

	destroy_workqueue(info->workqueue);
	log_rdma_event(INFO, "rdma session destroyed\n");
	kfree(info);
}

/*
 * Reconnect this SMBD connection, called from upper layer
 * return value: 0 on success, or actual error code
 */
int smbd_reconnect(struct TCP_Server_Info *server)
{
	log_rdma_event(INFO, "reconnecting rdma session\n");

	if (!server->smbd_conn) {
		log_rdma_event(INFO, "rdma session already destroyed\n");
		goto create_conn;
	}

	/*
	 * This is possible if the transport is disconnected and we haven't
	 * received notification from RDMA, but the upper layer has detected
	 * a timeout
	 */
	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
		log_rdma_event(INFO, "disconnecting transport\n");
		smbd_destroy(server);
	}

create_conn:
	log_rdma_event(INFO, "creating rdma session\n");
	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);

	if (server->smbd_conn)
		cifs_dbg(VFS, "RDMA transport re-established\n");

	return server->smbd_conn ? 0 : -ENOENT;
}

static void destroy_caches_and_workqueue(struct smbd_connection *info)
{
	destroy_receive_buffers(info);
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
}

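/*
 * Allocate the per-connection resources: slab caches and mempools for
 * request and response packets, the connection workqueue, and the
 * receive buffers that will be pre-posted.
 */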
#define MAX_NAME_LEN	80
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
	char name[MAX_NAME_LEN];
	int rc;

	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
	info->request_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_request) +
				sizeof(struct smbd_data_transfer),
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
	info->response_cache =
		kmem_cache_create(
			name,
			sizeof(struct smbd_response) +
				info->max_receive_size,
			0, SLAB_HWCACHE_ALIGN, NULL);
	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
		       mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);
	if (rc) {
		log_rdma_event(ERR, "failed to allocate receive buffers\n");
		goto out5;
	}

	return 0;

out5:
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
	return -ENOMEM;
}

/* Create a SMBD connection, called by upper layer */
static struct smbd_connection *_smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
{
	int rc;
	struct smbd_connection *info;
	struct rdma_conn_param conn_param;
	struct ib_qp_init_attr qp_attr;
	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
	struct ib_port_immutable port_immutable;
	u32 ird_ord_hdr[2];

	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
	if (!info)
		return NULL;

	info->transport_status = SMBD_CONNECTING;
	rc = smbd_ia_open(info, dstaddr, port);
	if (rc) {
		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
		goto create_id_failed;
	}

	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR,
			"consider lowering send_credit_target = %d. "
			"Possible CQE overrun, device "
			"reporting max_cqe %d max_qp_wr %d\n",
			smbd_send_credit_target,
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
		log_rdma_event(ERR,
			"consider lowering receive_credit_max = %d. "
			"Possible CQE overrun, device "
			"reporting max_cqe %d max_qp_wr %d\n",
			smbd_receive_credit_max,
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
1634 		goto config_failed;
1635 	}

	info->receive_credit_max = smbd_receive_credit_max;
	info->send_credit_target = smbd_send_credit_target;
	info->max_send_size = smbd_max_send_size;
	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
	info->max_receive_size = smbd_max_receive_size;
	info->keep_alive_interval = smbd_keep_alive_interval;

	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR,
			"warning: device max_send_sge = %d too small\n",
			info->id->device->attrs.max_send_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}
	if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
		log_rdma_event(ERR,
			"warning: device max_recv_sge = %d too small\n",
			info->id->device->attrs.max_recv_sge);
		log_rdma_event(ERR, "Queue Pair creation may fail\n");
	}

	info->send_cq = NULL;
	info->recv_cq = NULL;
	info->send_cq =
		ib_alloc_cq_any(info->id->device, info,
				info->send_credit_target, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->send_cq)) {
		info->send_cq = NULL;
		goto alloc_cq_failed;
	}

	info->recv_cq =
		ib_alloc_cq_any(info->id->device, info,
				info->receive_credit_max, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->recv_cq)) {
		info->recv_cq = NULL;
		goto alloc_cq_failed;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.event_handler = smbd_qp_async_error_upcall;
	qp_attr.qp_context = info;
	qp_attr.cap.max_send_wr = info->send_credit_target;
	qp_attr.cap.max_recv_wr = info->receive_credit_max;
	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
	qp_attr.cap.max_inline_data = 0;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	qp_attr.send_cq = info->send_cq;
	qp_attr.recv_cq = info->recv_cq;
	qp_attr.port_num = ~0;

	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
	if (rc) {
		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
		goto create_qp_failed;
	}

	memset(&conn_param, 0, sizeof(conn_param));
	conn_param.initiator_depth = 0;

	conn_param.responder_resources =
		info->id->device->attrs.max_qp_rd_atom
			< SMBD_CM_RESPONDER_RESOURCES ?
		info->id->device->attrs.max_qp_rd_atom :
		SMBD_CM_RESPONDER_RESOURCES;
	info->responder_resources = conn_param.responder_resources;
	log_rdma_mr(INFO, "responder_resources=%d\n",
		info->responder_resources);

	/* Need to send IRD/ORD in private data for iWARP */
	info->id->device->ops.get_port_immutable(
		info->id->device, info->id->port_num, &port_immutable);
	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
		ird_ord_hdr[0] = info->responder_resources;
		ird_ord_hdr[1] = 1;
		conn_param.private_data = ird_ord_hdr;
		conn_param.private_data_len = sizeof(ird_ord_hdr);
	} else {
		conn_param.private_data = NULL;
		conn_param.private_data_len = 0;
	}

	conn_param.retry_count = SMBD_CM_RETRY;
	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
	conn_param.flow_control = 0;

	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
		&addr_in->sin_addr, port);

	init_waitqueue_head(&info->conn_wait);
	init_waitqueue_head(&info->disconn_wait);
	init_waitqueue_head(&info->wait_reassembly_queue);
	rc = rdma_connect(info->id, &conn_param);
	if (rc) {
		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
		goto rdma_connect_failed;
	}

	wait_event_interruptible(
		info->conn_wait, info->transport_status != SMBD_CONNECTING);

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
		goto rdma_connect_failed;
	}

	log_rdma_event(INFO, "rdma_connect connected\n");

	rc = allocate_caches_and_workqueue(info);
	if (rc) {
		log_rdma_event(ERR, "cache allocation failed\n");
		goto allocate_cache_failed;
	}

	init_waitqueue_head(&info->wait_send_queue);
	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
	INIT_DELAYED_WORK(&info->send_immediate_work, send_immediate_work);
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);

	init_waitqueue_head(&info->wait_send_pending);
	atomic_set(&info->send_pending, 0);

	init_waitqueue_head(&info->wait_send_payload_pending);
	atomic_set(&info->send_payload_pending, 0);

	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
	INIT_WORK(&info->recv_done_work, smbd_recv_done_work);
	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
	info->new_credits_offered = 0;
	spin_lock_init(&info->lock_new_credits_offered);

	rc = smbd_negotiate(info);
	if (rc) {
		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
		goto negotiation_failed;
	}

	rc = allocate_mr_list(info);
	if (rc) {
		log_rdma_mr(ERR, "memory registration allocation failed\n");
		goto allocate_mr_failed;
	}

	return info;

allocate_mr_failed:
	/* At this point, we need a full transport shutdown */
	smbd_destroy(server);
	return NULL;

negotiation_failed:
	cancel_delayed_work_sync(&info->idle_timer_work);
	destroy_caches_and_workqueue(info);
	info->transport_status = SMBD_NEGOTIATE_FAILED;
	init_waitqueue_head(&info->conn_wait);
	rdma_disconnect(info->id);
	wait_event(info->conn_wait,
		info->transport_status == SMBD_DISCONNECTED);

allocate_cache_failed:
rdma_connect_failed:
	rdma_destroy_qp(info->id);

create_qp_failed:
alloc_cq_failed:
	if (info->send_cq)
		ib_free_cq(info->send_cq);
	if (info->recv_cq)
		ib_free_cq(info->recv_cq);

config_failed:
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

create_id_failed:
	kfree(info);
	return NULL;
}

struct smbd_connection *smbd_get_connection(
	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
{
	struct smbd_connection *ret;
	int port = SMBD_PORT;

try_again:
	ret = _smbd_get_connection(server, dstaddr, port);

	/* Try SMB_PORT if SMBD_PORT doesn't work */
	if (!ret && port == SMBD_PORT) {
		port = SMB_PORT;
		goto try_again;
	}
	return ret;
}
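
/*
 * Illustrative sketch of the assumed caller (the details below are not
 * part of this file): the upper layer establishes the RDMA transport
 * at mount time and stores the result on the TCP_Server_Info
 * structure; a NULL return fails the connection setup.
 *
 *	server->smbd_conn = smbd_get_connection(
 *		server, (struct sockaddr *) &server->dstaddr);
 *	if (!server->smbd_conn)
 *		return -ENOENT;
 */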

/*
 * Receive data from receive reassembly queue
 * All the incoming data packets are placed in reassembly queue
 * buf: the buffer to read data into
 * size: the length of data to read
 * return value: actual data read
 * Note: this implementation copies the data from reassembly queue to receive
 * buffers used by upper layer. This is not the optimal code path. A better way
 * to do it is to not have upper layer allocate its receive buffers but rather
 * borrow the buffer from reassembly queue, and return it after data is
 * consumed. But this will require more changes to upper layer code, and also
 * need to consider packet boundaries while they are still being reassembled.
 */
static int smbd_recv_buf(struct smbd_connection *info, char *buf,
		unsigned int size)
{
	struct smbd_response *response;
	struct smbd_data_transfer *data_transfer;
	int to_copy, to_read, data_read, offset;
	u32 data_length, remaining_data_length, data_offset;
	int rc;

again:
	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue. The transport
	 * may add more entries to the back of the queue at the same time
	 */
	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
		info->reassembly_data_length);
	if (info->reassembly_data_length >= size) {
		int queue_length;
		int queue_removed = 0;

		/*
		 * Need to make sure reassembly_data_length is read before
		 * reading reassembly_queue_length and calling
		 * _get_first_reassembly. This call is lock free
		 * as we never read the end of the queue, which is being
		 * updated in SOFTIRQ context as more data is received
		 */
		virt_rmb();
		queue_length = info->reassembly_queue_length;
		data_read = 0;
		to_read = size;
		offset = info->first_entry_offset;
		while (data_read < size) {
			response = _get_first_reassembly(info);
			data_transfer = smbd_response_payload(response);
			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length =
				le32_to_cpu(
					data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			/*
			 * The upper layer expects RFC1002 length at the
			 * beginning of the payload. Return it to indicate
			 * the total length of the packet. This minimizes
			 * changes to the upper layer packet processing
			 * logic. It will eventually be removed when an
			 * intermediate transport layer is added
			 */
			if (response->first_segment && size == 4) {
				unsigned int rfc1002_len =
					data_length + remaining_data_length;
				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
				data_read = 4;
				response->first_segment = false;
				log_read(INFO, "returning rfc1002 length %d\n",
					rfc1002_len);
				goto read_rfc1002_done;
			}

			to_copy = min_t(int, data_length - offset, to_read);
			memcpy(
				buf + data_read,
				(char *)data_transfer + data_offset + offset,
				to_copy);

			/* move on to the next buffer? */
			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length)
					list_del(&response->list);
				else {
					spin_lock_irq(
						&info->reassembly_queue_lock);
					list_del(&response->list);
					spin_unlock_irq(
						&info->reassembly_queue_lock);
				}
				queue_removed++;
				info->count_reassembly_queue--;
				info->count_dequeue_reassembly_queue++;
				put_receive_buffer(info, response);
				offset = 0;
				log_read(INFO, "put_receive_buffer offset=0\n");
			} else
				offset += to_copy;

			to_read -= to_copy;
			data_read += to_copy;

			log_read(INFO, "_get_first_reassembly memcpy %d bytes "
				"data_transfer_length-offset=%d after that "
				"to_read=%d data_read=%d offset=%d\n",
				to_copy, data_length - offset,
				to_read, data_read, offset);
		}

		spin_lock_irq(&info->reassembly_queue_lock);
		info->reassembly_data_length -= data_read;
		info->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&info->reassembly_queue_lock);

		info->first_entry_offset = offset;
		log_read(INFO, "returning to thread data_read=%d "
			"reassembly_data_length=%d first_entry_offset=%d\n",
			data_read, info->reassembly_data_length,
			info->first_entry_offset);
read_rfc1002_done:
		return data_read;
	}

	log_read(INFO, "wait_event on more data\n");
	rc = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= size ||
			info->transport_status != SMBD_CONNECTED);
	/* Don't return any data if interrupted */
	if (rc)
		return rc;

	if (info->transport_status != SMBD_CONNECTED) {
		log_read(ERR, "disconnected\n");
		return -ECONNABORTED;
	}

	goto again;
}
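
/*
 * Illustrative sketch of the RFC1002 behavior above (caller shape is an
 * assumption, not code from this driver): the first 4-byte read of a
 * new SMB Direct message does not copy payload; it synthesizes the
 * RFC1002 length field from data_length + remaining_data_length so the
 * upper layer can size its buffer before reading the body.
 *
 *	__be32 len_field;
 *	unsigned int pdu_len;
 *
 *	smbd_recv_buf(info, (char *) &len_field, 4);
 *	pdu_len = be32_to_cpu(len_field);
 *	smbd_recv_buf(info, body_buf, pdu_len);
 */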

/*
 * Receive a page from receive reassembly queue
 * page: the page to read data into
 * to_read: the length of data to read
 * return value: actual data read
 */
static int smbd_recv_page(struct smbd_connection *info,
		struct page *page, unsigned int page_offset,
		unsigned int to_read)
{
	int ret;
	char *to_address;
	void *page_address;

	/* make sure we have the page ready for read */
	ret = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= to_read ||
			info->transport_status != SMBD_CONNECTED);
	if (ret)
		return ret;

	/* now we can read from reassembly queue and not sleep */
	page_address = kmap_atomic(page);
	to_address = (char *) page_address + page_offset;

	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
		page, to_address, to_read);

	ret = smbd_recv_buf(info, to_address, to_read);
	kunmap_atomic(page_address);

	return ret;
}

/*
 * Receive data from transport
 * msg: a msghdr pointing to the buffer, can be ITER_KVEC or ITER_BVEC
 * return: total bytes read, or 0. SMB Direct will not do partial read.
 */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
	char *buf;
	struct page *page;
	unsigned int to_read, page_offset;
	int rc;

	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
		/* It's a bug in upper layer to get here */
		cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
			 iov_iter_rw(&msg->msg_iter));
		rc = -EINVAL;
		goto out;
	}

	switch (iov_iter_type(&msg->msg_iter)) {
	case ITER_KVEC:
		buf = msg->msg_iter.kvec->iov_base;
		to_read = msg->msg_iter.kvec->iov_len;
		rc = smbd_recv_buf(info, buf, to_read);
		break;

	case ITER_BVEC:
		page = msg->msg_iter.bvec->bv_page;
		page_offset = msg->msg_iter.bvec->bv_offset;
		to_read = msg->msg_iter.bvec->bv_len;
		rc = smbd_recv_page(info, page, page_offset, to_read);
		break;

	default:
		/* It's a bug in upper layer to get here */
		cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
			 iov_iter_type(&msg->msg_iter));
		rc = -EINVAL;
	}

out:
	/* SMBDirect will read it all or nothing */
	if (rc > 0)
		msg->msg_iter.count = 0;
	return rc;
}
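
/*
 * Illustrative sketch of an assumed caller (not part of this driver):
 * a reader hands smbd_recv() a msghdr whose iterator is ITER_KVEC for
 * a kernel buffer, or ITER_BVEC for a page.
 *
 *	struct kvec vec = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { .msg_flags = 0 };
 *	int rc;
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len);
 *	rc = smbd_recv(server->smbd_conn, &msg);
 */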

/*
 * Send data to transport
 * Each rqst is transported as a SMBDirect payload
 * rqst: the data to write
 * return value: 0 if successfully written, otherwise error code
 */
int smbd_send(struct TCP_Server_Info *server,
	int num_rqst, struct smb_rqst *rqst_array)
{
	struct smbd_connection *info = server->smbd_conn;
	struct kvec vec;
	int nvecs;
	int size;
	unsigned int buflen, remaining_data_length;
	int start, i, j;
	int max_iov_size =
		info->max_send_size - sizeof(struct smbd_data_transfer);
	struct kvec *iov;
	int rc;
	struct smb_rqst *rqst;
	int rqst_idx;

	if (info->transport_status != SMBD_CONNECTED) {
		rc = -EAGAIN;
		goto done;
	}

	/*
	 * Add in the page array if there is one. The caller needs to set
	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
	 * ends at page boundary
	 */
	remaining_data_length = 0;
	for (i = 0; i < num_rqst; i++)
		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);

	if (remaining_data_length + sizeof(struct smbd_data_transfer) >
		info->max_fragmented_send_size) {
		log_write(ERR, "payload size %d > max size %d\n",
			remaining_data_length, info->max_fragmented_send_size);
		rc = -EINVAL;
		goto done;
	}

	log_write(INFO, "num_rqst=%d total length=%u\n",
			num_rqst, remaining_data_length);

	rqst_idx = 0;
next_rqst:
	rqst = &rqst_array[rqst_idx];
	iov = rqst->rq_iov;

	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
		rqst_idx, smb_rqst_len(server, rqst));
	for (i = 0; i < rqst->rq_nvec; i++)
		dump_smb(iov[i].iov_base, iov[i].iov_len);

	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
		"rq_tailsz=%d buflen=%lu\n",
		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
		rqst->rq_tailsz, smb_rqst_len(server, rqst));

	start = i = 0;
	buflen = 0;
	while (true) {
		buflen += iov[i].iov_len;
		if (buflen > max_iov_size) {
			if (i > start) {
				remaining_data_length -=
					(buflen-iov[i].iov_len);
				log_write(INFO, "sending iov[] from start=%d "
					"i=%d nvecs=%d "
					"remaining_data_length=%d\n",
					start, i, i-start,
					remaining_data_length);
				rc = smbd_post_send_data(
					info, &iov[start], i-start,
					remaining_data_length);
				if (rc)
					goto done;
			} else {
				/* iov[start] is too big, break it */
				nvecs = (buflen+max_iov_size-1)/max_iov_size;
				log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
					" break to %d vectors\n",
					start, iov[start].iov_base,
					buflen, nvecs);
				for (j = 0; j < nvecs; j++) {
					vec.iov_base =
						(char *)iov[start].iov_base +
						j*max_iov_size;
					vec.iov_len = max_iov_size;
					if (j == nvecs-1)
						vec.iov_len =
							buflen -
							max_iov_size*(nvecs-1);
					remaining_data_length -= vec.iov_len;
					log_write(INFO,
						"sending vec j=%d iov_base=%p"
						" iov_len=%zu "
						"remaining_data_length=%d\n",
						j, vec.iov_base, vec.iov_len,
						remaining_data_length);
					rc = smbd_post_send_data(
						info, &vec, 1,
						remaining_data_length);
					if (rc)
						goto done;
				}
				i++;
				if (i == rqst->rq_nvec)
					break;
			}
			start = i;
			buflen = 0;
		} else {
			i++;
			if (i == rqst->rq_nvec) {
				/* send out all remaining vecs */
				remaining_data_length -= buflen;
				log_write(INFO,
					"sending iov[] from start=%d i=%d "
					"nvecs=%d remaining_data_length=%d\n",
					start, i, i-start,
					remaining_data_length);
				rc = smbd_post_send_data(info, &iov[start],
					i-start, remaining_data_length);
				if (rc)
					goto done;
				break;
			}
		}
		log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
	}

	/* now sending pages if there are any */
	for (i = 0; i < rqst->rq_npages; i++) {
		unsigned int offset;

		rqst_page_get_length(rqst, i, &buflen, &offset);
		nvecs = (buflen + max_iov_size - 1) / max_iov_size;
		log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
			buflen, nvecs);
		for (j = 0; j < nvecs; j++) {
			size = max_iov_size;
			if (j == nvecs-1)
				size = buflen - j*max_iov_size;
			remaining_data_length -= size;
			log_write(INFO, "sending pages i=%d offset=%d size=%d"
				" remaining_data_length=%d\n",
				i, j*max_iov_size+offset, size,
				remaining_data_length);
			rc = smbd_post_send_page(
				info, rqst->rq_pages[i],
				j*max_iov_size + offset,
				size, remaining_data_length);
			if (rc)
				goto done;
		}
	}

	rqst_idx++;
	if (rqst_idx < num_rqst)
		goto next_rqst;

done:
	/*
	 * As an optimization, we don't wait for individual I/O to finish
	 * before sending the next one.
	 * Send them all and wait for the pending send count to get to 0,
	 * which means all the I/Os have completed and we are good to return
	 */

	wait_event(info->wait_send_payload_pending,
		atomic_read(&info->send_payload_pending) == 0);

	return rc;
}
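
/*
 * Worked example of the fragmentation math above, assuming the default
 * smbd_max_send_size of 1364 bytes and a 24-byte struct
 * smbd_data_transfer header (its size as defined in smbdirect.h):
 * max_iov_size = 1364 - 24 = 1340 bytes of payload per send. A single
 * 4000-byte iovec is broken into
 * nvecs = (4000 + 1340 - 1) / 1340 = 3 sends: two of 1340 bytes and a
 * final one of 4000 - 2 * 1340 = 1320 bytes, with
 * remaining_data_length decreasing across the sequence so the peer can
 * track reassembly.
 */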

static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_mr *mr;
	struct ib_cqe *cqe;

	if (wc->status) {
		log_rdma_mr(ERR, "status=%d\n", wc->status);
		cqe = wc->wr_cqe;
		mr = container_of(cqe, struct smbd_mr, cqe);
		smbd_disconnect_rdma_connection(mr->conn);
	}
}

/*
 * The work queue function that recovers MRs
 * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
 * again. Both calls are slow, so finish them in a workqueue. This will not
 * block the I/O path.
 * There is one workqueue that recovers MRs, there is no need to lock as the
 * I/O requests calling smbd_register_mr will never update the links in the
 * mr_list.
 */
static void smbd_mr_recovery_work(struct work_struct *work)
{
	struct smbd_connection *info =
		container_of(work, struct smbd_connection, mr_recovery_work);
	struct smbd_mr *smbdirect_mr;
	int rc;

	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
		if (smbdirect_mr->state == MR_ERROR) {

			/* recover this MR entry */
			rc = ib_dereg_mr(smbdirect_mr->mr);
			if (rc) {
				log_rdma_mr(ERR,
					"ib_dereg_mr failed rc=%x\n",
					rc);
				smbd_disconnect_rdma_connection(info);
				continue;
			}

			smbdirect_mr->mr = ib_alloc_mr(
				info->pd, info->mr_type,
				info->max_frmr_depth);
			if (IS_ERR(smbdirect_mr->mr)) {
				log_rdma_mr(ERR,
					"ib_alloc_mr failed mr_type=%x "
					"max_frmr_depth=%x\n",
					info->mr_type,
					info->max_frmr_depth);
				smbd_disconnect_rdma_connection(info);
				continue;
			}
		} else
			/* This MR is being used, don't recover it */
			continue;

		smbdirect_mr->state = MR_READY;

		/* smbdirect_mr->state is updated by this function
		 * and is read and updated by I/O issuing CPUs trying
		 * to get a MR, the call to atomic_inc_return
		 * implies a memory barrier and guarantees this
		 * value is updated before waking up any calls to
		 * get_mr() from the I/O issuing CPUs
		 */
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	}
}

static void destroy_mr_list(struct smbd_connection *info)
{
	struct smbd_mr *mr, *tmp;

	cancel_work_sync(&info->mr_recovery_work);
	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
		if (mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(info->id->device, mr->sgl,
				mr->sgl_count, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgl);
		kfree(mr);
	}
}

/*
 * Allocate MRs used for RDMA read/write
 * The number of MRs will not exceed hardware capability in responder_resources
 * All MRs are kept in mr_list. The MR can be recovered after it's used
 * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
 * as MRs are used and recovered for I/O, but the list links will not change
 */
static int allocate_mr_list(struct smbd_connection *info)
{
	int i;
	struct smbd_mr *smbdirect_mr, *tmp;

	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
	init_waitqueue_head(&info->wait_for_mr_cleanup);
	/* Allocate more MRs (2x) than hardware responder_resources */
	for (i = 0; i < info->responder_resources * 2; i++) {
		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
		if (!smbdirect_mr)
			goto out;
		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
					info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
				"max_frmr_depth=%x\n",
				info->mr_type, info->max_frmr_depth);
			goto out;
		}
		smbdirect_mr->sgl = kcalloc(
					info->max_frmr_depth,
					sizeof(struct scatterlist),
					GFP_KERNEL);
		if (!smbdirect_mr->sgl) {
			log_rdma_mr(ERR, "failed to allocate sgl\n");
			ib_dereg_mr(smbdirect_mr->mr);
			goto out;
		}
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;

		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
	}
	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
	return 0;

out:
	kfree(smbdirect_mr);

	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
		ib_dereg_mr(smbdirect_mr->mr);
		kfree(smbdirect_mr->sgl);
		kfree(smbdirect_mr);
	}
	return -ENOMEM;
}

/*
 * Get a MR from mr_list. This function waits until there is at least one
 * MR available in the list. It may access the list while the
 * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
 * as they never modify the same places. However, there may be several CPUs
 * issuing I/O trying to get a MR at the same time, mr_list_lock is used to
 * protect this situation.
 */
static struct smbd_mr *get_mr(struct smbd_connection *info)
{
	struct smbd_mr *ret;
	int rc;
again:
	rc = wait_event_interruptible(info->wait_mr,
		atomic_read(&info->mr_ready_count) ||
		info->transport_status != SMBD_CONNECTED);
	if (rc) {
		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
		return NULL;
	}

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_mr(ERR, "info->transport_status=%x\n",
			info->transport_status);
		return NULL;
	}

	spin_lock(&info->mr_list_lock);
	list_for_each_entry(ret, &info->mr_list, list) {
		if (ret->state == MR_READY) {
			ret->state = MR_REGISTERED;
			spin_unlock(&info->mr_list_lock);
			atomic_dec(&info->mr_ready_count);
			atomic_inc(&info->mr_used_count);
			return ret;
		}
	}

	spin_unlock(&info->mr_list_lock);
	/*
	 * It is possible that we could fail to get a MR because other
	 * processes may try to acquire a MR at the same time. If this is
	 * the case, retry it.
	 */
	goto again;
}

/*
 * Register memory for RDMA read/write
 * pages[]: the list of pages to register memory with
 * num_pages: the number of pages to register
 * tailsz: if non-zero, the bytes to register in the last page
 * writing: true if this is a RDMA write (SMB read), false for RDMA read
 * need_invalidate: true if this MR needs to be locally invalidated after I/O
 * return value: the MR registered, NULL if failed.
 */
struct smbd_mr *smbd_register_mr(
	struct smbd_connection *info, struct page *pages[], int num_pages,
	int offset, int tailsz, bool writing, bool need_invalidate)
{
	struct smbd_mr *smbdirect_mr;
	int rc, i;
	enum dma_data_direction dir;
	struct ib_reg_wr *reg_wr;

	if (num_pages > info->max_frmr_depth) {
		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
			num_pages, info->max_frmr_depth);
		return NULL;
	}

	smbdirect_mr = get_mr(info);
	if (!smbdirect_mr) {
		log_rdma_mr(ERR, "get_mr returning NULL\n");
		return NULL;
	}
	smbdirect_mr->need_invalidate = need_invalidate;
	smbdirect_mr->sgl_count = num_pages;
	sg_init_table(smbdirect_mr->sgl, num_pages);

	log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
			num_pages, offset, tailsz);

	if (num_pages == 1) {
		sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
		goto skip_multiple_pages;
	}

	/* We have at least two pages to register */
	sg_set_page(
		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
	i = 1;
	while (i < num_pages - 1) {
		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
		i++;
	}
	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
		tailsz ? tailsz : PAGE_SIZE, 0);

skip_multiple_pages:
	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	smbdirect_mr->dir = dir;
	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
	if (!rc) {
		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
			num_pages, dir, rc);
		goto dma_map_error;
	}

	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
		NULL, PAGE_SIZE);
	if (rc != num_pages) {
		log_rdma_mr(ERR,
			"ib_map_mr_sg failed rc = %d num_pages = %x\n",
			rc, num_pages);
		goto map_mr_error;
	}

	ib_update_fast_reg_key(smbdirect_mr->mr,
		ib_inc_rkey(smbdirect_mr->mr->rkey));
	reg_wr = &smbdirect_mr->wr;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	smbdirect_mr->cqe.done = register_mr_done;
	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
	reg_wr->mr = smbdirect_mr->mr;
	reg_wr->key = smbdirect_mr->mr->rkey;
	reg_wr->access = writing ?
			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			IB_ACCESS_REMOTE_READ;

	/*
	 * There is no need to wait for completion of ib_post_send
	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
	 * on the next ib_post_send when we actually send I/O to remote peer
	 */
	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
	if (!rc)
		return smbdirect_mr;

	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
		rc, reg_wr->key);

	/* If all failed, attempt to recover this MR by setting it to MR_ERROR */
map_mr_error:
	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
		smbdirect_mr->sgl_count, smbdirect_mr->dir);

dma_map_error:
	smbdirect_mr->state = MR_ERROR;
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	smbd_disconnect_rdma_connection(info);

	return NULL;
}

static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_mr *smbdirect_mr;
	struct ib_cqe *cqe;

	cqe = wc->wr_cqe;
	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
	smbdirect_mr->state = MR_INVALIDATED;
	if (wc->status != IB_WC_SUCCESS) {
		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
		smbdirect_mr->state = MR_ERROR;
	}
	complete(&smbdirect_mr->invalidate_done);
}

/*
 * Deregister a MR after I/O is done
 * This function may wait if remote invalidation is not used
 * and we have to locally invalidate the buffer to prevent data from being
 * modified by the remote peer after the upper layer consumes it
 */
int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
	struct ib_send_wr *wr;
	struct smbd_connection *info = smbdirect_mr->conn;
	int rc = 0;

	if (smbdirect_mr->need_invalidate) {
		/* Need to finish local invalidation before returning */
		wr = &smbdirect_mr->inv_wr;
		wr->opcode = IB_WR_LOCAL_INV;
		smbdirect_mr->cqe.done = local_inv_done;
		wr->wr_cqe = &smbdirect_mr->cqe;
		wr->num_sge = 0;
		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
		wr->send_flags = IB_SEND_SIGNALED;

		init_completion(&smbdirect_mr->invalidate_done);
		rc = ib_post_send(info->id->qp, wr, NULL);
		if (rc) {
			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
			smbd_disconnect_rdma_connection(info);
			goto done;
		}
		wait_for_completion(&smbdirect_mr->invalidate_done);
		smbdirect_mr->need_invalidate = false;
	} else
		/*
		 * For remote invalidation, just set it to MR_INVALIDATED
		 * and defer to mr_recovery_work to recover the MR for next use
		 */
		smbdirect_mr->state = MR_INVALIDATED;

	if (smbdirect_mr->state == MR_INVALIDATED) {
		ib_dma_unmap_sg(
			info->id->device, smbdirect_mr->sgl,
			smbdirect_mr->sgl_count,
			smbdirect_mr->dir);
		smbdirect_mr->state = MR_READY;
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	} else
		/*
		 * Schedule the work to do MR recovery for future I/Os.
		 * MR recovery is slow and we don't want it to block the
		 * current I/O
		 */
		queue_work(info->workqueue, &info->mr_recovery_work);

done:
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);

	return rc;
}
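
/*
 * Illustrative sketch of the MR lifecycle for an RDMA write (SMB read);
 * the caller-side names below are hypothetical, the real callers live
 * in the upper-layer read/write paths:
 *
 *	struct smbd_mr *mr;
 *	int rc;
 *
 *	mr = smbd_register_mr(server->smbd_conn, pages, num_pages,
 *			      offset, tailsz, true, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *	... advertise mr->mr->rkey to the peer in the SMB2 request so it
 *	    can RDMA-write into the registered pages, wait for the
 *	    response, then release the registration ...
 *	rc = smbd_deregister_mr(mr);
 */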