1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *   Copyright (C) 2017, Microsoft Corporation.
4  *
5  *   Author(s): Long Li <longli@microsoft.com>
6  */
7 #include <linux/module.h>
8 #include <linux/highmem.h>
9 #include "smbdirect.h"
10 #include "cifs_debug.h"
11 #include "cifsproto.h"
12 #include "smb2proto.h"
13 
14 static struct smbd_response *get_empty_queue_buffer(
15 		struct smbd_connection *info);
16 static struct smbd_response *get_receive_buffer(
17 		struct smbd_connection *info);
18 static void put_receive_buffer(
19 		struct smbd_connection *info,
20 		struct smbd_response *response);
21 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
22 static void destroy_receive_buffers(struct smbd_connection *info);
23 
24 static void put_empty_packet(
25 		struct smbd_connection *info, struct smbd_response *response);
26 static void enqueue_reassembly(
27 		struct smbd_connection *info,
28 		struct smbd_response *response, int data_length);
29 static struct smbd_response *_get_first_reassembly(
30 		struct smbd_connection *info);
31 
32 static int smbd_post_recv(
33 		struct smbd_connection *info,
34 		struct smbd_response *response);
35 
36 static int smbd_post_send_empty(struct smbd_connection *info);
37 static int smbd_post_send_data(
38 		struct smbd_connection *info,
39 		struct kvec *iov, int n_vec, int remaining_data_length);
40 static int smbd_post_send_page(struct smbd_connection *info,
41 		struct page *page, unsigned long offset,
42 		size_t size, int remaining_data_length);
43 
44 static void destroy_mr_list(struct smbd_connection *info);
45 static int allocate_mr_list(struct smbd_connection *info);
46 
47 /* SMBD version number */
48 #define SMBD_V1	0x0100
49 
50 /* Port numbers for SMBD transport */
51 #define SMB_PORT	445
52 #define SMBD_PORT	5445
53 
54 /* Address lookup and resolve timeout in ms */
55 #define RDMA_RESOLVE_TIMEOUT	5000
56 
57 /* SMBD negotiation timeout in seconds */
58 #define SMBD_NEGOTIATE_TIMEOUT	120
59 
60 /* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */
61 #define SMBD_MIN_RECEIVE_SIZE		128
62 #define SMBD_MIN_FRAGMENTED_SIZE	131072
63 
64 /*
65  * Default maximum number of RDMA read/write outstanding on this connection
66  * This value may be decreased during QP creation, based on hardware limits
67  */
68 #define SMBD_CM_RESPONDER_RESOURCES	32
69 
70 /* Maximum number of retries on data transfer operations */
71 #define SMBD_CM_RETRY			6
72 /* No need to retry on Receiver Not Ready since SMBD manages credits */
73 #define SMBD_CM_RNR_RETRY		0
74 
75 /*
76  * User configurable initial values per SMBD transport connection
77  * as defined in [MS-SMBD] 3.1.1.1
78  * These may change after SMBD negotiation
79  */
80 /* The local peer's maximum number of credits to grant to the peer */
81 int smbd_receive_credit_max = 255;
82 
83 /* The number of credits the remote peer requests from the local peer */
84 int smbd_send_credit_target = 255;
85 
86 /* The maximum single-message size that can be sent to the remote peer */
87 int smbd_max_send_size = 1364;
88 
89 /*  The maximum fragmented upper-layer payload receive size supported */
90 int smbd_max_fragmented_recv_size = 1024 * 1024;
91 
92 /*  The maximum single-message size which can be received */
93 int smbd_max_receive_size = 8192;
94 
95 /* The timeout to initiate send of a keepalive message on idle */
96 int smbd_keep_alive_interval = 120;
97 
98 /*
99  * User configurable initial values for RDMA transport
100  * The actual values used may be lower and are limited to hardware capabilities
101  */
102 /* Default maximum number of SGEs in a RDMA write/read */
103 int smbd_max_frmr_depth = 2048;
104 
105 /* If the payload is smaller than this, use RDMA send/recv, not read/write */
106 int rdma_readwrite_threshold = 4096;
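/*
 * Illustrative sketch (assumed caller behavior, not code from this file):
 * the threshold above roughly selects how a payload is transferred.
 *
 *	if (payload_bytes < rdma_readwrite_threshold)
 *		carry the payload inline in SMBD send/recv packets;
 *	else
 *		register the buffer and let the peer perform RDMA read/write;
 */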
107 
108 /* Transport logging functions
109  * Logging is defined as classes. They can be OR'ed to define the actual
110  * logging level via module parameter smbd_logging_class
111  * e.g. cifs.smbd_logging_class=0xa0 will log all log_rdma_recv() and
112  * log_rdma_event()
113  */
114 #define LOG_OUTGOING			0x1
115 #define LOG_INCOMING			0x2
116 #define LOG_READ			0x4
117 #define LOG_WRITE			0x8
118 #define LOG_RDMA_SEND			0x10
119 #define LOG_RDMA_RECV			0x20
120 #define LOG_KEEP_ALIVE			0x40
121 #define LOG_RDMA_EVENT			0x80
122 #define LOG_RDMA_MR			0x100
123 static unsigned int smbd_logging_class;
124 module_param(smbd_logging_class, uint, 0644);
125 MODULE_PARM_DESC(smbd_logging_class,
126 	"Logging class for SMBD transport 0x0 to 0x100");
127 
128 #define ERR		0x0
129 #define INFO		0x1
130 static unsigned int smbd_logging_level = ERR;
131 module_param(smbd_logging_level, uint, 0644);
132 MODULE_PARM_DESC(smbd_logging_level,
133 	"Logging level for SMBD transport, 0 (default): error, 1: info");
134 
135 #define log_rdma(level, class, fmt, args...)				\
136 do {									\
137 	if (level <= smbd_logging_level || class & smbd_logging_class)	\
138 		cifs_dbg(VFS, "%s:%d " fmt, __func__, __LINE__, ##args);\
139 } while (0)
140 
141 #define log_outgoing(level, fmt, args...) \
142 		log_rdma(level, LOG_OUTGOING, fmt, ##args)
143 #define log_incoming(level, fmt, args...) \
144 		log_rdma(level, LOG_INCOMING, fmt, ##args)
145 #define log_read(level, fmt, args...)	log_rdma(level, LOG_READ, fmt, ##args)
146 #define log_write(level, fmt, args...)	log_rdma(level, LOG_WRITE, fmt, ##args)
147 #define log_rdma_send(level, fmt, args...) \
148 		log_rdma(level, LOG_RDMA_SEND, fmt, ##args)
149 #define log_rdma_recv(level, fmt, args...) \
150 		log_rdma(level, LOG_RDMA_RECV, fmt, ##args)
151 #define log_keep_alive(level, fmt, args...) \
152 		log_rdma(level, LOG_KEEP_ALIVE, fmt, ##args)
153 #define log_rdma_event(level, fmt, args...) \
154 		log_rdma(level, LOG_RDMA_EVENT, fmt, ##args)
155 #define log_rdma_mr(level, fmt, args...) \
156 		log_rdma(level, LOG_RDMA_MR, fmt, ##args)
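/*
 * Example (illustrative): the logging classes are single bits, so they can
 * be OR'ed when setting the module parameter. For instance
 *
 *	LOG_RDMA_SEND | LOG_RDMA_RECV == 0x30
 *
 * so loading with "cifs.smbd_logging_class=0x30" (or writing 0x30 to
 * /sys/module/cifs/parameters/smbd_logging_class) enables log_rdma_send()
 * and log_rdma_recv() regardless of smbd_logging_level.
 */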
157 
158 static void smbd_disconnect_rdma_work(struct work_struct *work)
159 {
160 	struct smbd_connection *info =
161 		container_of(work, struct smbd_connection, disconnect_work);
162 
163 	if (info->transport_status == SMBD_CONNECTED) {
164 		info->transport_status = SMBD_DISCONNECTING;
165 		rdma_disconnect(info->id);
166 	}
167 }
168 
169 static void smbd_disconnect_rdma_connection(struct smbd_connection *info)
170 {
171 	queue_work(info->workqueue, &info->disconnect_work);
172 }
173 
174 /* Upcall from RDMA CM */
175 static int smbd_conn_upcall(
176 		struct rdma_cm_id *id, struct rdma_cm_event *event)
177 {
178 	struct smbd_connection *info = id->context;
179 
180 	log_rdma_event(INFO, "event=%d status=%d\n",
181 		event->event, event->status);
182 
183 	switch (event->event) {
184 	case RDMA_CM_EVENT_ADDR_RESOLVED:
185 	case RDMA_CM_EVENT_ROUTE_RESOLVED:
186 		info->ri_rc = 0;
187 		complete(&info->ri_done);
188 		break;
189 
190 	case RDMA_CM_EVENT_ADDR_ERROR:
191 		info->ri_rc = -EHOSTUNREACH;
192 		complete(&info->ri_done);
193 		break;
194 
195 	case RDMA_CM_EVENT_ROUTE_ERROR:
196 		info->ri_rc = -ENETUNREACH;
197 		complete(&info->ri_done);
198 		break;
199 
200 	case RDMA_CM_EVENT_ESTABLISHED:
201 		log_rdma_event(INFO, "connected event=%d\n", event->event);
202 		info->transport_status = SMBD_CONNECTED;
203 		wake_up_interruptible(&info->conn_wait);
204 		break;
205 
206 	case RDMA_CM_EVENT_CONNECT_ERROR:
207 	case RDMA_CM_EVENT_UNREACHABLE:
208 	case RDMA_CM_EVENT_REJECTED:
209 		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
210 		info->transport_status = SMBD_DISCONNECTED;
211 		wake_up_interruptible(&info->conn_wait);
212 		break;
213 
214 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
215 	case RDMA_CM_EVENT_DISCONNECTED:
216 		/* This happens when we fail the negotiation */
217 		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
218 			info->transport_status = SMBD_DISCONNECTED;
219 			wake_up(&info->conn_wait);
220 			break;
221 		}
222 
223 		info->transport_status = SMBD_DISCONNECTED;
224 		wake_up_interruptible(&info->disconn_wait);
225 		wake_up_interruptible(&info->wait_reassembly_queue);
226 		wake_up_interruptible_all(&info->wait_send_queue);
227 		break;
228 
229 	default:
230 		break;
231 	}
232 
233 	return 0;
234 }
235 
236 /* Upcall from RDMA QP */
237 static void
238 smbd_qp_async_error_upcall(struct ib_event *event, void *context)
239 {
240 	struct smbd_connection *info = context;
241 
242 	log_rdma_event(ERR, "%s on device %s info %p\n",
243 		ib_event_msg(event->event), event->device->name, info);
244 
245 	switch (event->event) {
246 	case IB_EVENT_CQ_ERR:
247 	case IB_EVENT_QP_FATAL:
248 		smbd_disconnect_rdma_connection(info);
249 		break;
250 
251 	default:
252 		break;
253 	}
254 }
255 
256 static inline void *smbd_request_payload(struct smbd_request *request)
257 {
258 	return (void *)request->packet;
259 }
260 
261 static inline void *smbd_response_payload(struct smbd_response *response)
262 {
263 	return (void *)response->packet;
264 }
265 
266 /* Called when a RDMA send is done */
267 static void send_done(struct ib_cq *cq, struct ib_wc *wc)
268 {
269 	int i;
270 	struct smbd_request *request =
271 		container_of(wc->wr_cqe, struct smbd_request, cqe);
272 
273 	log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
274 		request, wc->status);
275 
276 	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
277 		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
278 			wc->status, wc->opcode);
279 		smbd_disconnect_rdma_connection(request->info);
280 	}
281 
282 	for (i = 0; i < request->num_sge; i++)
283 		ib_dma_unmap_single(request->info->id->device,
284 			request->sge[i].addr,
285 			request->sge[i].length,
286 			DMA_TO_DEVICE);
287 
288 	if (atomic_dec_and_test(&request->info->send_pending))
289 		wake_up(&request->info->wait_send_pending);
290 
291 	wake_up(&request->info->wait_post_send);
292 
293 	mempool_free(request, request->info->request_mempool);
294 }
295 
296 static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
297 {
298 	log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
299 		       resp->min_version, resp->max_version,
300 		       resp->negotiated_version, resp->credits_requested,
301 		       resp->credits_granted, resp->status,
302 		       resp->max_readwrite_size, resp->preferred_send_size,
303 		       resp->max_receive_size, resp->max_fragmented_size);
304 }
305 
306 /*
307  * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
308  * response, packet_length: the negotiation response message
309  * return value: true if negotiation is a success, false if failed
310  */
311 static bool process_negotiation_response(
312 		struct smbd_response *response, int packet_length)
313 {
314 	struct smbd_connection *info = response->info;
315 	struct smbd_negotiate_resp *packet = smbd_response_payload(response);
316 
317 	if (packet_length < sizeof(struct smbd_negotiate_resp)) {
318 		log_rdma_event(ERR,
319 			"error: packet_length=%d\n", packet_length);
320 		return false;
321 	}
322 
323 	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
324 		log_rdma_event(ERR, "error: negotiated_version=%x\n",
325 			le16_to_cpu(packet->negotiated_version));
326 		return false;
327 	}
328 	info->protocol = le16_to_cpu(packet->negotiated_version);
329 
330 	if (packet->credits_requested == 0) {
331 		log_rdma_event(ERR, "error: credits_requested==0\n");
332 		return false;
333 	}
334 	info->receive_credit_target = le16_to_cpu(packet->credits_requested);
335 
336 	if (packet->credits_granted == 0) {
337 		log_rdma_event(ERR, "error: credits_granted==0\n");
338 		return false;
339 	}
340 	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));
341 
342 	atomic_set(&info->receive_credits, 0);
343 
344 	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
345 		log_rdma_event(ERR, "error: preferred_send_size=%d\n",
346 			le32_to_cpu(packet->preferred_send_size));
347 		return false;
348 	}
349 	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
350 
351 	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
352 		log_rdma_event(ERR, "error: max_receive_size=%d\n",
353 			le32_to_cpu(packet->max_receive_size));
354 		return false;
355 	}
356 	info->max_send_size = min_t(int, info->max_send_size,
357 					le32_to_cpu(packet->max_receive_size));
358 
359 	if (le32_to_cpu(packet->max_fragmented_size) <
360 			SMBD_MIN_FRAGMENTED_SIZE) {
361 		log_rdma_event(ERR, "error: max_fragmented_size=%d\n",
362 			le32_to_cpu(packet->max_fragmented_size));
363 		return false;
364 	}
365 	info->max_fragmented_send_size =
366 		le32_to_cpu(packet->max_fragmented_size);
367 	info->rdma_readwrite_threshold =
368 		rdma_readwrite_threshold > info->max_fragmented_send_size ?
369 		info->max_fragmented_send_size :
370 		rdma_readwrite_threshold;
371 
372 
373 	info->max_readwrite_size = min_t(u32,
374 			le32_to_cpu(packet->max_readwrite_size),
375 			info->max_frmr_depth * PAGE_SIZE);
376 	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
377 
378 	return true;
379 }
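/*
 * Worked example of the clamping above (illustrative numbers): with the
 * default smbd_max_frmr_depth = 2048 and a 4KB PAGE_SIZE, the local cap is
 * 2048 * 4096 bytes = 8MB. If the peer advertises a max_readwrite_size of
 * 1MB, then info->max_readwrite_size becomes min(1MB, 8MB) = 1MB and
 * info->max_frmr_depth is recomputed as 1MB / 4KB = 256 pages.
 */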
380 
381 static void smbd_post_send_credits(struct work_struct *work)
382 {
383 	int ret = 0;
384 	int use_receive_queue = 1;
385 	int rc;
386 	struct smbd_response *response;
387 	struct smbd_connection *info =
388 		container_of(work, struct smbd_connection,
389 			post_send_credits_work);
390 
391 	if (info->transport_status != SMBD_CONNECTED) {
392 		wake_up(&info->wait_receive_queues);
393 		return;
394 	}
395 
396 	if (info->receive_credit_target >
397 		atomic_read(&info->receive_credits)) {
398 		while (true) {
399 			if (use_receive_queue)
400 				response = get_receive_buffer(info);
401 			else
402 				response = get_empty_queue_buffer(info);
403 			if (!response) {
404 				/* now switch to empty packet queue */
405 				if (use_receive_queue) {
406 					use_receive_queue = 0;
407 					continue;
408 				} else
409 					break;
410 			}
411 
412 			response->type = SMBD_TRANSFER_DATA;
413 			response->first_segment = false;
414 			rc = smbd_post_recv(info, response);
415 			if (rc) {
416 				log_rdma_recv(ERR,
417 					"post_recv failed rc=%d\n", rc);
418 				put_receive_buffer(info, response);
419 				break;
420 			}
421 
422 			ret++;
423 		}
424 	}
425 
426 	spin_lock(&info->lock_new_credits_offered);
427 	info->new_credits_offered += ret;
428 	spin_unlock(&info->lock_new_credits_offered);
429 
430 	/* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
431 	info->send_immediate = true;
432 	if (atomic_read(&info->receive_credits) <
433 		info->receive_credit_target - 1) {
434 		if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
435 		    info->send_immediate) {
436 			log_keep_alive(INFO, "send an empty message\n");
437 			smbd_post_send_empty(info);
438 		}
439 	}
440 }
441 
442 /* Called from softirq, when recv is done */
443 static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
444 {
445 	struct smbd_data_transfer *data_transfer;
446 	struct smbd_response *response =
447 		container_of(wc->wr_cqe, struct smbd_response, cqe);
448 	struct smbd_connection *info = response->info;
449 	int data_length = 0;
450 
451 	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
452 		      response, response->type, wc->status, wc->opcode,
453 		      wc->byte_len, wc->pkey_index);
454 
455 	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
456 		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
457 			wc->status, wc->opcode);
458 		smbd_disconnect_rdma_connection(info);
459 		goto error;
460 	}
461 
462 	ib_dma_sync_single_for_cpu(
463 		wc->qp->device,
464 		response->sge.addr,
465 		response->sge.length,
466 		DMA_FROM_DEVICE);
467 
468 	switch (response->type) {
469 	/* SMBD negotiation response */
470 	case SMBD_NEGOTIATE_RESP:
471 		dump_smbd_negotiate_resp(smbd_response_payload(response));
472 		info->full_packet_received = true;
473 		info->negotiate_done =
474 			process_negotiation_response(response, wc->byte_len);
475 		complete(&info->negotiate_completion);
476 		break;
477 
478 	/* SMBD data transfer packet */
479 	case SMBD_TRANSFER_DATA:
480 		data_transfer = smbd_response_payload(response);
481 		data_length = le32_to_cpu(data_transfer->data_length);
482 
483 		/*
484 		 * If this is a packet with data payload place the data in
485 		 * reassembly queue and wake up the reading thread
486 		 */
487 		if (data_length) {
488 			if (info->full_packet_received)
489 				response->first_segment = true;
490 
491 			if (le32_to_cpu(data_transfer->remaining_data_length))
492 				info->full_packet_received = false;
493 			else
494 				info->full_packet_received = true;
495 
496 			enqueue_reassembly(
497 				info,
498 				response,
499 				data_length);
500 		} else
501 			put_empty_packet(info, response);
502 
503 		if (data_length)
504 			wake_up_interruptible(&info->wait_reassembly_queue);
505 
506 		atomic_dec(&info->receive_credits);
507 		info->receive_credit_target =
508 			le16_to_cpu(data_transfer->credits_requested);
509 		if (le16_to_cpu(data_transfer->credits_granted)) {
510 			atomic_add(le16_to_cpu(data_transfer->credits_granted),
511 				&info->send_credits);
512 			/*
513 			 * We have new send credits granted from remote peer
514 			 * If any sender is waiting for credits, unblock it
515 			 */
516 			wake_up_interruptible(&info->wait_send_queue);
517 		}
518 
519 		log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
520 			     le16_to_cpu(data_transfer->flags),
521 			     le32_to_cpu(data_transfer->data_offset),
522 			     le32_to_cpu(data_transfer->data_length),
523 			     le32_to_cpu(data_transfer->remaining_data_length));
524 
525 		/* Send a KEEP_ALIVE response right away if requested */
526 		info->keep_alive_requested = KEEP_ALIVE_NONE;
527 		if (le16_to_cpu(data_transfer->flags) &
528 				SMB_DIRECT_RESPONSE_REQUESTED) {
529 			info->keep_alive_requested = KEEP_ALIVE_PENDING;
530 		}
531 
532 		return;
533 
534 	default:
535 		log_rdma_recv(ERR,
536 			"unexpected response type=%d\n", response->type);
537 	}
538 
539 error:
540 	put_receive_buffer(info, response);
541 }
542 
543 static struct rdma_cm_id *smbd_create_id(
544 		struct smbd_connection *info,
545 		struct sockaddr *dstaddr, int port)
546 {
547 	struct rdma_cm_id *id;
548 	int rc;
549 	__be16 *sport;
550 
551 	id = rdma_create_id(&init_net, smbd_conn_upcall, info,
552 		RDMA_PS_TCP, IB_QPT_RC);
553 	if (IS_ERR(id)) {
554 		rc = PTR_ERR(id);
555 		log_rdma_event(ERR, "rdma_create_id() failed %i\n", rc);
556 		return id;
557 	}
558 
559 	if (dstaddr->sa_family == AF_INET6)
560 		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
561 	else
562 		sport = &((struct sockaddr_in *)dstaddr)->sin_port;
563 
564 	*sport = htons(port);
565 
566 	init_completion(&info->ri_done);
567 	info->ri_rc = -ETIMEDOUT;
568 
569 	rc = rdma_resolve_addr(id, NULL, (struct sockaddr *)dstaddr,
570 		RDMA_RESOLVE_TIMEOUT);
571 	if (rc) {
572 		log_rdma_event(ERR, "rdma_resolve_addr() failed %i\n", rc);
573 		goto out;
574 	}
575 	rc = wait_for_completion_interruptible_timeout(
576 		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
577 	/* e.g. if interrupted returns -ERESTARTSYS */
578 	if (rc < 0) {
579 		log_rdma_event(ERR, "rdma_resolve_addr timeout rc: %i\n", rc);
580 		goto out;
581 	}
582 	rc = info->ri_rc;
583 	if (rc) {
584 		log_rdma_event(ERR, "rdma_resolve_addr() completed %i\n", rc);
585 		goto out;
586 	}
587 
588 	info->ri_rc = -ETIMEDOUT;
589 	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
590 	if (rc) {
591 		log_rdma_event(ERR, "rdma_resolve_route() failed %i\n", rc);
592 		goto out;
593 	}
594 	rc = wait_for_completion_interruptible_timeout(
595 		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
596 	/* e.g. if interrupted returns -ERESTARTSYS */
597 	if (rc < 0)  {
598 		log_rdma_event(ERR, "rdma_resolve_route timeout rc: %i\n", rc);
599 		goto out;
600 	}
601 	rc = info->ri_rc;
602 	if (rc) {
603 		log_rdma_event(ERR, "rdma_resolve_route() completed %i\n", rc);
604 		goto out;
605 	}
606 
607 	return id;
608 
609 out:
610 	rdma_destroy_id(id);
611 	return ERR_PTR(rc);
612 }
613 
614 /*
615  * Test if FRWR (Fast Registration Work Requests) is supported on the device
616  * This implementation requires FRWR for RDMA read/write
617  * return value: true if it is supported
618  */
619 static bool frwr_is_supported(struct ib_device_attr *attrs)
620 {
621 	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
622 		return false;
623 	if (attrs->max_fast_reg_page_list_len == 0)
624 		return false;
625 	return true;
626 }
627 
628 static int smbd_ia_open(
629 		struct smbd_connection *info,
630 		struct sockaddr *dstaddr, int port)
631 {
632 	int rc;
633 
634 	info->id = smbd_create_id(info, dstaddr, port);
635 	if (IS_ERR(info->id)) {
636 		rc = PTR_ERR(info->id);
637 		goto out1;
638 	}
639 
640 	if (!frwr_is_supported(&info->id->device->attrs)) {
641 		log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
642 		log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
643 			       info->id->device->attrs.device_cap_flags,
644 			       info->id->device->attrs.max_fast_reg_page_list_len);
645 		rc = -EPROTONOSUPPORT;
646 		goto out2;
647 	}
648 	info->max_frmr_depth = min_t(int,
649 		smbd_max_frmr_depth,
650 		info->id->device->attrs.max_fast_reg_page_list_len);
651 	info->mr_type = IB_MR_TYPE_MEM_REG;
652 	if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
653 		info->mr_type = IB_MR_TYPE_SG_GAPS;
654 
655 	info->pd = ib_alloc_pd(info->id->device, 0);
656 	if (IS_ERR(info->pd)) {
657 		rc = PTR_ERR(info->pd);
658 		log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
659 		goto out2;
660 	}
661 
662 	return 0;
663 
664 out2:
665 	rdma_destroy_id(info->id);
666 	info->id = NULL;
667 
668 out1:
669 	return rc;
670 }
671 
672 /*
673  * Send a negotiation request message to the peer
674  * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
675  * After negotiation, the transport is connected and ready for
676  * carrying upper layer SMB payload
677  */
678 static int smbd_post_send_negotiate_req(struct smbd_connection *info)
679 {
680 	struct ib_send_wr send_wr;
681 	int rc = -ENOMEM;
682 	struct smbd_request *request;
683 	struct smbd_negotiate_req *packet;
684 
685 	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
686 	if (!request)
687 		return rc;
688 
689 	request->info = info;
690 
691 	packet = smbd_request_payload(request);
692 	packet->min_version = cpu_to_le16(SMBD_V1);
693 	packet->max_version = cpu_to_le16(SMBD_V1);
694 	packet->reserved = 0;
695 	packet->credits_requested = cpu_to_le16(info->send_credit_target);
696 	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
697 	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
698 	packet->max_fragmented_size =
699 		cpu_to_le32(info->max_fragmented_recv_size);
700 
701 	request->num_sge = 1;
702 	request->sge[0].addr = ib_dma_map_single(
703 				info->id->device, (void *)packet,
704 				sizeof(*packet), DMA_TO_DEVICE);
705 	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
706 		rc = -EIO;
707 		goto dma_mapping_failed;
708 	}
709 
710 	request->sge[0].length = sizeof(*packet);
711 	request->sge[0].lkey = info->pd->local_dma_lkey;
712 
713 	ib_dma_sync_single_for_device(
714 		info->id->device, request->sge[0].addr,
715 		request->sge[0].length, DMA_TO_DEVICE);
716 
717 	request->cqe.done = send_done;
718 
719 	send_wr.next = NULL;
720 	send_wr.wr_cqe = &request->cqe;
721 	send_wr.sg_list = request->sge;
722 	send_wr.num_sge = request->num_sge;
723 	send_wr.opcode = IB_WR_SEND;
724 	send_wr.send_flags = IB_SEND_SIGNALED;
725 
726 	log_rdma_send(INFO, "sge addr=%llx length=%x lkey=%x\n",
727 		request->sge[0].addr,
728 		request->sge[0].length, request->sge[0].lkey);
729 
730 	atomic_inc(&info->send_pending);
731 	rc = ib_post_send(info->id->qp, &send_wr, NULL);
732 	if (!rc)
733 		return 0;
734 
735 	/* if we reach here, post send failed */
736 	log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
737 	atomic_dec(&info->send_pending);
738 	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
739 		request->sge[0].length, DMA_TO_DEVICE);
740 
741 	smbd_disconnect_rdma_connection(info);
742 
743 dma_mapping_failed:
744 	mempool_free(request, info->request_mempool);
745 	return rc;
746 }
747 
748 /*
749  * Extend the credits to remote peer
750  * This implements [MS-SMBD] 3.1.5.9
751  * The idea is that we should extend credits to remote peer as quickly as
752  * it's allowed, to maintain data flow. We allocate as many receive
753  * buffers as possible, and extend the receive credits to the remote peer.
754  * return value: the new credits being granted.
755  */
756 static int manage_credits_prior_sending(struct smbd_connection *info)
757 {
758 	int new_credits;
759 
760 	spin_lock(&info->lock_new_credits_offered);
761 	new_credits = info->new_credits_offered;
762 	info->new_credits_offered = 0;
763 	spin_unlock(&info->lock_new_credits_offered);
764 
765 	return new_credits;
766 }
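/*
 * Credit flow sketch (as implemented in this file): put_receive_buffer()
 * and put_empty_packet() queue post_send_credits_work; that work item
 * re-posts receives and accumulates the count in new_credits_offered under
 * lock_new_credits_offered. The next outgoing packet drains the counter
 * here and advertises it to the peer in packet->credits_granted.
 */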
767 
768 /*
769  * Check if we need to send a KEEP_ALIVE message
770  * The idle connection timer triggers a KEEP_ALIVE message when it expires
771  * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
772  * back a response.
773  * return value:
774  * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
775  * 0: otherwise
776  */
777 static int manage_keep_alive_before_sending(struct smbd_connection *info)
778 {
779 	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
780 		info->keep_alive_requested = KEEP_ALIVE_SENT;
781 		return 1;
782 	}
783 	return 0;
784 }
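/*
 * Keep-alive state transitions visible in this file:
 *	KEEP_ALIVE_NONE    -> KEEP_ALIVE_PENDING  in recv_done(), when an
 *	                      incoming packet has SMB_DIRECT_RESPONSE_REQUESTED
 *	KEEP_ALIVE_PENDING -> KEEP_ALIVE_SENT     here; the caller then sets
 *	                      SMB_DIRECT_RESPONSE_REQUESTED on the outgoing packet
 * idle_connection_timer() treats any state other than KEEP_ALIVE_NONE at
 * timer expiry as an error and disconnects.
 */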
785 
786 /* Post the send request */
787 static int smbd_post_send(struct smbd_connection *info,
788 		struct smbd_request *request)
789 {
790 	struct ib_send_wr send_wr;
791 	int rc, i;
792 
793 	for (i = 0; i < request->num_sge; i++) {
794 		log_rdma_send(INFO,
795 			"rdma_request sge[%d] addr=%llu length=%u\n",
796 			i, request->sge[i].addr, request->sge[i].length);
797 		ib_dma_sync_single_for_device(
798 			info->id->device,
799 			request->sge[i].addr,
800 			request->sge[i].length,
801 			DMA_TO_DEVICE);
802 	}
803 
804 	request->cqe.done = send_done;
805 
806 	send_wr.next = NULL;
807 	send_wr.wr_cqe = &request->cqe;
808 	send_wr.sg_list = request->sge;
809 	send_wr.num_sge = request->num_sge;
810 	send_wr.opcode = IB_WR_SEND;
811 	send_wr.send_flags = IB_SEND_SIGNALED;
812 
813 	rc = ib_post_send(info->id->qp, &send_wr, NULL);
814 	if (rc) {
815 		log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
816 		smbd_disconnect_rdma_connection(info);
817 		rc = -EAGAIN;
818 	} else
819 		/* Reset timer for idle connection after packet is sent */
820 		mod_delayed_work(info->workqueue, &info->idle_timer_work,
821 			info->keep_alive_interval*HZ);
822 
823 	return rc;
824 }
825 
826 static int smbd_post_send_sgl(struct smbd_connection *info,
827 	struct scatterlist *sgl, int data_length, int remaining_data_length)
828 {
829 	int num_sgs;
830 	int i, rc;
831 	int header_length;
832 	struct smbd_request *request;
833 	struct smbd_data_transfer *packet;
834 	int new_credits;
835 	struct scatterlist *sg;
836 
837 wait_credit:
838 	/* Wait for send credits. A SMBD packet needs one credit */
839 	rc = wait_event_interruptible(info->wait_send_queue,
840 		atomic_read(&info->send_credits) > 0 ||
841 		info->transport_status != SMBD_CONNECTED);
842 	if (rc)
843 		goto err_wait_credit;
844 
845 	if (info->transport_status != SMBD_CONNECTED) {
846 		log_outgoing(ERR, "disconnected not sending on wait_credit\n");
847 		rc = -EAGAIN;
848 		goto err_wait_credit;
849 	}
850 	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
851 		atomic_inc(&info->send_credits);
852 		goto wait_credit;
853 	}
854 
855 wait_send_queue:
856 	wait_event(info->wait_post_send,
857 		atomic_read(&info->send_pending) < info->send_credit_target ||
858 		info->transport_status != SMBD_CONNECTED);
859 
860 	if (info->transport_status != SMBD_CONNECTED) {
861 		log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
862 		rc = -EAGAIN;
863 		goto err_wait_send_queue;
864 	}
865 
866 	if (unlikely(atomic_inc_return(&info->send_pending) >
867 				info->send_credit_target)) {
868 		atomic_dec(&info->send_pending);
869 		goto wait_send_queue;
870 	}
871 
872 	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
873 	if (!request) {
874 		rc = -ENOMEM;
875 		goto err_alloc;
876 	}
877 
878 	request->info = info;
879 
880 	/* Fill in the packet header */
881 	packet = smbd_request_payload(request);
882 	packet->credits_requested = cpu_to_le16(info->send_credit_target);
883 
884 	new_credits = manage_credits_prior_sending(info);
885 	atomic_add(new_credits, &info->receive_credits);
886 	packet->credits_granted = cpu_to_le16(new_credits);
887 
888 	info->send_immediate = false;
889 
890 	packet->flags = 0;
891 	if (manage_keep_alive_before_sending(info))
892 		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
893 
894 	packet->reserved = 0;
895 	if (!data_length)
896 		packet->data_offset = 0;
897 	else
898 		packet->data_offset = cpu_to_le32(24);
899 	packet->data_length = cpu_to_le32(data_length);
900 	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
901 	packet->padding = 0;
902 
903 	log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
904 		     le16_to_cpu(packet->credits_requested),
905 		     le16_to_cpu(packet->credits_granted),
906 		     le32_to_cpu(packet->data_offset),
907 		     le32_to_cpu(packet->data_length),
908 		     le32_to_cpu(packet->remaining_data_length));
909 
910 	/* Map the packet to DMA */
911 	header_length = sizeof(struct smbd_data_transfer);
912 	/* If this is a packet without payload, don't send padding */
913 	if (!data_length)
914 		header_length = offsetof(struct smbd_data_transfer, padding);
915 
916 	request->num_sge = 1;
917 	request->sge[0].addr = ib_dma_map_single(info->id->device,
918 						 (void *)packet,
919 						 header_length,
920 						 DMA_TO_DEVICE);
921 	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
922 		rc = -EIO;
923 		request->sge[0].addr = 0;
924 		goto err_dma;
925 	}
926 
927 	request->sge[0].length = header_length;
928 	request->sge[0].lkey = info->pd->local_dma_lkey;
929 
930 	/* Fill in the packet data payload */
931 	num_sgs = sgl ? sg_nents(sgl) : 0;
932 	for_each_sg(sgl, sg, num_sgs, i) {
933 		request->sge[i+1].addr =
934 			ib_dma_map_page(info->id->device, sg_page(sg),
935 			       sg->offset, sg->length, DMA_TO_DEVICE);
936 		if (ib_dma_mapping_error(
937 				info->id->device, request->sge[i+1].addr)) {
938 			rc = -EIO;
939 			request->sge[i+1].addr = 0;
940 			goto err_dma;
941 		}
942 		request->sge[i+1].length = sg->length;
943 		request->sge[i+1].lkey = info->pd->local_dma_lkey;
944 		request->num_sge++;
945 	}
946 
947 	rc = smbd_post_send(info, request);
948 	if (!rc)
949 		return 0;
950 
951 err_dma:
952 	for (i = 0; i < request->num_sge; i++)
953 		if (request->sge[i].addr)
954 			ib_dma_unmap_single(info->id->device,
955 					    request->sge[i].addr,
956 					    request->sge[i].length,
957 					    DMA_TO_DEVICE);
958 	mempool_free(request, info->request_mempool);
959 
960 	/* roll back receive credits and credits to be offered */
961 	spin_lock(&info->lock_new_credits_offered);
962 	info->new_credits_offered += new_credits;
963 	spin_unlock(&info->lock_new_credits_offered);
964 	atomic_sub(new_credits, &info->receive_credits);
965 
966 err_alloc:
967 	if (atomic_dec_and_test(&info->send_pending))
968 		wake_up(&info->wait_send_pending);
969 
970 err_wait_send_queue:
971 	/* roll back send credits and pending */
972 	atomic_inc(&info->send_credits);
973 
974 err_wait_credit:
975 	return rc;
976 }
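/*
 * SGE layout produced above (for reference): sge[0] always maps the SMBD
 * data transfer header, sge[1..n] map the payload pages from the caller's
 * scatterlist, and request->num_sge counts the header plus all mapped
 * payload entries.
 */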
977 
978 /*
979  * Send a page
980  * page: the page to send
981  * offset: offset in the page to send
982  * size: length in the page to send
983  * remaining_data_length: remaining data to send in this payload
984  */
985 static int smbd_post_send_page(struct smbd_connection *info, struct page *page,
986 		unsigned long offset, size_t size, int remaining_data_length)
987 {
988 	struct scatterlist sgl;
989 
990 	sg_init_table(&sgl, 1);
991 	sg_set_page(&sgl, page, size, offset);
992 
993 	return smbd_post_send_sgl(info, &sgl, size, remaining_data_length);
994 }
995 
996 /*
997  * Send an empty message
998  * An empty message is used to extend credits to the peer and to keep the
999  * connection alive while there is no upper layer payload to send at the time
1000  */
1001 static int smbd_post_send_empty(struct smbd_connection *info)
1002 {
1003 	info->count_send_empty++;
1004 	return smbd_post_send_sgl(info, NULL, 0, 0);
1005 }
1006 
1007 /*
1008  * Send a data buffer
1009  * iov: the iov array describing the data buffers
1010  * n_vec: number of entries in the iov array
1011  * remaining_data_length: remaining data to send following this packet
1012  * in segmented SMBD packet
1013  */
1014 static int smbd_post_send_data(
1015 	struct smbd_connection *info, struct kvec *iov, int n_vec,
1016 	int remaining_data_length)
1017 {
1018 	int i;
1019 	u32 data_length = 0;
1020 	struct scatterlist sgl[SMBDIRECT_MAX_SGE];
1021 
1022 	if (n_vec > SMBDIRECT_MAX_SGE) {
1023 		cifs_dbg(VFS, "Can't fit data to SGL, n_vec=%d\n", n_vec);
1024 		return -EINVAL;
1025 	}
1026 
1027 	sg_init_table(sgl, n_vec);
1028 	for (i = 0; i < n_vec; i++) {
1029 		data_length += iov[i].iov_len;
1030 		sg_set_buf(&sgl[i], iov[i].iov_base, iov[i].iov_len);
1031 	}
1032 
1033 	return smbd_post_send_sgl(info, sgl, data_length, remaining_data_length);
1034 }
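/*
 * Usage sketch (hypothetical caller; hdr and body are assumed buffers):
 * sending a 100-byte header and a 400-byte body as one SMBD packet with
 * no further data to follow:
 *
 *	struct kvec iov[2] = {
 *		{ .iov_base = hdr,  .iov_len = 100 },
 *		{ .iov_base = body, .iov_len = 400 },
 *	};
 *	rc = smbd_post_send_data(info, iov, 2, 0);
 */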
1035 
1036 /*
1037  * Post a receive request to the transport
1038  * The remote peer can only send data when a receive request is posted
1039  * The interaction is controlled by send/receive credit system
1040  */
1041 static int smbd_post_recv(
1042 		struct smbd_connection *info, struct smbd_response *response)
1043 {
1044 	struct ib_recv_wr recv_wr;
1045 	int rc = -EIO;
1046 
1047 	response->sge.addr = ib_dma_map_single(
1048 				info->id->device, response->packet,
1049 				info->max_receive_size, DMA_FROM_DEVICE);
1050 	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
1051 		return rc;
1052 
1053 	response->sge.length = info->max_receive_size;
1054 	response->sge.lkey = info->pd->local_dma_lkey;
1055 
1056 	response->cqe.done = recv_done;
1057 
1058 	recv_wr.wr_cqe = &response->cqe;
1059 	recv_wr.next = NULL;
1060 	recv_wr.sg_list = &response->sge;
1061 	recv_wr.num_sge = 1;
1062 
1063 	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
1064 	if (rc) {
1065 		ib_dma_unmap_single(info->id->device, response->sge.addr,
1066 				    response->sge.length, DMA_FROM_DEVICE);
1067 		smbd_disconnect_rdma_connection(info);
1068 		log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
1069 	}
1070 
1071 	return rc;
1072 }
1073 
1074 /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
1075 static int smbd_negotiate(struct smbd_connection *info)
1076 {
1077 	int rc;
1078 	struct smbd_response *response = get_receive_buffer(info);
1079 
1080 	response->type = SMBD_NEGOTIATE_RESP;
1081 	rc = smbd_post_recv(info, response);
1082 	log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x iov.lkey=%x\n",
1083 		       rc, response->sge.addr,
1084 		       response->sge.length, response->sge.lkey);
1085 	if (rc)
1086 		return rc;
1087 
1088 	init_completion(&info->negotiate_completion);
1089 	info->negotiate_done = false;
1090 	rc = smbd_post_send_negotiate_req(info);
1091 	if (rc)
1092 		return rc;
1093 
1094 	rc = wait_for_completion_interruptible_timeout(
1095 		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);
1096 	log_rdma_event(INFO, "wait_for_completion_timeout rc=%d\n", rc);
1097 
1098 	if (info->negotiate_done)
1099 		return 0;
1100 
1101 	if (rc == 0)
1102 		rc = -ETIMEDOUT;
1103 	else if (rc == -ERESTARTSYS)
1104 		rc = -EINTR;
1105 	else
1106 		rc = -ENOTCONN;
1107 
1108 	return rc;
1109 }
1110 
1111 static void put_empty_packet(
1112 		struct smbd_connection *info, struct smbd_response *response)
1113 {
1114 	spin_lock(&info->empty_packet_queue_lock);
1115 	list_add_tail(&response->list, &info->empty_packet_queue);
1116 	info->count_empty_packet_queue++;
1117 	spin_unlock(&info->empty_packet_queue_lock);
1118 
1119 	queue_work(info->workqueue, &info->post_send_credits_work);
1120 }
1121 
1122 /*
1123  * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
1124  * This is a queue for reassembling upper layer payload and presenting it to
1125  * the upper layer. All incoming payloads go to the reassembly queue, regardless
1126  * of whether reassembly is required. The upper layer code reads from the queue
1127  * for all incoming payloads.
1128  * Put a received packet to the reassembly queue
1129  * response: the packet received
1130  * data_length: the size of payload in this packet
1131  */
1132 static void enqueue_reassembly(
1133 	struct smbd_connection *info,
1134 	struct smbd_response *response,
1135 	int data_length)
1136 {
1137 	spin_lock(&info->reassembly_queue_lock);
1138 	list_add_tail(&response->list, &info->reassembly_queue);
1139 	info->reassembly_queue_length++;
1140 	/*
1141 	 * Make sure reassembly_data_length is updated after list and
1142 	 * reassembly_queue_length are updated. On the dequeue side
1143 	 * reassembly_data_length is checked without a lock to determine
1144 	 * if reassembly_queue_length and list is up to date
1145 	 */
1146 	virt_wmb();
1147 	info->reassembly_data_length += data_length;
1148 	spin_unlock(&info->reassembly_queue_lock);
1149 	info->count_reassembly_queue++;
1150 	info->count_enqueue_reassembly_queue++;
1151 }
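/*
 * Note on the ordering above: virt_wmb() makes the list insertion and
 * reassembly_queue_length update visible before reassembly_data_length
 * grows. A dequeue path that checks reassembly_data_length without taking
 * reassembly_queue_lock is expected to pair this with a read barrier
 * (e.g. virt_rmb()) before walking the queue.
 */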
1152 
1153 /*
1154  * Get the first entry at the front of reassembly queue
1155  * Caller is responsible for locking
1156  * return value: the first entry if any, NULL if queue is empty
1157  */
1158 static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
1159 {
1160 	struct smbd_response *ret = NULL;
1161 
1162 	if (!list_empty(&info->reassembly_queue)) {
1163 		ret = list_first_entry(
1164 			&info->reassembly_queue,
1165 			struct smbd_response, list);
1166 	}
1167 	return ret;
1168 }
1169 
1170 static struct smbd_response *get_empty_queue_buffer(
1171 		struct smbd_connection *info)
1172 {
1173 	struct smbd_response *ret = NULL;
1174 	unsigned long flags;
1175 
1176 	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
1177 	if (!list_empty(&info->empty_packet_queue)) {
1178 		ret = list_first_entry(
1179 			&info->empty_packet_queue,
1180 			struct smbd_response, list);
1181 		list_del(&ret->list);
1182 		info->count_empty_packet_queue--;
1183 	}
1184 	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
1185 
1186 	return ret;
1187 }
1188 
1189 /*
1190  * Get a receive buffer
1191  * For each remote send, we need to post a receive. The receive buffers are
1192  * pre-allocated in advance.
1193  * return value: the receive buffer, NULL if none is available
1194  */
1195 static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
1196 {
1197 	struct smbd_response *ret = NULL;
1198 	unsigned long flags;
1199 
1200 	spin_lock_irqsave(&info->receive_queue_lock, flags);
1201 	if (!list_empty(&info->receive_queue)) {
1202 		ret = list_first_entry(
1203 			&info->receive_queue,
1204 			struct smbd_response, list);
1205 		list_del(&ret->list);
1206 		info->count_receive_queue--;
1207 		info->count_get_receive_buffer++;
1208 	}
1209 	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
1210 
1211 	return ret;
1212 }
1213 
1214 /*
1215  * Return a receive buffer
1216  * Upon returning of a receive buffer, we can post new receive and extend
1217  * more receive credits to remote peer. This is done immediately after a
1218  * receive buffer is returned.
1219  */
1220 static void put_receive_buffer(
1221 	struct smbd_connection *info, struct smbd_response *response)
1222 {
1223 	unsigned long flags;
1224 
1225 	ib_dma_unmap_single(info->id->device, response->sge.addr,
1226 		response->sge.length, DMA_FROM_DEVICE);
1227 
1228 	spin_lock_irqsave(&info->receive_queue_lock, flags);
1229 	list_add_tail(&response->list, &info->receive_queue);
1230 	info->count_receive_queue++;
1231 	info->count_put_receive_buffer++;
1232 	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
1233 
1234 	queue_work(info->workqueue, &info->post_send_credits_work);
1235 }
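/*
 * Receive buffer lifecycle in this file: allocate_receive_buffers() fills
 * receive_queue; get_receive_buffer()/get_empty_queue_buffer() hand a
 * buffer to smbd_post_recv(); recv_done() either moves it to the
 * reassembly queue (data) or the empty packet queue (no data); and
 * put_receive_buffer() returns it here, triggering post_send_credits_work
 * to re-post it and offer a new credit to the peer.
 */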
1236 
1237 /* Preallocate all receive buffers on transport establishment */
1238 static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
1239 {
1240 	int i;
1241 	struct smbd_response *response;
1242 
1243 	INIT_LIST_HEAD(&info->reassembly_queue);
1244 	spin_lock_init(&info->reassembly_queue_lock);
1245 	info->reassembly_data_length = 0;
1246 	info->reassembly_queue_length = 0;
1247 
1248 	INIT_LIST_HEAD(&info->receive_queue);
1249 	spin_lock_init(&info->receive_queue_lock);
1250 	info->count_receive_queue = 0;
1251 
1252 	INIT_LIST_HEAD(&info->empty_packet_queue);
1253 	spin_lock_init(&info->empty_packet_queue_lock);
1254 	info->count_empty_packet_queue = 0;
1255 
1256 	init_waitqueue_head(&info->wait_receive_queues);
1257 
1258 	for (i = 0; i < num_buf; i++) {
1259 		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
1260 		if (!response)
1261 			goto allocate_failed;
1262 
1263 		response->info = info;
1264 		list_add_tail(&response->list, &info->receive_queue);
1265 		info->count_receive_queue++;
1266 	}
1267 
1268 	return 0;
1269 
1270 allocate_failed:
1271 	while (!list_empty(&info->receive_queue)) {
1272 		response = list_first_entry(
1273 				&info->receive_queue,
1274 				struct smbd_response, list);
1275 		list_del(&response->list);
1276 		info->count_receive_queue--;
1277 
1278 		mempool_free(response, info->response_mempool);
1279 	}
1280 	return -ENOMEM;
1281 }
1282 
1283 static void destroy_receive_buffers(struct smbd_connection *info)
1284 {
1285 	struct smbd_response *response;
1286 
1287 	while ((response = get_receive_buffer(info)))
1288 		mempool_free(response, info->response_mempool);
1289 
1290 	while ((response = get_empty_queue_buffer(info)))
1291 		mempool_free(response, info->response_mempool);
1292 }
1293 
1294 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
1295 static void idle_connection_timer(struct work_struct *work)
1296 {
1297 	struct smbd_connection *info = container_of(
1298 					work, struct smbd_connection,
1299 					idle_timer_work.work);
1300 
1301 	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
1302 		log_keep_alive(ERR,
1303 			"error status info->keep_alive_requested=%d\n",
1304 			info->keep_alive_requested);
1305 		smbd_disconnect_rdma_connection(info);
1306 		return;
1307 	}
1308 
1309 	log_keep_alive(INFO, "about to send an empty idle message\n");
1310 	smbd_post_send_empty(info);
1311 
1312 	/* Setup the next idle timeout work */
1313 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
1314 			info->keep_alive_interval*HZ);
1315 }
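/*
 * Timing example (with the defaults in this file): smbd_keep_alive_interval
 * is 120, so the idle timer re-arms every 120 * HZ jiffies, i.e. an empty
 * keep-alive message is sent after roughly two minutes without outgoing
 * traffic (smbd_post_send() pushes the timer back on every send).
 */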
1316 
1317 /*
1318  * Destroy the transport and related RDMA and memory resources
1319  * Need to go through all the pending counters and make sure no one is using
1320  * the transport while it is destroyed
1321  */
1322 void smbd_destroy(struct TCP_Server_Info *server)
1323 {
1324 	struct smbd_connection *info = server->smbd_conn;
1325 	struct smbd_response *response;
1326 	unsigned long flags;
1327 
1328 	if (!info) {
1329 		log_rdma_event(INFO, "rdma session already destroyed\n");
1330 		return;
1331 	}
1332 
1333 	log_rdma_event(INFO, "destroying rdma session\n");
1334 	if (info->transport_status != SMBD_DISCONNECTED) {
1335 		rdma_disconnect(server->smbd_conn->id);
1336 		log_rdma_event(INFO, "wait for transport being disconnected\n");
1337 		wait_event_interruptible(
1338 			info->disconn_wait,
1339 			info->transport_status == SMBD_DISCONNECTED);
1340 	}
1341 
1342 	log_rdma_event(INFO, "destroying qp\n");
1343 	ib_drain_qp(info->id->qp);
1344 	rdma_destroy_qp(info->id);
1345 
1346 	log_rdma_event(INFO, "cancelling idle timer\n");
1347 	cancel_delayed_work_sync(&info->idle_timer_work);
1348 
1349 	log_rdma_event(INFO, "wait for all send posted to IB to finish\n");
1350 	wait_event(info->wait_send_pending,
1351 		atomic_read(&info->send_pending) == 0);
1352 
1353 	/* It's not possible for upper layer to get to reassembly */
1354 	log_rdma_event(INFO, "drain the reassembly queue\n");
1355 	do {
1356 		spin_lock_irqsave(&info->reassembly_queue_lock, flags);
1357 		response = _get_first_reassembly(info);
1358 		if (response) {
1359 			list_del(&response->list);
1360 			spin_unlock_irqrestore(
1361 				&info->reassembly_queue_lock, flags);
1362 			put_receive_buffer(info, response);
1363 		} else
1364 			spin_unlock_irqrestore(
1365 				&info->reassembly_queue_lock, flags);
1366 	} while (response);
1367 	info->reassembly_data_length = 0;
1368 
1369 	log_rdma_event(INFO, "free receive buffers\n");
1370 	wait_event(info->wait_receive_queues,
1371 		info->count_receive_queue + info->count_empty_packet_queue
1372 			== info->receive_credit_max);
1373 	destroy_receive_buffers(info);
1374 
1375 	/*
1376 	 * For performance reasons, memory registration and deregistration
1377 	 * are not locked by srv_mutex. It is possible some processes are
1378 	 * blocked on transport srv_mutex while holding memory registration.
1379 	 * Release the transport srv_mutex to allow them to hit the failure
1380 	 * path when sending data, and then release memory registrations.
1381 	 */
1382 	log_rdma_event(INFO, "freeing mr list\n");
1383 	wake_up_interruptible_all(&info->wait_mr);
1384 	while (atomic_read(&info->mr_used_count)) {
1385 		mutex_unlock(&server->srv_mutex);
1386 		msleep(1000);
1387 		mutex_lock(&server->srv_mutex);
1388 	}
1389 	destroy_mr_list(info);
1390 
1391 	ib_free_cq(info->send_cq);
1392 	ib_free_cq(info->recv_cq);
1393 	ib_dealloc_pd(info->pd);
1394 	rdma_destroy_id(info->id);
1395 
1396 	/* free mempools */
1397 	mempool_destroy(info->request_mempool);
1398 	kmem_cache_destroy(info->request_cache);
1399 
1400 	mempool_destroy(info->response_mempool);
1401 	kmem_cache_destroy(info->response_cache);
1402 
1403 	info->transport_status = SMBD_DESTROYED;
1404 
1405 	destroy_workqueue(info->workqueue);
1406 	log_rdma_event(INFO,  "rdma session destroyed\n");
1407 	kfree(info);
1408 	server->smbd_conn = NULL;
1409 }
1410 
1411 /*
1412  * Reconnect this SMBD connection, called from upper layer
1413  * return value: 0 on success, or actual error code
1414  */
1415 int smbd_reconnect(struct TCP_Server_Info *server)
1416 {
1417 	log_rdma_event(INFO, "reconnecting rdma session\n");
1418 
1419 	if (!server->smbd_conn) {
1420 		log_rdma_event(INFO, "rdma session already destroyed\n");
1421 		goto create_conn;
1422 	}
1423 
1424 	/*
1425 	 * This is possible if transport is disconnected and we haven't received
1426 	 * notification from RDMA, but upper layer has detected timeout
1427 	 */
1428 	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
1429 		log_rdma_event(INFO, "disconnecting transport\n");
1430 		smbd_destroy(server);
1431 	}
1432 
1433 create_conn:
1434 	log_rdma_event(INFO, "creating rdma session\n");
1435 	server->smbd_conn = smbd_get_connection(
1436 		server, (struct sockaddr *) &server->dstaddr);
1437 
1438 	if (server->smbd_conn)
1439 		cifs_dbg(VFS, "RDMA transport re-established\n");
1440 
1441 	return server->smbd_conn ? 0 : -ENOENT;
1442 }
1443 
1444 static void destroy_caches_and_workqueue(struct smbd_connection *info)
1445 {
1446 	destroy_receive_buffers(info);
1447 	destroy_workqueue(info->workqueue);
1448 	mempool_destroy(info->response_mempool);
1449 	kmem_cache_destroy(info->response_cache);
1450 	mempool_destroy(info->request_mempool);
1451 	kmem_cache_destroy(info->request_cache);
1452 }
1453 
1454 #define MAX_NAME_LEN	80
1455 static int allocate_caches_and_workqueue(struct smbd_connection *info)
1456 {
1457 	char name[MAX_NAME_LEN];
1458 	int rc;
1459 
1460 	scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
1461 	info->request_cache =
1462 		kmem_cache_create(
1463 			name,
1464 			sizeof(struct smbd_request) +
1465 				sizeof(struct smbd_data_transfer),
1466 			0, SLAB_HWCACHE_ALIGN, NULL);
1467 	if (!info->request_cache)
1468 		return -ENOMEM;
1469 
1470 	info->request_mempool =
1471 		mempool_create(info->send_credit_target, mempool_alloc_slab,
1472 			mempool_free_slab, info->request_cache);
1473 	if (!info->request_mempool)
1474 		goto out1;
1475 
1476 	scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
1477 	info->response_cache =
1478 		kmem_cache_create(
1479 			name,
1480 			sizeof(struct smbd_response) +
1481 				info->max_receive_size,
1482 			0, SLAB_HWCACHE_ALIGN, NULL);
1483 	if (!info->response_cache)
1484 		goto out2;
1485 
1486 	info->response_mempool =
1487 		mempool_create(info->receive_credit_max, mempool_alloc_slab,
1488 		       mempool_free_slab, info->response_cache);
1489 	if (!info->response_mempool)
1490 		goto out3;
1491 
1492 	scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
1493 	info->workqueue = create_workqueue(name);
1494 	if (!info->workqueue)
1495 		goto out4;
1496 
1497 	rc = allocate_receive_buffers(info, info->receive_credit_max);
1498 	if (rc) {
1499 		log_rdma_event(ERR, "failed to allocate receive buffers\n");
1500 		goto out5;
1501 	}
1502 
1503 	return 0;
1504 
1505 out5:
1506 	destroy_workqueue(info->workqueue);
1507 out4:
1508 	mempool_destroy(info->response_mempool);
1509 out3:
1510 	kmem_cache_destroy(info->response_cache);
1511 out2:
1512 	mempool_destroy(info->request_mempool);
1513 out1:
1514 	kmem_cache_destroy(info->request_cache);
1515 	return -ENOMEM;
1516 }
1517 
1518 /* Create a SMBD connection, called by upper layer */
1519 static struct smbd_connection *_smbd_get_connection(
1520 	struct TCP_Server_Info *server, struct sockaddr *dstaddr, int port)
1521 {
1522 	int rc;
1523 	struct smbd_connection *info;
1524 	struct rdma_conn_param conn_param;
1525 	struct ib_qp_init_attr qp_attr;
1526 	struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
1527 	struct ib_port_immutable port_immutable;
1528 	u32 ird_ord_hdr[2];
1529 
1530 	info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
1531 	if (!info)
1532 		return NULL;
1533 
1534 	info->transport_status = SMBD_CONNECTING;
1535 	rc = smbd_ia_open(info, dstaddr, port);
1536 	if (rc) {
1537 		log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
1538 		goto create_id_failed;
1539 	}
1540 
1541 	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
1542 	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
1543 		log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
1544 			       smbd_send_credit_target,
1545 			       info->id->device->attrs.max_cqe,
1546 			       info->id->device->attrs.max_qp_wr);
1547 		goto config_failed;
1548 	}
1549 
1550 	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
1551 	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
1552 		log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
1553 			       smbd_receive_credit_max,
1554 			       info->id->device->attrs.max_cqe,
1555 			       info->id->device->attrs.max_qp_wr);
1556 		goto config_failed;
1557 	}
1558 
1559 	info->receive_credit_max = smbd_receive_credit_max;
1560 	info->send_credit_target = smbd_send_credit_target;
1561 	info->max_send_size = smbd_max_send_size;
1562 	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
1563 	info->max_receive_size = smbd_max_receive_size;
1564 	info->keep_alive_interval = smbd_keep_alive_interval;
1565 
1566 	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
1567 		log_rdma_event(ERR,
1568 			"warning: device max_send_sge = %d too small\n",
1569 			info->id->device->attrs.max_send_sge);
1570 		log_rdma_event(ERR, "Queue Pair creation may fail\n");
1571 	}
1572 	if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
1573 		log_rdma_event(ERR,
1574 			"warning: device max_recv_sge = %d too small\n",
1575 			info->id->device->attrs.max_recv_sge);
1576 		log_rdma_event(ERR, "Queue Pair creation may fail\n");
1577 	}
1578 
1579 	info->send_cq = NULL;
1580 	info->recv_cq = NULL;
1581 	info->send_cq =
1582 		ib_alloc_cq_any(info->id->device, info,
1583 				info->send_credit_target, IB_POLL_SOFTIRQ);
1584 	if (IS_ERR(info->send_cq)) {
1585 		info->send_cq = NULL;
1586 		goto alloc_cq_failed;
1587 	}
1588 
1589 	info->recv_cq =
1590 		ib_alloc_cq_any(info->id->device, info,
1591 				info->receive_credit_max, IB_POLL_SOFTIRQ);
1592 	if (IS_ERR(info->recv_cq)) {
1593 		info->recv_cq = NULL;
1594 		goto alloc_cq_failed;
1595 	}
1596 
1597 	memset(&qp_attr, 0, sizeof(qp_attr));
1598 	qp_attr.event_handler = smbd_qp_async_error_upcall;
1599 	qp_attr.qp_context = info;
1600 	qp_attr.cap.max_send_wr = info->send_credit_target;
1601 	qp_attr.cap.max_recv_wr = info->receive_credit_max;
1602 	qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SGE;
1603 	qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_SGE;
1604 	qp_attr.cap.max_inline_data = 0;
1605 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
1606 	qp_attr.qp_type = IB_QPT_RC;
1607 	qp_attr.send_cq = info->send_cq;
1608 	qp_attr.recv_cq = info->recv_cq;
1609 	qp_attr.port_num = ~0;
1610 
1611 	rc = rdma_create_qp(info->id, info->pd, &qp_attr);
1612 	if (rc) {
1613 		log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
1614 		goto create_qp_failed;
1615 	}
1616 
1617 	memset(&conn_param, 0, sizeof(conn_param));
1618 	conn_param.initiator_depth = 0;
1619 
1620 	conn_param.responder_resources =
1621 		info->id->device->attrs.max_qp_rd_atom
1622 			< SMBD_CM_RESPONDER_RESOURCES ?
1623 		info->id->device->attrs.max_qp_rd_atom :
1624 		SMBD_CM_RESPONDER_RESOURCES;
1625 	info->responder_resources = conn_param.responder_resources;
1626 	log_rdma_mr(INFO, "responder_resources=%d\n",
1627 		info->responder_resources);
1628 
1629 	/* Need to send IRD/ORD in private data for iWARP */
1630 	info->id->device->ops.get_port_immutable(
1631 		info->id->device, info->id->port_num, &port_immutable);
1632 	if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
1633 		ird_ord_hdr[0] = info->responder_resources;
1634 		ird_ord_hdr[1] = 1;
1635 		conn_param.private_data = ird_ord_hdr;
1636 		conn_param.private_data_len = sizeof(ird_ord_hdr);
1637 	} else {
1638 		conn_param.private_data = NULL;
1639 		conn_param.private_data_len = 0;
1640 	}
1641 
1642 	conn_param.retry_count = SMBD_CM_RETRY;
1643 	conn_param.rnr_retry_count = SMBD_CM_RNR_RETRY;
1644 	conn_param.flow_control = 0;
1645 
1646 	log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
1647 		&addr_in->sin_addr, port);
1648 
1649 	init_waitqueue_head(&info->conn_wait);
1650 	init_waitqueue_head(&info->disconn_wait);
1651 	init_waitqueue_head(&info->wait_reassembly_queue);
1652 	rc = rdma_connect(info->id, &conn_param);
1653 	if (rc) {
1654 		log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
1655 		goto rdma_connect_failed;
1656 	}
1657 
1658 	wait_event_interruptible(
1659 		info->conn_wait, info->transport_status != SMBD_CONNECTING);
1660 
1661 	if (info->transport_status != SMBD_CONNECTED) {
1662 		log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
1663 		goto rdma_connect_failed;
1664 	}
1665 
1666 	log_rdma_event(INFO, "rdma_connect connected\n");
1667 
1668 	rc = allocate_caches_and_workqueue(info);
1669 	if (rc) {
1670 		log_rdma_event(ERR, "cache allocation failed\n");
1671 		goto allocate_cache_failed;
1672 	}
1673 
1674 	init_waitqueue_head(&info->wait_send_queue);
1675 	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
1676 	queue_delayed_work(info->workqueue, &info->idle_timer_work,
1677 		info->keep_alive_interval*HZ);
1678 
1679 	init_waitqueue_head(&info->wait_send_pending);
1680 	atomic_set(&info->send_pending, 0);
1681 
1682 	init_waitqueue_head(&info->wait_post_send);
1683 
1684 	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
1685 	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
1686 	info->new_credits_offered = 0;
1687 	spin_lock_init(&info->lock_new_credits_offered);
1688 
1689 	rc = smbd_negotiate(info);
1690 	if (rc) {
1691 		log_rdma_event(ERR, "smbd_negotiate rc=%d\n", rc);
1692 		goto negotiation_failed;
1693 	}
1694 
1695 	rc = allocate_mr_list(info);
1696 	if (rc) {
1697 		log_rdma_mr(ERR, "memory registration allocation failed\n");
1698 		goto allocate_mr_failed;
1699 	}
1700 
1701 	return info;
1702 
1703 allocate_mr_failed:
1704 	/* At this point, need to do a full transport shutdown */
1705 	server->smbd_conn = info;
1706 	smbd_destroy(server);
1707 	return NULL;
1708 
1709 negotiation_failed:
1710 	cancel_delayed_work_sync(&info->idle_timer_work);
1711 	destroy_caches_and_workqueue(info);
1712 	info->transport_status = SMBD_NEGOTIATE_FAILED;
1713 	init_waitqueue_head(&info->conn_wait);
1714 	rdma_disconnect(info->id);
1715 	wait_event(info->conn_wait,
1716 		info->transport_status == SMBD_DISCONNECTED);
1717 
1718 allocate_cache_failed:
1719 rdma_connect_failed:
1720 	rdma_destroy_qp(info->id);
1721 
1722 create_qp_failed:
1723 alloc_cq_failed:
1724 	if (info->send_cq)
1725 		ib_free_cq(info->send_cq);
1726 	if (info->recv_cq)
1727 		ib_free_cq(info->recv_cq);
1728 
1729 config_failed:
1730 	ib_dealloc_pd(info->pd);
1731 	rdma_destroy_id(info->id);
1732 
1733 create_id_failed:
1734 	kfree(info);
1735 	return NULL;
1736 }
1737 
1738 struct smbd_connection *smbd_get_connection(
1739 	struct TCP_Server_Info *server, struct sockaddr *dstaddr)
1740 {
1741 	struct smbd_connection *ret;
1742 	int port = SMBD_PORT;
1743 
1744 try_again:
1745 	ret = _smbd_get_connection(server, dstaddr, port);
1746 
1747 	/* Try SMB_PORT if SMBD_PORT doesn't work */
1748 	if (!ret && port == SMBD_PORT) {
1749 		port = SMB_PORT;
1750 		goto try_again;
1751 	}
1752 	return ret;
1753 }
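
/*
 * Hedged usage sketch (assumed caller-side code, not part of this file): a
 * transport setup path that owns a TCP_Server_Info would typically establish
 * the SMBDirect connection like this; the error value is illustrative only.
 *
 *	server->smbd_conn = smbd_get_connection(
 *		server, (struct sockaddr *)&server->dstaddr);
 *	if (!server->smbd_conn)
 *		return -ENOENT;
 *
 * smbd_get_connection() first tries SMBD_PORT and falls back to SMB_PORT if
 * the first attempt fails.
 */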
1754 
1755 /*
1756  * Receive data from receive reassembly queue
1757  * All the incoming data packets are placed in reassembly queue
1758  * buf: the buffer to read data into
1759  * size: the length of data to read
1760  * return value: actual data read
1761  * Note: this implementation copies the data from the reassembly queue to receive
1762  * buffers used by upper layer. This is not the optimal code path. A better way
1763  * to do it is to not have upper layer allocate its receive buffers but rather
1764  * borrow the buffer from reassembly queue, and return it after data is
1765  * consumed. But this will require more changes to upper layer code, and also
1766  * needs to consider packet boundaries while they are still being reassembled.
1767  */
1768 static int smbd_recv_buf(struct smbd_connection *info, char *buf,
1769 		unsigned int size)
1770 {
1771 	struct smbd_response *response;
1772 	struct smbd_data_transfer *data_transfer;
1773 	int to_copy, to_read, data_read, offset;
1774 	u32 data_length, remaining_data_length, data_offset;
1775 	int rc;
1776 
1777 again:
1778 	/*
1779 	 * No need to hold the reassembly queue lock all the time as we are
1780 	 * the only one reading from the front of the queue. The transport
1781 	 * may add more entries to the back of the queue at the same time
1782 	 */
1783 	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
1784 		info->reassembly_data_length);
1785 	if (info->reassembly_data_length >= size) {
1786 		int queue_length;
1787 		int queue_removed = 0;
1788 
1789 		/*
1790 		 * Need to make sure reassembly_data_length is read before
1791 		 * reading reassembly_queue_length and calling
1792 		 * _get_first_reassembly. This call is lock free
1793 		 * as we never read the end of the queue, which is being
1794 		 * updated in SOFTIRQ as more data is received
1795 		 */
1796 		virt_rmb();
1797 		queue_length = info->reassembly_queue_length;
1798 		data_read = 0;
1799 		to_read = size;
1800 		offset = info->first_entry_offset;
1801 		while (data_read < size) {
1802 			response = _get_first_reassembly(info);
1803 			data_transfer = smbd_response_payload(response);
1804 			data_length = le32_to_cpu(data_transfer->data_length);
1805 			remaining_data_length =
1806 				le32_to_cpu(
1807 					data_transfer->remaining_data_length);
1808 			data_offset = le32_to_cpu(data_transfer->data_offset);
1809 
1810 			/*
1811 			 * The upper layer expects RFC1002 length at the
1812 			 * beginning of the payload. Return it to indicate
1813 			 * the total length of the packet. This minimizes the
1814 			 * change to upper layer packet processing logic. This
1815 			 * will eventually be removed when an intermediate
1816 			 * transport layer is added
1817 			 */
1818 			if (response->first_segment && size == 4) {
1819 				unsigned int rfc1002_len =
1820 					data_length + remaining_data_length;
1821 				*((__be32 *)buf) = cpu_to_be32(rfc1002_len);
1822 				data_read = 4;
1823 				response->first_segment = false;
1824 				log_read(INFO, "returning rfc1002 length %d\n",
1825 					rfc1002_len);
1826 				goto read_rfc1002_done;
1827 			}
1828 
1829 			to_copy = min_t(int, data_length - offset, to_read);
1830 			memcpy(
1831 				buf + data_read,
1832 				(char *)data_transfer + data_offset + offset,
1833 				to_copy);
1834 
1835 			/* move on to the next buffer? */
1836 			if (to_copy == data_length - offset) {
1837 				queue_length--;
1838 				/*
1839 				 * No need to lock if we are not at the
1840 				 * end of the queue
1841 				 */
1842 				if (queue_length)
1843 					list_del(&response->list);
1844 				else {
1845 					spin_lock_irq(
1846 						&info->reassembly_queue_lock);
1847 					list_del(&response->list);
1848 					spin_unlock_irq(
1849 						&info->reassembly_queue_lock);
1850 				}
1851 				queue_removed++;
1852 				info->count_reassembly_queue--;
1853 				info->count_dequeue_reassembly_queue++;
1854 				put_receive_buffer(info, response);
1855 				offset = 0;
1856 				log_read(INFO, "put_receive_buffer offset=0\n");
1857 			} else
1858 				offset += to_copy;
1859 
1860 			to_read -= to_copy;
1861 			data_read += to_copy;
1862 
1863 			log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
1864 				 to_copy, data_length - offset,
1865 				 to_read, data_read, offset);
1866 		}
1867 
1868 		spin_lock_irq(&info->reassembly_queue_lock);
1869 		info->reassembly_data_length -= data_read;
1870 		info->reassembly_queue_length -= queue_removed;
1871 		spin_unlock_irq(&info->reassembly_queue_lock);
1872 
1873 		info->first_entry_offset = offset;
1874 		log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
1875 			 data_read, info->reassembly_data_length,
1876 			 info->first_entry_offset);
1877 read_rfc1002_done:
1878 		return data_read;
1879 	}
1880 
1881 	log_read(INFO, "wait_event on more data\n");
1882 	rc = wait_event_interruptible(
1883 		info->wait_reassembly_queue,
1884 		info->reassembly_data_length >= size ||
1885 			info->transport_status != SMBD_CONNECTED);
1886 	/* Don't return any data if interrupted */
1887 	if (rc)
1888 		return rc;
1889 
1890 	if (info->transport_status != SMBD_CONNECTED) {
1891 		log_read(ERR, "disconnected\n");
1892 		return -ECONNABORTED;
1893 	}
1894 
1895 	goto again;
1896 }
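
/*
 * Hedged usage sketch: smbd_recv_buf() is consumed in two steps that mirror
 * the RFC1002 special case above (size == 4). The variable names below are
 * illustrative, not taken from the caller.
 *
 *	__be32 rfc1002_hdr;
 *	unsigned int len;
 *
 *	smbd_recv_buf(info, (char *)&rfc1002_hdr, 4);
 *	len = be32_to_cpu(rfc1002_hdr);
 *	smbd_recv_buf(info, body_buf, len);
 *
 * The first call returns the total payload length encoded as an RFC1002
 * header; the second drains that many bytes from the reassembly queue.
 */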
1897 
1898 /*
1899  * Receive a page from receive reassembly queue
1900  * page: the page to read data into, starting at page_offset
1901  * to_read: the length of data to read
1902  * return value: actual data read
1903  */
1904 static int smbd_recv_page(struct smbd_connection *info,
1905 		struct page *page, unsigned int page_offset,
1906 		unsigned int to_read)
1907 {
1908 	int ret;
1909 	char *to_address;
1910 	void *page_address;
1911 
1912 	/* make sure we have the page ready for read */
1913 	ret = wait_event_interruptible(
1914 		info->wait_reassembly_queue,
1915 		info->reassembly_data_length >= to_read ||
1916 			info->transport_status != SMBD_CONNECTED);
1917 	if (ret)
1918 		return ret;
1919 
1920 	/* now we can read from reassembly queue and not sleep */
1921 	page_address = kmap_atomic(page);
1922 	to_address = (char *) page_address + page_offset;
1923 
1924 	log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
1925 		page, to_address, to_read);
1926 
1927 	ret = smbd_recv_buf(info, to_address, to_read);
1928 	kunmap_atomic(page_address);
1929 
1930 	return ret;
1931 }
1932 
1933 /*
1934  * Receive data from transport
1935  * msg: a msghdr pointing to the buffer; the iterator can be ITER_KVEC or ITER_BVEC
1936  * return: total bytes read, or 0. SMB Direct will not do partial read.
1937  */
1938 int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
1939 {
1940 	char *buf;
1941 	struct page *page;
1942 	unsigned int to_read, page_offset;
1943 	int rc;
1944 
1945 	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
1946 		/* It's a bug in the upper layer to get here */
1947 		cifs_dbg(VFS, "Invalid msg iter dir %u\n",
1948 			 iov_iter_rw(&msg->msg_iter));
1949 		rc = -EINVAL;
1950 		goto out;
1951 	}
1952 
1953 	switch (iov_iter_type(&msg->msg_iter)) {
1954 	case ITER_KVEC:
1955 		buf = msg->msg_iter.kvec->iov_base;
1956 		to_read = msg->msg_iter.kvec->iov_len;
1957 		rc = smbd_recv_buf(info, buf, to_read);
1958 		break;
1959 
1960 	case ITER_BVEC:
1961 		page = msg->msg_iter.bvec->bv_page;
1962 		page_offset = msg->msg_iter.bvec->bv_offset;
1963 		to_read = msg->msg_iter.bvec->bv_len;
1964 		rc = smbd_recv_page(info, page, page_offset, to_read);
1965 		break;
1966 
1967 	default:
1968 		/* It's a bug in the upper layer to get here */
1969 		cifs_dbg(VFS, "Invalid msg type %d\n",
1970 			 iov_iter_type(&msg->msg_iter));
1971 		rc = -EINVAL;
1972 	}
1973 
1974 out:
1975 	/* SMBDirect will read it all or nothing */
1976 	if (rc > 0)
1977 		msg->msg_iter.count = 0;
1978 	return rc;
1979 }
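
/*
 * Hedged usage sketch (assumed caller, not from this file): the read path
 * builds a kvec-backed msghdr and hands it to smbd_recv(). The iov_iter_kvec()
 * direction argument shown (READ) assumes the iterator API this file targets.
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { };
 *
 *	iov_iter_kvec(&msg.msg_iter, READ, &kv, 1, len);
 *	rc = smbd_recv(server->smbd_conn, &msg);
 *
 * smbd_recv() reads all or nothing; on success rc is the byte count and
 * msg.msg_iter.count is reset to 0.
 */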
1980 
1981 /*
1982  * Send data to transport
1983  * Each rqst is transported as a SMBDirect payload
1984  * rqst: the data to write
1985  * return value: 0 if successfully written, otherwise error code
1986  */
1987 int smbd_send(struct TCP_Server_Info *server,
1988 	int num_rqst, struct smb_rqst *rqst_array)
1989 {
1990 	struct smbd_connection *info = server->smbd_conn;
1991 	struct kvec vec;
1992 	int nvecs;
1993 	int size;
1994 	unsigned int buflen, remaining_data_length;
1995 	int start, i, j;
1996 	int max_iov_size =
1997 		info->max_send_size - sizeof(struct smbd_data_transfer);
1998 	struct kvec *iov;
1999 	int rc;
2000 	struct smb_rqst *rqst;
2001 	int rqst_idx;
2002 
2003 	if (info->transport_status != SMBD_CONNECTED) {
2004 		rc = -EAGAIN;
2005 		goto done;
2006 	}
2007 
2008 	/*
2009 	 * Add in the page array if there is one. The caller needs to set
2010 	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
2011 	 * ends at page boundary
2012 	 * ends at a page boundary
2013 	remaining_data_length = 0;
2014 	for (i = 0; i < num_rqst; i++)
2015 		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
2016 
2017 	if (remaining_data_length > info->max_fragmented_send_size) {
2018 		log_write(ERR, "payload size %d > max size %d\n",
2019 			remaining_data_length, info->max_fragmented_send_size);
2020 		rc = -EINVAL;
2021 		goto done;
2022 	}
2023 
2024 	log_write(INFO, "num_rqst=%d total length=%u\n",
2025 			num_rqst, remaining_data_length);
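
	/*
	 * Fragmentation arithmetic (illustrative; uses the values set up at
	 * connect time, not anything read from the wire): each SMBDirect
	 * data-transfer message carries at most
	 *	max_iov_size = info->max_send_size -
	 *			sizeof(struct smbd_data_transfer)
	 * payload bytes, so a request of total length L is sent as roughly
	 * DIV_ROUND_UP(L, max_iov_size) messages. remaining_data_length in
	 * each header tells the peer how much of the fragmented payload is
	 * still outstanding after the current message.
	 */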
2026 
2027 	rqst_idx = 0;
2028 next_rqst:
2029 	rqst = &rqst_array[rqst_idx];
2030 	iov = rqst->rq_iov;
2031 
2032 	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
2033 		rqst_idx, smb_rqst_len(server, rqst));
2034 	for (i = 0; i < rqst->rq_nvec; i++)
2035 		dump_smb(iov[i].iov_base, iov[i].iov_len);
2036 
2037 
2038 	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
2039 		  rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
2040 		  rqst->rq_tailsz, smb_rqst_len(server, rqst));
2041 
2042 	start = i = 0;
2043 	buflen = 0;
2044 	while (true) {
2045 		buflen += iov[i].iov_len;
2046 		if (buflen > max_iov_size) {
2047 			if (i > start) {
2048 				remaining_data_length -=
2049 					(buflen-iov[i].iov_len);
2050 				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
2051 					  start, i, i - start,
2052 					  remaining_data_length);
2053 				rc = smbd_post_send_data(
2054 					info, &iov[start], i-start,
2055 					remaining_data_length);
2056 				if (rc)
2057 					goto done;
2058 			} else {
2059 				/* iov[start] is too big, break it */
2060 				nvecs = (buflen+max_iov_size-1)/max_iov_size;
2061 				log_write(INFO, "iov[%d] iov_base=%p buflen=%d break to %d vectors\n",
2062 					  start, iov[start].iov_base,
2063 					  buflen, nvecs);
2064 				for (j = 0; j < nvecs; j++) {
2065 					vec.iov_base =
2066 						(char *)iov[start].iov_base +
2067 						j*max_iov_size;
2068 					vec.iov_len = max_iov_size;
2069 					if (j == nvecs-1)
2070 						vec.iov_len =
2071 							buflen -
2072 							max_iov_size*(nvecs-1);
2073 					remaining_data_length -= vec.iov_len;
2074 					log_write(INFO,
2075 						"sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
2076 						  j, vec.iov_base, vec.iov_len,
2077 						  remaining_data_length);
2078 					rc = smbd_post_send_data(
2079 						info, &vec, 1,
2080 						remaining_data_length);
2081 					if (rc)
2082 						goto done;
2083 				}
2084 				i++;
2085 				if (i == rqst->rq_nvec)
2086 					break;
2087 			}
2088 			start = i;
2089 			buflen = 0;
2090 		} else {
2091 			i++;
2092 			if (i == rqst->rq_nvec) {
2093 				/* send out all remaining vecs */
2094 				remaining_data_length -= buflen;
2095 				log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
2096 					  start, i, i - start,
2097 					  remaining_data_length);
2098 				rc = smbd_post_send_data(info, &iov[start],
2099 					i-start, remaining_data_length);
2100 				if (rc)
2101 					goto done;
2102 				break;
2103 			}
2104 		}
2105 		log_write(INFO, "looping i=%d buflen=%d\n", i, buflen);
2106 	}
2107 
2108 	/* now sending pages if there are any */
2109 	for (i = 0; i < rqst->rq_npages; i++) {
2110 		unsigned int offset;
2111 
2112 		rqst_page_get_length(rqst, i, &buflen, &offset);
2113 		nvecs = (buflen + max_iov_size - 1) / max_iov_size;
2114 		log_write(INFO, "sending pages buflen=%d nvecs=%d\n",
2115 			buflen, nvecs);
2116 		for (j = 0; j < nvecs; j++) {
2117 			size = max_iov_size;
2118 			if (j == nvecs-1)
2119 				size = buflen - j*max_iov_size;
2120 			remaining_data_length -= size;
2121 			log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
2122 				  i, j * max_iov_size + offset, size,
2123 				  remaining_data_length);
2124 			rc = smbd_post_send_page(
2125 				info, rqst->rq_pages[i],
2126 				j*max_iov_size + offset,
2127 				size, remaining_data_length);
2128 			if (rc)
2129 				goto done;
2130 		}
2131 	}
2132 
2133 	rqst_idx++;
2134 	if (rqst_idx < num_rqst)
2135 		goto next_rqst;
2136 
2137 done:
2138 	/*
2139 	 * As an optimization, we don't wait for individual I/O to finish
2140 	 * before sending the next one.
2141 	 * Send them all and wait for the pending send count to get to 0;
2142 	 * that means all the I/Os have been sent out and we are good to return
2143 	 */
2144 
2145 	wait_event(info->wait_send_pending,
2146 		atomic_read(&info->send_pending) == 0);
2147 
2148 	return rc;
2149 }
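
/*
 * Hedged usage sketch: the SMB2 transport layer is assumed to call
 * smbd_send() with the fully assembled request array, for example:
 *
 *	if (server->smbd_conn)
 *		rc = smbd_send(server, num_rqst, rqst_array);
 *
 * The call fragments each smb_rqst into data-transfer messages no larger
 * than info->max_send_size and only returns once info->send_pending has
 * drained to zero.
 */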
2150 
2151 static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
2152 {
2153 	struct smbd_mr *mr;
2154 	struct ib_cqe *cqe;
2155 
2156 	if (wc->status) {
2157 		log_rdma_mr(ERR, "status=%d\n", wc->status);
2158 		cqe = wc->wr_cqe;
2159 		mr = container_of(cqe, struct smbd_mr, cqe);
2160 		smbd_disconnect_rdma_connection(mr->conn);
2161 	}
2162 }
2163 
2164 /*
2165  * The work queue function that recovers MRs
2166  * We need to call ib_dereg_mr() and ib_alloc_mr() before this MR can be used
2167  * again. Both calls are slow, so finish them in a workqueue. This will not
2168  * block the I/O path.
2169  * There is one workqueue that recovers MRs, there is no need to lock as the
2170  * I/O requests calling smbd_register_mr will never update the links in the
2171  * mr_list.
2172  */
2173 static void smbd_mr_recovery_work(struct work_struct *work)
2174 {
2175 	struct smbd_connection *info =
2176 		container_of(work, struct smbd_connection, mr_recovery_work);
2177 	struct smbd_mr *smbdirect_mr;
2178 	int rc;
2179 
2180 	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
2181 		if (smbdirect_mr->state == MR_ERROR) {
2182 
2183 			/* recover this MR entry */
2184 			rc = ib_dereg_mr(smbdirect_mr->mr);
2185 			if (rc) {
2186 				log_rdma_mr(ERR,
2187 					"ib_dereg_mr failed rc=%x\n",
2188 					rc);
2189 				smbd_disconnect_rdma_connection(info);
2190 				continue;
2191 			}
2192 
2193 			smbdirect_mr->mr = ib_alloc_mr(
2194 				info->pd, info->mr_type,
2195 				info->max_frmr_depth);
2196 			if (IS_ERR(smbdirect_mr->mr)) {
2197 				log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
2198 					    info->mr_type,
2199 					    info->max_frmr_depth);
2200 				smbd_disconnect_rdma_connection(info);
2201 				continue;
2202 			}
2203 		} else
2204 			/* This MR is being used, don't recover it */
2205 			continue;
2206 
2207 		smbdirect_mr->state = MR_READY;
2208 
2209 		/* smbdirect_mr->state is updated by this function
2210 		 * and is read and updated by I/O issuing CPUs trying
2211 		 * to get a MR, the call to atomic_inc_return
2212 		 * implies a memory barrier and guarantees this
2213 		 * value is updated before waking up any calls to
2214 		 * get_mr() from the I/O issuing CPUs
2215 		 */
2216 		if (atomic_inc_return(&info->mr_ready_count) == 1)
2217 			wake_up_interruptible(&info->wait_mr);
2218 	}
2219 }
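
/*
 * Summary of the MR_ERROR hand-off (derived from the functions below, added
 * for clarity): the failure paths of smbd_register_mr() and local_inv_done()
 * mark an MR as MR_ERROR, and smbd_deregister_mr() queues mr_recovery_work
 * for any MR that is not MR_INVALIDATED, so the slow ib_dereg_mr()/
 * ib_alloc_mr() pair above always runs in workqueue context rather than on
 * the I/O submission path.
 */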
2220 
2221 static void destroy_mr_list(struct smbd_connection *info)
2222 {
2223 	struct smbd_mr *mr, *tmp;
2224 
2225 	cancel_work_sync(&info->mr_recovery_work);
2226 	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
2227 		if (mr->state == MR_INVALIDATED)
2228 			ib_dma_unmap_sg(info->id->device, mr->sgl,
2229 				mr->sgl_count, mr->dir);
2230 		ib_dereg_mr(mr->mr);
2231 		kfree(mr->sgl);
2232 		kfree(mr);
2233 	}
2234 }
2235 
2236 /*
2237  * Allocate MRs used for RDMA read/write
2238  * The number of MRs will not exceed hardware capability in responder_resources
2239  * All MRs are kept in mr_list. The MR can be recovered after it's used
2240  * Recovery is done in smbd_mr_recovery_work. The content of list entry changes
2241  * as MRs are used and recovered for I/O, but the list links will not change
2242  */
2243 static int allocate_mr_list(struct smbd_connection *info)
2244 {
2245 	int i;
2246 	struct smbd_mr *smbdirect_mr, *tmp;
2247 
2248 	INIT_LIST_HEAD(&info->mr_list);
2249 	init_waitqueue_head(&info->wait_mr);
2250 	spin_lock_init(&info->mr_list_lock);
2251 	atomic_set(&info->mr_ready_count, 0);
2252 	atomic_set(&info->mr_used_count, 0);
2253 	init_waitqueue_head(&info->wait_for_mr_cleanup);
2254 	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);
2255 	/* Allocate more MRs (2x) than hardware responder_resources */
2256 	for (i = 0; i < info->responder_resources * 2; i++) {
2257 		smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
2258 		if (!smbdirect_mr)
2259 			goto out;
2260 		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
2261 					info->max_frmr_depth);
2262 		if (IS_ERR(smbdirect_mr->mr)) {
2263 			log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
2264 				    info->mr_type, info->max_frmr_depth);
2265 			goto out;
2266 		}
2267 		smbdirect_mr->sgl = kcalloc(
2268 					info->max_frmr_depth,
2269 					sizeof(struct scatterlist),
2270 					GFP_KERNEL);
2271 		if (!smbdirect_mr->sgl) {
2272 			log_rdma_mr(ERR, "failed to allocate sgl\n");
2273 			ib_dereg_mr(smbdirect_mr->mr);
2274 			goto out;
2275 		}
2276 		smbdirect_mr->state = MR_READY;
2277 		smbdirect_mr->conn = info;
2278 
2279 		list_add_tail(&smbdirect_mr->list, &info->mr_list);
2280 		atomic_inc(&info->mr_ready_count);
2281 	}
2282 	return 0;
2283 
2284 out:
2285 	kfree(smbdirect_mr);
2286 
2287 	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
2288 		list_del(&smbdirect_mr->list);
2289 		ib_dereg_mr(smbdirect_mr->mr);
2290 		kfree(smbdirect_mr->sgl);
2291 		kfree(smbdirect_mr);
2292 	}
2293 	return -ENOMEM;
2294 }
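
/*
 * Sizing note (derived from the connection setup above): responder_resources
 * is the smaller of the device's max_qp_rd_atom and
 * SMBD_CM_RESPONDER_RESOURCES, and this list holds twice that many MRs, each
 * able to map at most info->max_frmr_depth pages.
 */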
2295 
2296 /*
2297  * Get a MR from mr_list. This function waits until there is at least one
2298  * MR available in the list. It may access the list while the
2299  * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
2300  * as they never modify the same places. However, there may be several CPUs
2301  * issuing I/O trying to get an MR at the same time; mr_list_lock is used to
2302  * protect this situation.
2303  */
2304 static struct smbd_mr *get_mr(struct smbd_connection *info)
2305 {
2306 	struct smbd_mr *ret;
2307 	int rc;
2308 again:
2309 	rc = wait_event_interruptible(info->wait_mr,
2310 		atomic_read(&info->mr_ready_count) ||
2311 		info->transport_status != SMBD_CONNECTED);
2312 	if (rc) {
2313 		log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
2314 		return NULL;
2315 	}
2316 
2317 	if (info->transport_status != SMBD_CONNECTED) {
2318 		log_rdma_mr(ERR, "info->transport_status=%x\n",
2319 			info->transport_status);
2320 		return NULL;
2321 	}
2322 
2323 	spin_lock(&info->mr_list_lock);
2324 	list_for_each_entry(ret, &info->mr_list, list) {
2325 		if (ret->state == MR_READY) {
2326 			ret->state = MR_REGISTERED;
2327 			spin_unlock(&info->mr_list_lock);
2328 			atomic_dec(&info->mr_ready_count);
2329 			atomic_inc(&info->mr_used_count);
2330 			return ret;
2331 		}
2332 	}
2333 
2334 	spin_unlock(&info->mr_list_lock);
2335 	/*
2336 	 * It is possible that we could fail to get an MR because other processes may
2337 	 * try to acquire an MR at the same time. If this is the case, retry.
2338 	 */
2339 	goto again;
2340 }
2341 
2342 /*
2343  * Register memory for RDMA read/write
2344  * pages[]: the list of pages to register memory with
2345  * num_pages: the number of pages to register
2346  * tailsz: if non-zero, the bytes to register in the last page
2347  * writing: true if this is a RDMA write (SMB read), false for RDMA read
2348  * need_invalidate: true if this MR needs to be locally invalidated after I/O
2349  * return value: the MR registered, NULL if failed.
2350  */
2351 struct smbd_mr *smbd_register_mr(
2352 	struct smbd_connection *info, struct page *pages[], int num_pages,
2353 	int offset, int tailsz, bool writing, bool need_invalidate)
2354 {
2355 	struct smbd_mr *smbdirect_mr;
2356 	int rc, i;
2357 	enum dma_data_direction dir;
2358 	struct ib_reg_wr *reg_wr;
2359 
2360 	if (num_pages > info->max_frmr_depth) {
2361 		log_rdma_mr(ERR, "num_pages=%d max_frmr_depth=%d\n",
2362 			num_pages, info->max_frmr_depth);
2363 		return NULL;
2364 	}
2365 
2366 	smbdirect_mr = get_mr(info);
2367 	if (!smbdirect_mr) {
2368 		log_rdma_mr(ERR, "get_mr returning NULL\n");
2369 		return NULL;
2370 	}
2371 	smbdirect_mr->need_invalidate = need_invalidate;
2372 	smbdirect_mr->sgl_count = num_pages;
2373 	sg_init_table(smbdirect_mr->sgl, num_pages);
2374 
2375 	log_rdma_mr(INFO, "num_pages=0x%x offset=0x%x tailsz=0x%x\n",
2376 			num_pages, offset, tailsz);
2377 
2378 	if (num_pages == 1) {
2379 		sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);
2380 		goto skip_multiple_pages;
2381 	}
2382 
2383 	/* We have at least two pages to register */
2384 	sg_set_page(
2385 		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);
2386 	i = 1;
2387 	while (i < num_pages - 1) {
2388 		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
2389 		i++;
2390 	}
2391 	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
2392 		tailsz ? tailsz : PAGE_SIZE, 0);
2393 
2394 skip_multiple_pages:
2395 	dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2396 	smbdirect_mr->dir = dir;
2397 	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);
2398 	if (!rc) {
2399 		log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
2400 			num_pages, dir, rc);
2401 		goto dma_map_error;
2402 	}
2403 
2404 	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
2405 		NULL, PAGE_SIZE);
2406 	if (rc != num_pages) {
2407 		log_rdma_mr(ERR,
2408 			"ib_map_mr_sg failed rc = %d num_pages = %x\n",
2409 			rc, num_pages);
2410 		goto map_mr_error;
2411 	}
2412 
2413 	ib_update_fast_reg_key(smbdirect_mr->mr,
2414 		ib_inc_rkey(smbdirect_mr->mr->rkey));
2415 	reg_wr = &smbdirect_mr->wr;
2416 	reg_wr->wr.opcode = IB_WR_REG_MR;
2417 	smbdirect_mr->cqe.done = register_mr_done;
2418 	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
2419 	reg_wr->wr.num_sge = 0;
2420 	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
2421 	reg_wr->mr = smbdirect_mr->mr;
2422 	reg_wr->key = smbdirect_mr->mr->rkey;
2423 	reg_wr->access = writing ?
2424 			IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
2425 			IB_ACCESS_REMOTE_READ;
2426 
2427 	/*
2428 	 * There is no need to wait for completion of ib_post_send
2429 	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
2430 	 * on the next ib_post_send when we actually send I/O to the remote peer
2431 	 */
2432 	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
2433 	if (!rc)
2434 		return smbdirect_mr;
2435 
2436 	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
2437 		rc, reg_wr->key);
2438 
2439 	/* If all failed, attempt to recover this MR by setting it to MR_ERROR */
2440 map_mr_error:
2441 	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
2442 		smbdirect_mr->sgl_count, smbdirect_mr->dir);
2443 
2444 dma_map_error:
2445 	smbdirect_mr->state = MR_ERROR;
2446 	if (atomic_dec_and_test(&info->mr_used_count))
2447 		wake_up(&info->wait_for_mr_cleanup);
2448 
2449 	smbd_disconnect_rdma_connection(info);
2450 
2451 	return NULL;
2452 }
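
/*
 * Hedged usage sketch of the register/deregister pairing (the surrounding
 * flow is illustrative; only the struct ib_mr fields rkey/iova/length are
 * assumed from the RDMA core):
 *
 *	struct smbd_mr *mr;
 *
 *	mr = smbd_register_mr(info, pages, npages, offset, tailsz,
 *			      writing, need_invalidate);
 *	if (!mr)
 *		return -EAGAIN;
 *	... advertise mr->mr->rkey, mr->mr->iova and mr->mr->length to the
 *	    peer in the SMB2 request, wait for the response ...
 *	smbd_deregister_mr(mr);
 *
 * smbd_deregister_mr() performs local invalidation when need_invalidate was
 * set, then recycles or recovers the MR.
 */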
2453 
2454 static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
2455 {
2456 	struct smbd_mr *smbdirect_mr;
2457 	struct ib_cqe *cqe;
2458 
2459 	cqe = wc->wr_cqe;
2460 	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
2461 	smbdirect_mr->state = MR_INVALIDATED;
2462 	if (wc->status != IB_WC_SUCCESS) {
2463 		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
2464 		smbdirect_mr->state = MR_ERROR;
2465 	}
2466 	complete(&smbdirect_mr->invalidate_done);
2467 }
2468 
2469 /*
2470  * Deregister a MR after I/O is done
2471  * This function may wait if remote invalidation is not used
2472  * and we have to locally invalidate the buffer to prevent the data from being
2473  * modified by the remote peer after the upper layer consumes it
2474  */
2475 int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
2476 {
2477 	struct ib_send_wr *wr;
2478 	struct smbd_connection *info = smbdirect_mr->conn;
2479 	int rc = 0;
2480 
2481 	if (smbdirect_mr->need_invalidate) {
2482 		/* Need to finish local invalidation before returning */
2483 		wr = &smbdirect_mr->inv_wr;
2484 		wr->opcode = IB_WR_LOCAL_INV;
2485 		smbdirect_mr->cqe.done = local_inv_done;
2486 		wr->wr_cqe = &smbdirect_mr->cqe;
2487 		wr->num_sge = 0;
2488 		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
2489 		wr->send_flags = IB_SEND_SIGNALED;
2490 
2491 		init_completion(&smbdirect_mr->invalidate_done);
2492 		rc = ib_post_send(info->id->qp, wr, NULL);
2493 		if (rc) {
2494 			log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
2495 			smbd_disconnect_rdma_connection(info);
2496 			goto done;
2497 		}
2498 		wait_for_completion(&smbdirect_mr->invalidate_done);
2499 		smbdirect_mr->need_invalidate = false;
2500 	} else
2501 		/*
2502 		 * For remote invalidation, just set it to MR_INVALIDATED
2503 		 * and defer to mr_recovery_work to recover the MR for next use
2504 		 */
2505 		smbdirect_mr->state = MR_INVALIDATED;
2506 
2507 	if (smbdirect_mr->state == MR_INVALIDATED) {
2508 		ib_dma_unmap_sg(
2509 			info->id->device, smbdirect_mr->sgl,
2510 			smbdirect_mr->sgl_count,
2511 			smbdirect_mr->dir);
2512 		smbdirect_mr->state = MR_READY;
2513 		if (atomic_inc_return(&info->mr_ready_count) == 1)
2514 			wake_up_interruptible(&info->wait_mr);
2515 	} else
2516 		/*
2517 		 * Schedule the work to do MR recovery for future I/Os; MR
2518 		 * recovery is slow and we don't want it to block the current I/O
2519 		 */
2520 		queue_work(info->workqueue, &info->mr_recovery_work);
2521 
2522 done:
2523 	if (atomic_dec_and_test(&info->mr_used_count))
2524 		wake_up(&info->wait_for_mr_cleanup);
2525 
2526 	return rc;
2527 }
2528