// SPDX-License-Identifier: LGPL-2.1
/*
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

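/*
 * Default mid callback: the mid was allocated for a synchronous request,
 * so callback_data points at the task that queued it - wake it up.
 */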
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

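/*
 * Allocate and initialize a mid (multiplex id) entry for the request in
 * @smb_buffer. The entry starts with a single reference, the default
 * synchronous callback, and a reference to the allocating task so that
 * the arriving response can wake it.
 */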
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	memset(temp, 0, sizeof(struct mid_q_entry));
	kref_init(&temp->refcount);
	temp->mid = get_mid(smb_buffer);
	temp->pid = current->pid;
	temp->command = cpu_to_le16(smb_buffer->Command);
	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/* easier to use jiffies */
	/* when mid allocated can be before when sent */
	temp->when_alloc = jiffies;
	temp->server = server;

	/*
	 * The default is for the mid to be synchronous, so the
	 * default callback just wakes up the current task.
	 */
	get_task_struct(current);
	temp->creator = current;
	temp->callback = cifs_wake_up_task;
	temp->callback_data = current;

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

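/*
 * Final kref release, called under GlobalMid_Lock via
 * cifs_mid_q_entry_release(): hand a cancelled-but-answered mid to the
 * protocol's handle_cancelled_mid hook, free the response buffer, update
 * the optional latency statistics, drop the creator task reference and
 * return the entry to the mempool.
 */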
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
	struct mid_q_entry *midEntry =
			container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	__u16 smb_cmd = le16_to_cpu(midEntry->command);
	unsigned long now;
	unsigned long roundtrip_time;
#endif
	struct TCP_Server_Info *server = midEntry->server;

	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
	    server->ops->handle_cancelled_mid)
		server->ops->handle_cancelled_mid(midEntry, server);

	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	if (now < midEntry->when_alloc)
		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
	roundtrip_time = now - midEntry->when_alloc;

	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
			server->slowest_cmd[smb_cmd] = roundtrip_time;
			server->fastest_cmd[smb_cmd] = roundtrip_time;
		} else {
			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
				server->slowest_cmd[smb_cmd] = roundtrip_time;
			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
				server->fastest_cmd[smb_cmd] = roundtrip_time;
		}
		cifs_stats_inc(&server->num_cmds[smb_cmd]);
		server->time_per_cmd[smb_cmd] += roundtrip_time;
	}
	/*
	 * Commands taking longer than one second (default) can be indications
	 * that something is wrong, unless it is quite a slow link or a very
	 * busy server. Note that this calculation is unlikely or impossible to
	 * wrap as long as slow_rsp_threshold is not set way above the
	 * recommended max value (32767, i.e. 9 hours), and it is generally
	 * harmless even if wrong since it only affects debug counters - so
	 * leave the calculation as a simple comparison rather than doing
	 * multiple conversions and overflow checks.
	 */
	if ((slow_rsp_threshold != 0) &&
	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
	    (midEntry->command != command)) {
		/*
		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
		 * NB: le16_to_cpu returns unsigned so it cannot be negative
		 */
		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
			       midEntry->when_sent, midEntry->when_received);
		if (cifsFYI & CIFS_TIMER) {
			pr_debug("slow rsp: cmd %d mid %llu",
				 midEntry->command, midEntry->mid);
			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
				  now - midEntry->when_alloc,
				  now - midEntry->when_sent,
				  now - midEntry->when_received);
		}
	}
#endif
	put_task_struct(midEntry->creator);

	mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
	spin_lock(&GlobalMid_Lock);
	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
	spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
	cifs_mid_q_entry_release(midEntry);
}

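/*
 * Unlink the mid from the pending queue (unless the demultiplex thread
 * already unlinked it) and drop the queue's reference to it.
 */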
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	if (!(mid->mid_flags & MID_DELETED)) {
		list_del_init(&mid->qhead);
		mid->mid_flags |= MID_DELETED;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
	      size_t *sent)
{
	int rc = 0;
	int retries = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (server->noblocksnd)
		smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg->msg_flags = MSG_NOSIGNAL;

	while (msg_data_left(smb_msg)) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = sock_sendmsg(ssocket, smb_msg);
		if (rc == -EAGAIN) {
			retries++;
			if (retries >= 14 ||
			    (!server->noblocksnd && (retries > 2))) {
				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				return -EAGAIN;
			}
			msleep(1 << retries);
			continue;
		}

		if (rc < 0)
			return rc;

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_server_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		/* send was at least partially successful */
		*sent += rc;
		retries = 0; /* in case we get ENOSPC on the next send */
	}
	return 0;
}

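/*
 * smb_rqst_len - total number of bytes @rqst will occupy on the wire:
 * the iov array (skipping a leading 4-byte RFC1002 length iov on SMB2+,
 * where the marker is generated separately) plus any attached page array.
 */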
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov;
	int nvec;
	unsigned long buflen = 0;

	if (server->vals->header_preamble_size == 0 &&
	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
		iov = &rqst->rq_iov[1];
		nvec = rqst->rq_nvec - 1;
	} else {
		iov = rqst->rq_iov;
		nvec = rqst->rq_nvec;
	}

	/* total up iov array first */
	for (i = 0; i < nvec; i++)
		buflen += iov[i].iov_len;

	/*
	 * Add in the page array if there is one. The caller needs to make
	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
	 * PAGE_SIZE.
	 */
	if (rqst->rq_npages) {
		if (rqst->rq_npages == 1)
			buflen += rqst->rq_tailsz;
		else {
			/*
			 * If there is more than one page, calculate the
			 * buffer length based on rq_offset and rq_tailsz
			 */
			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
					rqst->rq_offset;
			buflen += rqst->rq_tailsz;
		}
	}

	return buflen;
}

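/*
 * Send one or more SMB requests on @server's socket: cork the socket,
 * emit the RFC1002 length marker (on SMB2+), then each request's iov
 * array and page array, all with signals blocked so that an interruption
 * cannot cause a partial send. If a partial send happens anyway, mark
 * the session for reconnect so the server discards the truncated packet.
 */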
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
		struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov;
	int n_vec;
	unsigned int send_length = 0;
	unsigned int i, j;
	sigset_t mask, oldmask;
	size_t total_len = 0, sent, size;
	struct socket *ssocket = server->ssocket;
	struct msghdr smb_msg = {};
	__be32 rfc1002_marker;

	cifs_in_send_inc(server);
	if (cifs_rdma_enabled(server)) {
		/* return -EAGAIN when connecting or reconnecting */
		rc = -EAGAIN;
		if (server->smbd_conn)
			rc = smbd_send(server, num_rqst, rqst);
		goto smbd_done;
	}

	rc = -EAGAIN;
	if (ssocket == NULL)
		goto out;

	rc = -ERESTARTSYS;
	if (fatal_signal_pending(current)) {
		cifs_dbg(FYI, "signal pending before send request\n");
		goto out;
	}

	rc = 0;
	/* cork the socket */
	tcp_sock_set_cork(ssocket->sk, true);

	for (j = 0; j < num_rqst; j++)
		send_length += smb_rqst_len(server, &rqst[j]);
	rfc1002_marker = cpu_to_be32(send_length);

	/*
	 * We should not allow signals to interrupt the network send because
	 * any partial send will cause session reconnects thus increasing
	 * latency of system calls and overload a server with unnecessary
	 * requests.
	 */

	sigfillset(&mask);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);

	/* Generate a rfc1002 marker for SMB2+ */
	if (server->vals->header_preamble_size == 0) {
		struct kvec hiov = {
			.iov_base = &rfc1002_marker,
			.iov_len  = 4
		};
		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;
		send_length += 4;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

	for (j = 0; j < num_rqst; j++) {
		iov = rqst[j].rq_iov;
		n_vec = rqst[j].rq_nvec;

		size = 0;
		for (i = 0; i < n_vec; i++) {
			dump_smb(iov[i].iov_base, iov[i].iov_len);
			size += iov[i].iov_len;
		}

		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

		rc = smb_send_kvec(server, &smb_msg, &sent);
		if (rc < 0)
			goto unmask;

		total_len += sent;

		/* now walk the page array and send each page in it */
		for (i = 0; i < rqst[j].rq_npages; i++) {
			struct bio_vec bvec;

			bvec.bv_page = rqst[j].rq_pages[i];
			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
					     &bvec.bv_offset);

			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
				      &bvec, 1, bvec.bv_len);
			rc = smb_send_kvec(server, &smb_msg, &sent);
			if (rc < 0)
				break;

			total_len += sent;
		}
	}

unmask:
	sigprocmask(SIG_SETMASK, &oldmask, NULL);

	/*
	 * If signal is pending but we have already sent the whole packet to
	 * the server we need to return success status to allow a corresponding
	 * mid entry to be kept in the pending requests queue thus allowing
	 * to handle responses from the server by the client.
	 *
	 * If only part of the packet has been sent there is no need to hide
	 * interrupt because the session will be reconnected anyway, so there
	 * won't be any response from the server to handle.
	 */

	if (signal_pending(current) && (total_len != send_length)) {
		cifs_dbg(FYI, "signal is pending after attempt to send\n");
		rc = -ERESTARTSYS;
	}

	/* uncork it */
	tcp_sock_set_cork(ssocket->sk, false);

	if ((total_len > 0) && (total_len != send_length)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 send_length, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB.
		 */
		spin_lock(&GlobalMid_Lock);
		server->tcpStatus = CifsNeedReconnect;
		spin_unlock(&GlobalMid_Lock);
		trace_smb3_partial_send_reconnect(server->CurrentMid,
						  server->conn_id, server->hostname);
	}
smbd_done:
	if (rc < 0 && rc != -EINTR)
		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;
out:
	cifs_in_send_dec(server);
	return rc;
}

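/*
 * Like __smb_send_rqst() but, when CIFS_TRANSFORM_REQ is set, first wraps
 * the compound chain in an encryption transform header via the protocol's
 * init_transform_rq hook before sending.
 */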
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
	      struct smb_rqst *rqst, int flags)
{
	struct kvec iov;
	struct smb2_transform_hdr *tr_hdr;
	struct smb_rqst cur_rqst[MAX_COMPOUND];
	int rc;

	if (!(flags & CIFS_TRANSFORM_REQ))
		return __smb_send_rqst(server, num_rqst, rqst);

	if (num_rqst > MAX_COMPOUND - 1)
		return -ENOMEM;

	if (!server->ops->init_transform_rq) {
		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
		return -EIO;
	}

	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
	if (!tr_hdr)
		return -ENOMEM;

	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
	memset(&iov, 0, sizeof(iov));
	memset(tr_hdr, 0, sizeof(*tr_hdr));

	iov.iov_base = tr_hdr;
	iov.iov_len = sizeof(*tr_hdr);
	cur_rqst[0].rq_iov = &iov;
	cur_rqst[0].rq_nvec = 1;

	rc = server->ops->init_transform_rq(server, num_rqst + 1,
					    &cur_rqst[0], rqst);
	if (rc)
		goto out;

	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
out:
	kfree(tr_hdr);
	return rc;
}

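/*
 * Legacy single-buffer send: split @smb_buffer into its 4-byte RFC1002
 * length prefix and the SMB payload, then hand both to __smb_send_rqst().
 */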
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov[2];
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = 2 };

	iov[0].iov_base = smb_buffer;
	iov[0].iov_len = 4;
	iov[1].iov_base = (char *)smb_buffer + 4;
	iov[1].iov_len = smb_buf_length;

	return __smb_send_rqst(server, 1, &rqst);
}

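/*
 * Wait up to @timeout ms (forever if negative) until @num_credits send
 * credits are available, then consume them and record the reconnect
 * instance they came from in *@instance. Echo requests fail immediately
 * with -EAGAIN when no credits are left, and oplock break responses
 * (CIFS_NON_BLOCKING) are never held up waiting for credits.
 */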
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
		      const int timeout, const int flags,
		      unsigned int *instance)
{
	long rc;
	int *credits;
	int optype;
	long int t;
	int scredits, in_flight;

	if (timeout < 0)
		t = MAX_JIFFY_OFFSET;
	else
		t = msecs_to_jiffies(timeout);

	optype = flags & CIFS_OP_MASK;

	*instance = 0;

	credits = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*credits <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;

	spin_lock(&server->req_lock);
	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		if (server->in_flight > server->max_in_flight)
			server->max_in_flight = server->in_flight;
		*credits -= 1;
		*instance = server->reconnect_instance;
		scredits = *credits;
		in_flight = server->in_flight;
		spin_unlock(&server->req_lock);

		trace_smb3_add_credits(server->CurrentMid,
				server->conn_id, server->hostname, scredits, -1, in_flight);
		cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
				__func__, 1, scredits);

		return 0;
	}

	while (1) {
		if (*credits < num_credits) {
			scredits = *credits;
			spin_unlock(&server->req_lock);

			cifs_num_waiters_inc(server);
			rc = wait_event_killable_timeout(server->request_q,
				has_credits(server, credits, num_credits), t);
			cifs_num_waiters_dec(server);
			if (!rc) {
				spin_lock(&server->req_lock);
				scredits = *credits;
				in_flight = server->in_flight;
				spin_unlock(&server->req_lock);

				trace_smb3_credit_timeout(server->CurrentMid,
						server->conn_id, server->hostname, scredits,
						num_credits, in_flight);
				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
						timeout);
				return -EBUSY;
			}
			if (rc == -ERESTARTSYS)
				return -ERESTARTSYS;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * For normal commands, reserve the last MAX_COMPOUND
			 * credits for compound requests.
			 * Otherwise these compounds could be permanently
			 * starved for credits by single-credit requests.
			 *
			 * To prevent spinning CPU, block this thread until
			 * there are >MAX_COMPOUND credits available.
			 * But only do this if we already have a lot of
			 * credits in flight to avoid triggering this check
			 * for servers that are slow to hand out credits on
			 * new sessions.
			 */
			if (!optype && num_credits == 1 &&
			    server->in_flight > 2 * MAX_COMPOUND &&
			    *credits <= MAX_COMPOUND) {
				spin_unlock(&server->req_lock);

				cifs_num_waiters_inc(server);
				rc = wait_event_killable_timeout(
					server->request_q,
					has_credits(server, credits,
						    MAX_COMPOUND + 1),
					t);
				cifs_num_waiters_dec(server);
				if (!rc) {
					spin_lock(&server->req_lock);
					scredits = *credits;
					in_flight = server->in_flight;
					spin_unlock(&server->req_lock);

					trace_smb3_credit_timeout(
							server->CurrentMid,
							server->conn_id, server->hostname,
							scredits, num_credits, in_flight);
					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
							timeout);
					return -EBUSY;
				}
				if (rc == -ERESTARTSYS)
					return -ERESTARTSYS;
				spin_lock(&server->req_lock);
				continue;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
				*credits -= num_credits;
				server->in_flight += num_credits;
				if (server->in_flight > server->max_in_flight)
					server->max_in_flight = server->in_flight;
				*instance = server->reconnect_instance;
			}
			scredits = *credits;
			in_flight = server->in_flight;
			spin_unlock(&server->req_lock);

			trace_smb3_add_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					-(num_credits), in_flight);
			cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
					__func__, num_credits, scredits);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
		      unsigned int *instance)
{
	return wait_for_free_credits(server, 1, -1, flags,
				     instance);
}

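/*
 * Like wait_for_free_request() but for @num credits at once. Fails fast
 * with -EDEADLK if credits are short and nothing is in flight, since no
 * response could ever replenish them; otherwise waits up to 60 seconds.
 */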
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
			  const int flags, unsigned int *instance)
{
	int *credits;
	int scredits, in_flight;

	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

	spin_lock(&server->req_lock);
	scredits = *credits;
	in_flight = server->in_flight;

	if (*credits < num) {
		/*
		 * If the server is tight on resources or just gives us less
		 * credits for other reasons (e.g. requests are coming out of
		 * order and the server delays granting more credits until it
		 * processes a missing mid) and we exhausted most available
		 * credits there may be situations when we try to send
		 * a compound request but we don't have enough credits. At this
		 * point the client needs to decide if it should wait for
		 * additional credits or fail the request. If at least one
		 * request is in flight there is a high probability that the
		 * server will return enough credits to satisfy this compound
		 * request.
		 *
		 * Return immediately if no requests in flight since we will be
		 * stuck on waiting for credits.
		 */
		if (server->in_flight == 0) {
			spin_unlock(&server->req_lock);
			trace_smb3_insufficient_credits(server->CurrentMid,
					server->conn_id, server->hostname, scredits,
					num, in_flight);
			cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
					__func__, in_flight, num, scredits);
			return -EDEADLK;
		}
	}
	spin_unlock(&server->req_lock);

	return wait_for_free_credits(server, num, 60000, flags,
				     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, struct cifs_credits *credits)
{
	*num = size;
	credits->value = 0;
	credits->instance = server->reconnect_instance;
	return 0;
}

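/*
 * Validate the session state for this command (only session setup and
 * negotiate may be sent on a CifsNew session, only logoff on an exiting
 * one), then allocate a mid and queue it on the server's pending_mid_q.
 */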
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

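/*
 * Build a mid for an async request: the first iov must be the 4-byte
 * RFC1002 length immediately followed by the SMB header in the second.
 * Signs the request if the server requires signing.
 */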
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		mid_handle_t *handle, void *cbdata, const int flags,
		const struct cifs_credits *exist_credits)
{
	int rc;
	struct mid_q_entry *mid;
	struct cifs_credits credits = { .value = 0, .instance = 0 };
	unsigned int instance;
	int optype;

	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, flags, &instance);
		if (rc)
			return rc;
		credits.value = 1;
		credits.instance = instance;
	} else
		instance = exist_credits->instance;

	mutex_lock(&server->srv_mutex);

	/*
	 * We can't use credits obtained from the previous session to send this
	 * request. Check if there were reconnects after we obtained credits and
	 * return -EAGAIN in such cases to let callers handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return -EAGAIN;
	}

	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, &credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->handle = handle;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	/*
	 * Need to store the time in mid before calling I/O. For call_async,
	 * I/O response may come back and free the mid entry on another thread.
	 */
	cifs_save_when_sent(mid);
	rc = smb_send_rqst(server, 1, rqst, flags);

	if (rc < 0) {
		revert_current_mid(server, mid->credits);
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, &credits, optype);
	return rc;
}

/*
 * Send an SMB request. No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait,
 * and whether to log the NT STATUS code (error) before mapping it to a
 * POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	struct kvec rsp_iov;
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RSP_BUF;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

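/*
 * Translate the final state of a synchronous mid into an errno, dequeue
 * it if the demultiplex thread has not already done so, and release it.
 * A mid that received its response is left queued for the caller to
 * consume and delete.
 */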
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		if (!(mid->mid_flags & MID_DELETED)) {
			list_del_init(&mid->qhead);
			mid->mid_flags |= MID_DELETED;
		}
		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
	    struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, rqst, mid) : 0;
}

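/*
 * Sanity-check a received response: dump the first bytes for debugging,
 * verify the SMB signature when signing is active, and map the SMB error
 * code to a POSIX errno.
 */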
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sign) {
		struct kvec iov[2];
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = iov,
					 .rq_nvec = 2 };

		iov[0].iov_base = mid->resp_buf;
		iov[0].iov_len = 4;
		iov[1].iov_base = (char *)mid->resp_buf + 4;
		iov[1].iov_len = len - 4;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_and_check_smb_error(mid, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
		   struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	if (rqst->rq_iov[0].iov_len != 4 ||
	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
		return ERR_PTR(-EIO);

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

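/*
 * Callbacks used by compound_send_recv(): every response returns the
 * credits the server granted on that part of the chain to the client's
 * count; only the last response wakes the waiting thread, and a mid that
 * was cancelled is freed as soon as its response arrives.
 */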
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
	struct TCP_Server_Info *server = mid->server;
	struct cifs_credits credits;

	credits.value = server->ops->get_credits(mid);
	credits.instance = server->reconnect_instance;

	add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
	cifs_compound_callback(mid);
	DeleteMidQEntry(mid);
}

/*
 * Return a channel (master if none) of @ses that can be used to send
 * regular requests.
 *
 * If we are currently binding a new channel (negprot/sess.setup),
 * return the new incomplete channel.
 */
struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
{
	uint index = 0;

	if (!ses)
		return NULL;

	spin_lock(&ses->chan_lock);
	if (!ses->binding) {
		/* round robin */
		if (ses->chan_count > 1) {
			index = (uint)atomic_inc_return(&ses->chan_seq);
			index %= ses->chan_count;
		}
		spin_unlock(&ses->chan_lock);
		return ses->chans[index].server;
	} else {
		spin_unlock(&ses->chan_lock);
		return cifs_ses_server(ses);
	}
}

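/*
 * Send @num_rqst requests as one compound chain on @server and wait for
 * all the responses. Credits are obtained up front, mids are set up and
 * signed under srv_mutex to keep signing in send order, and responses
 * are returned in @resp_iov with their buffer types in @resp_buf_type.
 */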
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
		   struct TCP_Server_Info *server,
		   const int flags, const int num_rqst, struct smb_rqst *rqst,
		   int *resp_buf_type, struct kvec *resp_iov)
{
	int i, j, optype, rc = 0;
	struct mid_q_entry *midQ[MAX_COMPOUND];
	bool cancelled_mid[MAX_COMPOUND] = {false};
	struct cifs_credits credits[MAX_COMPOUND] = {
		{ .value = 0, .instance = 0 }
	};
	unsigned int instance;
	char *buf;

	optype = flags & CIFS_OP_MASK;

	for (i = 0; i < num_rqst; i++)
		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

	if (!ses || !ses->server || !server) {
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/*
	 * Wait for all the requests to become available.
	 * This approach still leaves the possibility to be stuck waiting for
	 * credits if the server doesn't grant credits to the outstanding
	 * requests and if the client is completely idle, not generating any
	 * other requests.
	 * This can be handled by the eventual session reconnect.
	 */
	rc = wait_for_compound_request(server, num_rqst, flags,
				       &instance);
	if (rc)
		return rc;

	for (i = 0; i < num_rqst; i++) {
		credits[i].value = 1;
		credits[i].instance = instance;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&server->srv_mutex);

	/*
	 * All the parts of the compound chain must use credits obtained from
	 * the same session. We cannot use credits obtained from a previous
	 * session to send this request. Check if there were reconnects after
	 * we obtained credits and return -EAGAIN in such cases to let callers
	 * handle it.
	 */
	if (instance != server->reconnect_instance) {
		mutex_unlock(&server->srv_mutex);
		for (j = 0; j < num_rqst; j++)
			add_credits(server, &credits[j], optype);
		return -EAGAIN;
	}

	for (i = 0; i < num_rqst; i++) {
		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
		if (IS_ERR(midQ[i])) {
			revert_current_mid(server, i);
			for (j = 0; j < i; j++)
				cifs_delete_mid(midQ[j]);
			mutex_unlock(&server->srv_mutex);

			/* Update # of requests on wire to server */
			for (j = 0; j < num_rqst; j++)
				add_credits(server, &credits[j], optype);
			return PTR_ERR(midQ[i]);
		}

		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
		midQ[i]->optype = optype;
		/*
		 * Invoke callback for every part of the compound chain
		 * to calculate credits properly. Wake up this thread only when
		 * the last element is received.
		 */
		if (i < num_rqst - 1)
			midQ[i]->callback = cifs_compound_callback;
		else
			midQ[i]->callback = cifs_compound_last_callback;
	}
	rc = smb_send_rqst(server, num_rqst, rqst, flags);

	for (i = 0; i < num_rqst; i++)
		cifs_save_when_sent(midQ[i]);

	if (rc < 0) {
		revert_current_mid(server, num_rqst);
		server->sequence_number -= 2;
	}

	mutex_unlock(&server->srv_mutex);

	/*
	 * If sending failed for some reason or it is an oplock break that we
	 * will not receive a response to - return credits back
	 */
	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
		for (i = 0; i < num_rqst; i++)
			add_credits(server, &credits[i], optype);
		goto out;
	}

	/*
	 * At this point the request is passed to the network stack - we assume
	 * that any credits taken from the server structure on the client have
	 * been spent and we can't return them back. Once we receive responses
	 * we will collect credits granted by the server in the mid callbacks
	 * and add those credits to the server structure.
	 */

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
					   rqst[0].rq_nvec);
		mutex_unlock(&server->srv_mutex);
	}

	for (i = 0; i < num_rqst; i++) {
		rc = wait_for_response(server, midQ[i]);
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		for (; i < num_rqst; i++) {
			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
			send_cancel(server, &rqst[i], midQ[i]);
			spin_lock(&GlobalMid_Lock);
			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
				midQ[i]->callback = cifs_cancelled_callback;
				cancelled_mid[i] = true;
				credits[i].value = 0;
			}
			spin_unlock(&GlobalMid_Lock);
		}
	}

	for (i = 0; i < num_rqst; i++) {
		if (rc < 0)
			goto out;

		rc = cifs_sync_mid_result(midQ[i], server);
		if (rc != 0) {
			/* mark this mid as cancelled to not free it below */
			cancelled_mid[i] = true;
			goto out;
		}

		if (!midQ[i]->resp_buf ||
		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
			rc = -EIO;
			cifs_dbg(FYI, "Bad MID state?\n");
			goto out;
		}

		buf = (char *)midQ[i]->resp_buf;
		resp_iov[i].iov_base = buf;
		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
			server->vals->header_preamble_size;

		if (midQ[i]->large_buf)
			resp_buf_type[i] = CIFS_LARGE_BUFFER;
		else
			resp_buf_type[i] = CIFS_SMALL_BUFFER;

		rc = server->ops->check_receive(midQ[i], server,
						     flags & CIFS_LOG_ERROR);

		/* mark it so buf will not be freed by cifs_delete_mid */
		if ((flags & CIFS_NO_RSP_BUF) == 0)
			midQ[i]->resp_buf = NULL;
	}

	/*
	 * Compounding is never used during session establish.
	 */
	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
		struct kvec iov = {
			.iov_base = resp_iov[0].iov_base,
			.iov_len = resp_iov[0].iov_len
		};
		mutex_lock(&server->srv_mutex);
		smb311_update_preauth_hash(ses, &iov, 1);
		mutex_unlock(&server->srv_mutex);
	}

out:
	/*
	 * This will dequeue all mids. After this it is important that the
	 * demultiplex_thread will not process any of these mids any further.
	 * This is prevented above by using a noop callback that will not
	 * wake this thread except for the very last PDU.
	 */
	for (i = 0; i < num_rqst; i++) {
		if (!cancelled_mid[i])
			cifs_delete_mid(midQ[i]);
	}

	return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
	       struct TCP_Server_Info *server,
	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
	       struct kvec *resp_iov)
{
	return compound_send_recv(xid, ses, server, flags, 1,
				  rqst, resp_buf_type, resp_iov);
}

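/*
 * SMB1 send/receive helper taking a bare iov array: prepend an iov for
 * the RFC1001 length (split out of the first caller iov) and pass the
 * result to cifs_send_recv().
 */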
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags, struct kvec *resp_iov)
{
	struct smb_rqst rqst;
	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
	int rc;

	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
					GFP_KERNEL);
		if (!new_iov) {
			/* otherwise cifs_send_recv below sets resp_buf_type */
			*resp_buf_type = CIFS_NO_BUFFER;
			return -ENOMEM;
		}
	} else
		new_iov = s_iov;

	/* 1st iov is a RFC1001 length followed by the rest of the packet */
	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

	new_iov[0].iov_base = new_iov[1].iov_base;
	new_iov[0].iov_len = 4;
	new_iov[1].iov_base += 4;
	new_iov[1].iov_len -= 4;

	memset(&rqst, 0, sizeof(struct smb_rqst));
	rqst.rq_iov = new_iov;
	rqst.rq_nvec = n_vec + 1;

	rc = cifs_send_recv(xid, ses, ses->server,
			    &rqst, resp_buf_type, flags, resp_iov);
	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
		kfree(new_iov);
	return rc;
}

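/*
 * Synchronous SMB1 send/receive on a pre-built buffer: take one credit,
 * allocate and sign a mid, send the request, wait for the response and
 * copy it into @out_buf, returning the byte count in *@pbytes_returned.
 */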
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int flags)
{
	int rc = 0;
	struct mid_q_entry *midQ;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	struct cifs_credits credits = { .value = 1, .instance = 0 };
	struct TCP_Server_Info *server;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	server = ses->server;
	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
				len);
		return -EIO;
	}

	rc = wait_for_free_request(server, flags, &credits.instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(server, &credits, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0)
		goto out;

	rc = wait_for_response(server, midQ);
	if (rc != 0) {
		send_cancel(server, &rqst, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(server, &credits, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0) {
		add_credits(server, &credits, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_server_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(server, &credits, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

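/*
 * Like SendReceive() but for SMB1 blocking locks, which may legitimately
 * block on the server indefinitely: wait interruptibly and, if a signal
 * arrives, send the appropriate cancel (NT_CANCEL for POSIX locks,
 * LOCKINGX_CANCEL_LOCK for Windows locks) before collecting the result.
 */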
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;
	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
	unsigned int instance;
	struct TCP_Server_Info *server;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;
	server = ses->server;

	if (server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
			      len);
		return -EIO;
	}

	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	rc = smb_send(server, in_buf, len);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		server->sequence_number -= 2;

	mutex_unlock(&server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((server->tcpStatus != CifsGood) &&
		 (server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((server->tcpStatus == CifsGood) ||
		 (server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(server, &rqst, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(server, midQ);
		if (rc) {
			send_cancel(server, &rqst, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_tcon_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}