1 /*
2  *   fs/cifs/transport.c
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *   Jeremy Allison (jra@samba.org) 2006.
7  *
8  *   This library is free software; you can redistribute it and/or modify
9  *   it under the terms of the GNU Lesser General Public License as published
10  *   by the Free Software Foundation; either version 2.1 of the License, or
11  *   (at your option) any later version.
12  *
13  *   This library is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
16  *   the GNU Lesser General Public License for more details.
17  *
18  *   You should have received a copy of the GNU Lesser General Public License
19  *   along with this library; if not, write to the Free Software
20  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21  */
22 
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include <linux/sched/signal.h>
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "smb2proto.h"
42 #include "smbdirect.h"
43 
44 /* Max number of iovectors we can use off the stack when sending requests. */
45 #define CIFS_MAX_IOV_SIZE 8
46 
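/*
 * Default mid callback: the request was issued synchronously, so simply wake
 * up the task sleeping in wait_for_response(). The task_struct pointer is
 * stashed in mid->callback_data by AllocMidQEntry() below.
 */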
47 void
48 cifs_wake_up_task(struct mid_q_entry *mid)
49 {
50 	wake_up_process(mid->callback_data);
51 }
52 
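/*
 * Allocate and initialize a mid (multiplex id) entry for a request about to
 * be sent: take it from the mid mempool, record the SMB command, mid, pid and
 * allocation time, take a reference on the issuing task, and default the
 * callback to cifs_wake_up_task() for synchronous waiters.
 */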
53 struct mid_q_entry *
54 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
55 {
56 	struct mid_q_entry *temp;
57 
58 	if (server == NULL) {
59 		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
60 		return NULL;
61 	}
62 
63 	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
64 	memset(temp, 0, sizeof(struct mid_q_entry));
65 	kref_init(&temp->refcount);
66 	temp->mid = get_mid(smb_buffer);
67 	temp->pid = current->pid;
68 	temp->command = cpu_to_le16(smb_buffer->Command);
69 	cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
70 	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
71 	/* when mid allocated can be before when sent */
72 	temp->when_alloc = jiffies;
73 	temp->server = server;
74 
75 	/*
76 	 * The default is for the mid to be synchronous, so the
77 	 * default callback just wakes up the current task.
78 	 */
79 	get_task_struct(current);
80 	temp->creator = current;
81 	temp->callback = cifs_wake_up_task;
82 	temp->callback_data = current;
83 
84 	atomic_inc(&midCount);
85 	temp->mid_state = MID_REQUEST_ALLOCATED;
86 	return temp;
87 }
88 
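/*
 * Final kref release for a mid entry: hand cancelled-but-answered mids to the
 * protocol's handle_cancelled_mid hook, free the response buffer, update the
 * per-command timing statistics (CONFIG_CIFS_STATS2), drop the reference on
 * the creating task and return the entry to the mempool.
 */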
89 static void _cifs_mid_q_entry_release(struct kref *refcount)
90 {
91 	struct mid_q_entry *midEntry =
92 			container_of(refcount, struct mid_q_entry, refcount);
93 #ifdef CONFIG_CIFS_STATS2
94 	__le16 command = midEntry->server->vals->lock_cmd;
95 	__u16 smb_cmd = le16_to_cpu(midEntry->command);
96 	unsigned long now;
97 	unsigned long roundtrip_time;
98 #endif
99 	struct TCP_Server_Info *server = midEntry->server;
100 
101 	if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
102 	    midEntry->mid_state == MID_RESPONSE_RECEIVED &&
103 	    server->ops->handle_cancelled_mid)
104 		server->ops->handle_cancelled_mid(midEntry, server);
105 
106 	midEntry->mid_state = MID_FREE;
107 	atomic_dec(&midCount);
108 	if (midEntry->large_buf)
109 		cifs_buf_release(midEntry->resp_buf);
110 	else
111 		cifs_small_buf_release(midEntry->resp_buf);
112 #ifdef CONFIG_CIFS_STATS2
113 	now = jiffies;
114 	if (now < midEntry->when_alloc)
115 		cifs_server_dbg(VFS, "Invalid mid allocation time\n");
116 	roundtrip_time = now - midEntry->when_alloc;
117 
118 	if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
119 		if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
120 			server->slowest_cmd[smb_cmd] = roundtrip_time;
121 			server->fastest_cmd[smb_cmd] = roundtrip_time;
122 		} else {
123 			if (server->slowest_cmd[smb_cmd] < roundtrip_time)
124 				server->slowest_cmd[smb_cmd] = roundtrip_time;
125 			else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
126 				server->fastest_cmd[smb_cmd] = roundtrip_time;
127 		}
128 		cifs_stats_inc(&server->num_cmds[smb_cmd]);
129 		server->time_per_cmd[smb_cmd] += roundtrip_time;
130 	}
131 	/*
132 	 * commands taking longer than one second (default) can be indications
133 	 * that something is wrong, unless it is quite a slow link or a very
134 	 * busy server. Note that this calculation is unlikely or impossible to
135 	 * wrap as long as slow_rsp_threshold is not set far above the
136 	 * recommended maximum (32767, i.e. 9 hours), and it is generally
137 	 * harmless even if wrong since it only affects debug counters - so
138 	 * leave the calculation as a simple comparison rather than doing
139 	 * multiple conversions and overflow checks.
140 	 */
141 	if ((slow_rsp_threshold != 0) &&
142 	    time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
143 	    (midEntry->command != command)) {
144 		/*
145 		 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
146 		 * NB: le16_to_cpu returns unsigned so can not be negative below
147 		 */
148 		if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
149 			cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
150 
151 		trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
152 			       midEntry->when_sent, midEntry->when_received);
153 		if (cifsFYI & CIFS_TIMER) {
154 			pr_debug("slow rsp: cmd %d mid %llu",
155 				 midEntry->command, midEntry->mid);
156 			cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
157 				  now - midEntry->when_alloc,
158 				  now - midEntry->when_sent,
159 				  now - midEntry->when_received);
160 		}
161 	}
162 #endif
163 	put_task_struct(midEntry->creator);
164 
165 	mempool_free(midEntry, cifs_mid_poolp);
166 }
167 
168 void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
169 {
170 	spin_lock(&GlobalMid_Lock);
171 	kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
172 	spin_unlock(&GlobalMid_Lock);
173 }
174 
175 void DeleteMidQEntry(struct mid_q_entry *midEntry)
176 {
177 	cifs_mid_q_entry_release(midEntry);
178 }
179 
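/*
 * Unlink a mid from the pending_mid_q (unless the demultiplex thread has
 * already done so) and drop the queue's reference to it.
 */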
180 void
181 cifs_delete_mid(struct mid_q_entry *mid)
182 {
183 	spin_lock(&GlobalMid_Lock);
184 	if (!(mid->mid_flags & MID_DELETED)) {
185 		list_del_init(&mid->qhead);
186 		mid->mid_flags |= MID_DELETED;
187 	}
188 	spin_unlock(&GlobalMid_Lock);
189 
190 	DeleteMidQEntry(mid);
191 }
192 
193 /*
194  * smb_send_kvec - send an array of kvecs to the server
195  * @server:	Server to send the data to
196  * @smb_msg:	Message to send
197  * @sent:	amount of data sent on socket is stored here
198  *
199  * Our basic "send data to server" function. Should be called with srv_mutex
200  * held. The caller is responsible for handling the results.
201  */
202 static int
203 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
204 	      size_t *sent)
205 {
206 	int rc = 0;
207 	int retries = 0;
208 	struct socket *ssocket = server->ssocket;
209 
210 	*sent = 0;
211 
212 	smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
213 	smb_msg->msg_namelen = sizeof(struct sockaddr);
214 	smb_msg->msg_control = NULL;
215 	smb_msg->msg_controllen = 0;
216 	if (server->noblocksnd)
217 		smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
218 	else
219 		smb_msg->msg_flags = MSG_NOSIGNAL;
220 
221 	while (msg_data_left(smb_msg)) {
222 		/*
223 		 * If blocking send, we try 3 times, since each can block
224 		 * for 5 seconds. For nonblocking we have to try more
225 		 * but wait increasing amounts of time allowing time for
226 		 * socket to clear.  The overall time we wait in either
227 		 * case to send on the socket is about 15 seconds.
228 		 * Similarly we wait for 15 seconds for a response from
229 		 * the server in SendReceive[2] for the server to send
230 		 * a response back for most types of requests (except
231 		 * SMB Write past end of file which can be slow, and
232 		 * blocking lock operations). NFS waits slightly longer
233 		 * than CIFS, but this can make it take longer for
234 		 * nonresponsive servers to be detected and 15 seconds
235 		 * is more than enough time for modern networks to
236 		 * send a packet.  In most cases if we fail to send
237 		 * after the retries we will kill the socket and
238 		 * reconnect which may clear the network problem.
239 		 */
240 		rc = sock_sendmsg(ssocket, smb_msg);
241 		if (rc == -EAGAIN) {
242 			retries++;
243 			if (retries >= 14 ||
244 			    (!server->noblocksnd && (retries > 2))) {
245 				cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
246 					 ssocket);
247 				return -EAGAIN;
248 			}
249 			msleep(1 << retries);
250 			continue;
251 		}
252 
253 		if (rc < 0)
254 			return rc;
255 
256 		if (rc == 0) {
257 			/* should never happen, letting socket clear before
258 			   retrying is our only obvious option here */
259 			cifs_server_dbg(VFS, "tcp sent no data\n");
260 			msleep(500);
261 			continue;
262 		}
263 
264 		/* send was at least partially successful */
265 		*sent += rc;
266 		retries = 0; /* in case we get ENOSPC on the next send */
267 	}
268 	return 0;
269 }
270 
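/*
 * Compute the number of bytes a request will occupy on the wire: the sum of
 * the kvec array (skipping the leading 4-byte RFC1002 length iov for SMB2+,
 * which has no preamble) plus any attached page array, using rq_offset and
 * rq_tailsz for the first and last pages.
 */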
271 unsigned long
272 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
273 {
274 	unsigned int i;
275 	struct kvec *iov;
276 	int nvec;
277 	unsigned long buflen = 0;
278 
279 	if (server->vals->header_preamble_size == 0 &&
280 	    rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
281 		iov = &rqst->rq_iov[1];
282 		nvec = rqst->rq_nvec - 1;
283 	} else {
284 		iov = rqst->rq_iov;
285 		nvec = rqst->rq_nvec;
286 	}
287 
288 	/* total up iov array first */
289 	for (i = 0; i < nvec; i++)
290 		buflen += iov[i].iov_len;
291 
292 	/*
293 	 * Add in the page array if there is one. The caller needs to make
294 	 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
295 	 * multiple pages ends at page boundary, rq_tailsz needs to be set to
296 	 * PAGE_SIZE.
297 	 */
298 	if (rqst->rq_npages) {
299 		if (rqst->rq_npages == 1)
300 			buflen += rqst->rq_tailsz;
301 		else {
302 			/*
303 			 * If there is more than one page, calculate the
304 			 * buffer length based on rq_offset and rq_tailsz
305 			 */
306 			buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
307 					rqst->rq_offset;
308 			buflen += rqst->rq_tailsz;
309 		}
310 	}
311 
312 	return buflen;
313 }
314 
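/*
 * Core send path for one or more marshalled requests. For RDMA (smbdirect)
 * connections the data is handed to smbd_send(); otherwise the socket is
 * corked, an RFC1002 length marker is prepended for SMB2+, signals are
 * blocked for the duration of the send, and each request's kvecs and pages
 * are pushed out via smb_send_kvec(). A partial send marks the connection
 * CifsNeedReconnect so the server discards the truncated frame.
 */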
315 static int
316 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
317 		struct smb_rqst *rqst)
318 {
319 	int rc = 0;
320 	struct kvec *iov;
321 	int n_vec;
322 	unsigned int send_length = 0;
323 	unsigned int i, j;
324 	sigset_t mask, oldmask;
325 	size_t total_len = 0, sent, size;
326 	struct socket *ssocket = server->ssocket;
327 	struct msghdr smb_msg;
328 	__be32 rfc1002_marker;
329 
330 	if (cifs_rdma_enabled(server)) {
331 		/* return -EAGAIN when connecting or reconnecting */
332 		rc = -EAGAIN;
333 		if (server->smbd_conn)
334 			rc = smbd_send(server, num_rqst, rqst);
335 		goto smbd_done;
336 	}
337 
338 	if (ssocket == NULL)
339 		return -EAGAIN;
340 
341 	if (fatal_signal_pending(current)) {
342 		cifs_dbg(FYI, "signal pending before send request\n");
343 		return -ERESTARTSYS;
344 	}
345 
346 	/* cork the socket */
347 	tcp_sock_set_cork(ssocket->sk, true);
348 
349 	for (j = 0; j < num_rqst; j++)
350 		send_length += smb_rqst_len(server, &rqst[j]);
351 	rfc1002_marker = cpu_to_be32(send_length);
352 
353 	/*
354 	 * We should not allow signals to interrupt the network send because
355 	 * any partial send will cause session reconnects thus increasing
356 	 * latency of system calls and overload a server with unnecessary
357 	 * requests.
358 	 */
359 
360 	sigfillset(&mask);
361 	sigprocmask(SIG_BLOCK, &mask, &oldmask);
362 
363 	/* Generate a rfc1002 marker for SMB2+ */
364 	if (server->vals->header_preamble_size == 0) {
365 		struct kvec hiov = {
366 			.iov_base = &rfc1002_marker,
367 			.iov_len  = 4
368 		};
369 		iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
370 		rc = smb_send_kvec(server, &smb_msg, &sent);
371 		if (rc < 0)
372 			goto unmask;
373 
374 		total_len += sent;
375 		send_length += 4;
376 	}
377 
378 	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
379 
380 	for (j = 0; j < num_rqst; j++) {
381 		iov = rqst[j].rq_iov;
382 		n_vec = rqst[j].rq_nvec;
383 
384 		size = 0;
385 		for (i = 0; i < n_vec; i++) {
386 			dump_smb(iov[i].iov_base, iov[i].iov_len);
387 			size += iov[i].iov_len;
388 		}
389 
390 		iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
391 
392 		rc = smb_send_kvec(server, &smb_msg, &sent);
393 		if (rc < 0)
394 			goto unmask;
395 
396 		total_len += sent;
397 
398 		/* now walk the page array and send each page in it */
399 		for (i = 0; i < rqst[j].rq_npages; i++) {
400 			struct bio_vec bvec;
401 
402 			bvec.bv_page = rqst[j].rq_pages[i];
403 			rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
404 					     &bvec.bv_offset);
405 
406 			iov_iter_bvec(&smb_msg.msg_iter, WRITE,
407 				      &bvec, 1, bvec.bv_len);
408 			rc = smb_send_kvec(server, &smb_msg, &sent);
409 			if (rc < 0)
410 				break;
411 
412 			total_len += sent;
413 		}
414 	}
415 
416 unmask:
417 	sigprocmask(SIG_SETMASK, &oldmask, NULL);
418 
419 	/*
420 	 * If signal is pending but we have already sent the whole packet to
421 	 * the server we need to return success status to allow a corresponding
422 	 * mid entry to be kept in the pending requests queue thus allowing
423 	 * to handle responses from the server by the client.
424 	 *
425 	 * If only part of the packet has been sent there is no need to hide
426 	 * interrupt because the session will be reconnected anyway, so there
427 	 * won't be any response from the server to handle.
428 	 */
429 
430 	if (signal_pending(current) && (total_len != send_length)) {
431 		cifs_dbg(FYI, "signal is pending after attempt to send\n");
432 		rc = -ERESTARTSYS;
433 	}
434 
435 	/* uncork it */
436 	tcp_sock_set_cork(ssocket->sk, false);
437 
438 	if ((total_len > 0) && (total_len != send_length)) {
439 		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
440 			 send_length, total_len);
441 		/*
442 		 * If we have only sent part of an SMB then the next SMB could
443 		 * be taken as the remainder of this one. We need to kill the
444 		 * socket so the server throws away the partial SMB
445 		 */
446 		server->tcpStatus = CifsNeedReconnect;
447 		trace_smb3_partial_send_reconnect(server->CurrentMid,
448 						  server->hostname);
449 	}
450 smbd_done:
451 	if (rc < 0 && rc != -EINTR)
452 		cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
453 			 rc);
454 	else if (rc > 0)
455 		rc = 0;
456 
457 	return rc;
458 }
459 
460 static int
461 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
462 	      struct smb_rqst *rqst, int flags)
463 {
464 	struct kvec iov;
465 	struct smb2_transform_hdr *tr_hdr;
466 	struct smb_rqst cur_rqst[MAX_COMPOUND];
467 	int rc;
468 
469 	if (!(flags & CIFS_TRANSFORM_REQ))
470 		return __smb_send_rqst(server, num_rqst, rqst);
471 
472 	if (num_rqst > MAX_COMPOUND - 1)
473 		return -ENOMEM;
474 
475 	if (!server->ops->init_transform_rq) {
476 		cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
477 		return -EIO;
478 	}
479 
480 	tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
481 	if (!tr_hdr)
482 		return -ENOMEM;
483 
484 	memset(&cur_rqst[0], 0, sizeof(cur_rqst));
485 	memset(&iov, 0, sizeof(iov));
486 	memset(tr_hdr, 0, sizeof(*tr_hdr));
487 
488 	iov.iov_base = tr_hdr;
489 	iov.iov_len = sizeof(*tr_hdr);
490 	cur_rqst[0].rq_iov = &iov;
491 	cur_rqst[0].rq_nvec = 1;
492 
493 	rc = server->ops->init_transform_rq(server, num_rqst + 1,
494 					    &cur_rqst[0], rqst);
495 	if (rc)
496 		goto out;
497 
498 	rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
499 	smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
500 out:
501 	kfree(tr_hdr);
502 	return rc;
503 }
504 
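/*
 * Send a single pre-marshalled legacy SMB: split the buffer into its 4-byte
 * RFC1002 length prefix and the SMB body and hand both kvecs to
 * __smb_send_rqst(). Callers in this file (see SendReceive() and
 * SendReceiveBlockingLock() below) invoke it roughly as
 *
 *	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
 *
 * where smb_buf_length does not include the 4-byte RFC1002 header itself.
 */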
505 int
506 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
507 	 unsigned int smb_buf_length)
508 {
509 	struct kvec iov[2];
510 	struct smb_rqst rqst = { .rq_iov = iov,
511 				 .rq_nvec = 2 };
512 
513 	iov[0].iov_base = smb_buffer;
514 	iov[0].iov_len = 4;
515 	iov[1].iov_base = (char *)smb_buffer + 4;
516 	iov[1].iov_len = smb_buf_length;
517 
518 	return __smb_send_rqst(server, 1, &rqst);
519 }
520 
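/*
 * Wait (killable, with an optional timeout in ms) until num_credits credits
 * are available on this connection, then consume them and record the current
 * reconnect instance. Oplock-break responses (CIFS_NON_BLOCKING) are never
 * held up, echoes bail out early if no credits remain, and the last
 * MAX_COMPOUND credits are reserved for compound requests so that
 * single-credit traffic cannot starve them.
 */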
521 static int
522 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
523 		      const int timeout, const int flags,
524 		      unsigned int *instance)
525 {
526 	long rc;
527 	int *credits;
528 	int optype;
529 	long int t;
530 
531 	if (timeout < 0)
532 		t = MAX_JIFFY_OFFSET;
533 	else
534 		t = msecs_to_jiffies(timeout);
535 
536 	optype = flags & CIFS_OP_MASK;
537 
538 	*instance = 0;
539 
540 	credits = server->ops->get_credits_field(server, optype);
541 	/* Since an echo is already inflight, no need to wait to send another */
542 	if (*credits <= 0 && optype == CIFS_ECHO_OP)
543 		return -EAGAIN;
544 
545 	spin_lock(&server->req_lock);
546 	if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
547 		/* oplock breaks must not be held up */
548 		server->in_flight++;
549 		if (server->in_flight > server->max_in_flight)
550 			server->max_in_flight = server->in_flight;
551 		*credits -= 1;
552 		*instance = server->reconnect_instance;
553 		spin_unlock(&server->req_lock);
554 		return 0;
555 	}
556 
557 	while (1) {
558 		if (*credits < num_credits) {
559 			spin_unlock(&server->req_lock);
560 			cifs_num_waiters_inc(server);
561 			rc = wait_event_killable_timeout(server->request_q,
562 				has_credits(server, credits, num_credits), t);
563 			cifs_num_waiters_dec(server);
564 			if (!rc) {
565 				trace_smb3_credit_timeout(server->CurrentMid,
566 					server->hostname, num_credits, 0);
567 				cifs_server_dbg(VFS, "wait timed out after %d ms\n",
568 					 timeout);
569 				return -ENOTSUPP;
570 			}
571 			if (rc == -ERESTARTSYS)
572 				return -ERESTARTSYS;
573 			spin_lock(&server->req_lock);
574 		} else {
575 			if (server->tcpStatus == CifsExiting) {
576 				spin_unlock(&server->req_lock);
577 				return -ENOENT;
578 			}
579 
580 			/*
581 			 * For normal commands, reserve the last MAX_COMPOUND
582 			 * credits to compound requests.
583 			 * Otherwise these compounds could be permanently
584 			 * starved for credits by single-credit requests.
585 			 *
586 			 * To prevent spinning CPU, block this thread until
587 			 * there are >MAX_COMPOUND credits available.
588 			 * But only do this if we already have a lot of
589 			 * credits in flight to avoid triggering this check
590 			 * for servers that are slow to hand out credits on
591 			 * new sessions.
592 			 */
593 			if (!optype && num_credits == 1 &&
594 			    server->in_flight > 2 * MAX_COMPOUND &&
595 			    *credits <= MAX_COMPOUND) {
596 				spin_unlock(&server->req_lock);
597 				cifs_num_waiters_inc(server);
598 				rc = wait_event_killable_timeout(
599 					server->request_q,
600 					has_credits(server, credits,
601 						    MAX_COMPOUND + 1),
602 					t);
603 				cifs_num_waiters_dec(server);
604 				if (!rc) {
605 					trace_smb3_credit_timeout(
606 						server->CurrentMid,
607 						server->hostname, num_credits,
608 						0);
609 					cifs_server_dbg(VFS, "wait timed out after %d ms\n",
610 						 timeout);
611 					return -ENOTSUPP;
612 				}
613 				if (rc == -ERESTARTSYS)
614 					return -ERESTARTSYS;
615 				spin_lock(&server->req_lock);
616 				continue;
617 			}
618 
619 			/*
620 			 * Can not count locking commands against total
621 			 * as they are allowed to block on server.
622 			 */
623 
624 			/* update # of requests on the wire to server */
625 			if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
626 				*credits -= num_credits;
627 				server->in_flight += num_credits;
628 				if (server->in_flight > server->max_in_flight)
629 					server->max_in_flight = server->in_flight;
630 				*instance = server->reconnect_instance;
631 			}
632 			spin_unlock(&server->req_lock);
633 			break;
634 		}
635 	}
636 	return 0;
637 }
638 
639 static int
640 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
641 		      unsigned int *instance)
642 {
643 	return wait_for_free_credits(server, 1, -1, flags,
644 				     instance);
645 }
646 
647 static int
648 wait_for_compound_request(struct TCP_Server_Info *server, int num,
649 			  const int flags, unsigned int *instance)
650 {
651 	int *credits;
652 
653 	credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
654 
655 	spin_lock(&server->req_lock);
656 	if (*credits < num) {
657 		/*
658 		 * If the server is tight on resources or just gives us less
659 		 * credits for other reasons (e.g. requests are coming out of
660 		 * order and the server delays granting more credits until it
661 		 * processes a missing mid) and we exhausted most available
662 		 * credits there may be situations when we try to send
663 		 * a compound request but we don't have enough credits. At this
664 		 * point the client needs to decide if it should wait for
665 		 * additional credits or fail the request. If at least one
666 		 * request is in flight there is a high probability that the
667 		 * server will return enough credits to satisfy this compound
668 		 * request.
669 		 *
670 		 * Return immediately if no requests in flight since we will be
671 		 * stuck on waiting for credits.
672 		 */
673 		if (server->in_flight == 0) {
674 			spin_unlock(&server->req_lock);
675 			return -ENOTSUPP;
676 		}
677 	}
678 	spin_unlock(&server->req_lock);
679 
680 	return wait_for_free_credits(server, num, 60000, flags,
681 				     instance);
682 }
683 
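/*
 * Trivial wait_mtu_credits implementation: grant the whole requested size
 * without consuming any credits (credits->value = 0). Used where the dialect
 * does not meter large reads and writes with MTU credits.
 */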
684 int
685 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
686 		      unsigned int *num, struct cifs_credits *credits)
687 {
688 	*num = size;
689 	credits->value = 0;
690 	credits->instance = server->reconnect_instance;
691 	return 0;
692 }
693 
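/*
 * Allocate a mid for a legacy SMB request and queue it on pending_mid_q,
 * after checking that the TCP connection and SMB session are in a state where
 * the command may be sent (session setup and negotiate are allowed through
 * while the session is still new, logoff while it is being torn down).
 */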
694 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
695 			struct mid_q_entry **ppmidQ)
696 {
697 	if (ses->server->tcpStatus == CifsExiting) {
698 		return -ENOENT;
699 	}
700 
701 	if (ses->server->tcpStatus == CifsNeedReconnect) {
702 		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
703 		return -EAGAIN;
704 	}
705 
706 	if (ses->status == CifsNew) {
707 		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
708 			(in_buf->Command != SMB_COM_NEGOTIATE))
709 			return -EAGAIN;
710 		/* else ok - we are setting up session */
711 	}
712 
713 	if (ses->status == CifsExiting) {
714 		/* check if SMB session is bad because we are setting it up */
715 		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
716 			return -EAGAIN;
717 		/* else ok - we are shutting down session */
718 	}
719 
720 	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
721 	if (*ppmidQ == NULL)
722 		return -ENOMEM;
723 	spin_lock(&GlobalMid_Lock);
724 	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
725 	spin_unlock(&GlobalMid_Lock);
726 	return 0;
727 }
728 
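/*
 * Freezable, killable wait until the mid leaves MID_REQUEST_SUBMITTED,
 * i.e. until a response arrives or the connection is torn down and the
 * demultiplex thread changes the mid state.
 */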
729 static int
730 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
731 {
732 	int error;
733 
734 	error = wait_event_freezekillable_unsafe(server->response_q,
735 				    midQ->mid_state != MID_REQUEST_SUBMITTED);
736 	if (error < 0)
737 		return -ERESTARTSYS;
738 
739 	return 0;
740 }
741 
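/*
 * Build a mid for an asynchronous legacy SMB request: the first kvec must be
 * the 4-byte RFC1002 length immediately followed by the SMB header, signing
 * is enabled if the server requires it, and the request is signed before the
 * caller queues and sends it.
 */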
742 struct mid_q_entry *
743 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
744 {
745 	int rc;
746 	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
747 	struct mid_q_entry *mid;
748 
749 	if (rqst->rq_iov[0].iov_len != 4 ||
750 	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
751 		return ERR_PTR(-EIO);
752 
753 	/* enable signing if server requires it */
754 	if (server->sign)
755 		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
756 
757 	mid = AllocMidQEntry(hdr, server);
758 	if (mid == NULL)
759 		return ERR_PTR(-ENOMEM);
760 
761 	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
762 	if (rc) {
763 		DeleteMidQEntry(mid);
764 		return ERR_PTR(rc);
765 	}
766 
767 	return mid;
768 }
769 
770 /*
771  * Send a SMB request and set the callback function in the mid to handle
772  * the result. Caller is responsible for dealing with timeouts.
773  */
774 int
775 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
776 		mid_receive_t *receive, mid_callback_t *callback,
777 		mid_handle_t *handle, void *cbdata, const int flags,
778 		const struct cifs_credits *exist_credits)
779 {
780 	int rc;
781 	struct mid_q_entry *mid;
782 	struct cifs_credits credits = { .value = 0, .instance = 0 };
783 	unsigned int instance;
784 	int optype;
785 
786 	optype = flags & CIFS_OP_MASK;
787 
788 	if ((flags & CIFS_HAS_CREDITS) == 0) {
789 		rc = wait_for_free_request(server, flags, &instance);
790 		if (rc)
791 			return rc;
792 		credits.value = 1;
793 		credits.instance = instance;
794 	} else
795 		instance = exist_credits->instance;
796 
797 	mutex_lock(&server->srv_mutex);
798 
799 	/*
800 	 * We can't use credits obtained from the previous session to send this
801 	 * request. Check if there were reconnects after we obtained credits and
802 	 * return -EAGAIN in such cases to let callers handle it.
803 	 */
804 	if (instance != server->reconnect_instance) {
805 		mutex_unlock(&server->srv_mutex);
806 		add_credits_and_wake_if(server, &credits, optype);
807 		return -EAGAIN;
808 	}
809 
810 	mid = server->ops->setup_async_request(server, rqst);
811 	if (IS_ERR(mid)) {
812 		mutex_unlock(&server->srv_mutex);
813 		add_credits_and_wake_if(server, &credits, optype);
814 		return PTR_ERR(mid);
815 	}
816 
817 	mid->receive = receive;
818 	mid->callback = callback;
819 	mid->callback_data = cbdata;
820 	mid->handle = handle;
821 	mid->mid_state = MID_REQUEST_SUBMITTED;
822 
823 	/* put it on the pending_mid_q */
824 	spin_lock(&GlobalMid_Lock);
825 	list_add_tail(&mid->qhead, &server->pending_mid_q);
826 	spin_unlock(&GlobalMid_Lock);
827 
828 	/*
829 	 * Need to store the time in mid before calling I/O. For call_async,
830 	 * I/O response may come back and free the mid entry on another thread.
831 	 */
832 	cifs_save_when_sent(mid);
833 	cifs_in_send_inc(server);
834 	rc = smb_send_rqst(server, 1, rqst, flags);
835 	cifs_in_send_dec(server);
836 
837 	if (rc < 0) {
838 		revert_current_mid(server, mid->credits);
839 		server->sequence_number -= 2;
840 		cifs_delete_mid(mid);
841 	}
842 
843 	mutex_unlock(&server->srv_mutex);
844 
845 	if (rc == 0)
846 		return 0;
847 
848 	add_credits_and_wake_if(server, &credits, optype);
849 	return rc;
850 }
851 
852 /*
853  *
854  * Send an SMB Request.  No response info (other than return code)
855  * needs to be parsed.
856  *
857  * flags indicate the type of request buffer and how long to wait
858  * and whether to log NT STATUS code (error) before mapping it to POSIX error
859  *
860  */
861 int
862 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
863 		 char *in_buf, int flags)
864 {
865 	int rc;
866 	struct kvec iov[1];
867 	struct kvec rsp_iov;
868 	int resp_buf_type;
869 
870 	iov[0].iov_base = in_buf;
871 	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
872 	flags |= CIFS_NO_RSP_BUF;
873 	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
874 	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
875 
876 	return rc;
877 }
878 
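/*
 * Convert the final state of a synchronously-waited mid into an errno.
 * On MID_RESPONSE_RECEIVED the mid is left alone for the caller to consume;
 * in every other case the mid is dequeued (if needed) and released here.
 */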
879 static int
880 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
881 {
882 	int rc = 0;
883 
884 	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
885 		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
886 
887 	spin_lock(&GlobalMid_Lock);
888 	switch (mid->mid_state) {
889 	case MID_RESPONSE_RECEIVED:
890 		spin_unlock(&GlobalMid_Lock);
891 		return rc;
892 	case MID_RETRY_NEEDED:
893 		rc = -EAGAIN;
894 		break;
895 	case MID_RESPONSE_MALFORMED:
896 		rc = -EIO;
897 		break;
898 	case MID_SHUTDOWN:
899 		rc = -EHOSTDOWN;
900 		break;
901 	default:
902 		if (!(mid->mid_flags & MID_DELETED)) {
903 			list_del_init(&mid->qhead);
904 			mid->mid_flags |= MID_DELETED;
905 		}
906 		cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
907 			 __func__, mid->mid, mid->mid_state);
908 		rc = -EIO;
909 	}
910 	spin_unlock(&GlobalMid_Lock);
911 
912 	DeleteMidQEntry(mid);
913 	return rc;
914 }
915 
916 static inline int
917 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
918 	    struct mid_q_entry *mid)
919 {
920 	return server->ops->send_cancel ?
921 				server->ops->send_cancel(server, rqst, mid) : 0;
922 }
923 
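/*
 * Post-receive checks for a legacy SMB response: dump the first bytes for
 * debugging, verify the signature when signing is in use, and map the SMB
 * status code to a POSIX error.
 */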
924 int
925 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
926 		   bool log_error)
927 {
928 	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
929 
930 	dump_smb(mid->resp_buf, min_t(u32, 92, len));
931 
932 	/* convert the length into a more usable form */
933 	if (server->sign) {
934 		struct kvec iov[2];
935 		int rc = 0;
936 		struct smb_rqst rqst = { .rq_iov = iov,
937 					 .rq_nvec = 2 };
938 
939 		iov[0].iov_base = mid->resp_buf;
940 		iov[0].iov_len = 4;
941 		iov[1].iov_base = (char *)mid->resp_buf + 4;
942 		iov[1].iov_len = len - 4;
943 		/* FIXME: add code to kill session */
944 		rc = cifs_verify_signature(&rqst, server,
945 					   mid->sequence_number);
946 		if (rc)
947 			cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
948 				 rc);
949 	}
950 
951 	/* BB special case reconnect tid and uid here? */
952 	return map_and_check_smb_error(mid, log_error);
953 }
954 
955 struct mid_q_entry *
956 cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
957 		   struct smb_rqst *rqst)
958 {
959 	int rc;
960 	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
961 	struct mid_q_entry *mid;
962 
963 	if (rqst->rq_iov[0].iov_len != 4 ||
964 	    rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
965 		return ERR_PTR(-EIO);
966 
967 	rc = allocate_mid(ses, hdr, &mid);
968 	if (rc)
969 		return ERR_PTR(rc);
970 	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
971 	if (rc) {
972 		cifs_delete_mid(mid);
973 		return ERR_PTR(rc);
974 	}
975 	return mid;
976 }
977 
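/*
 * Callbacks used for compound requests: every part of the chain returns the
 * credits granted by its response to the server's pool; only the last part
 * wakes the thread sleeping in compound_send_recv(), and the "cancelled"
 * variant additionally frees the mid because nobody is left waiting for it.
 */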
978 static void
979 cifs_compound_callback(struct mid_q_entry *mid)
980 {
981 	struct TCP_Server_Info *server = mid->server;
982 	struct cifs_credits credits;
983 
984 	credits.value = server->ops->get_credits(mid);
985 	credits.instance = server->reconnect_instance;
986 
987 	add_credits(server, &credits, mid->optype);
988 }
989 
990 static void
991 cifs_compound_last_callback(struct mid_q_entry *mid)
992 {
993 	cifs_compound_callback(mid);
994 	cifs_wake_up_task(mid);
995 }
996 
997 static void
998 cifs_cancelled_callback(struct mid_q_entry *mid)
999 {
1000 	cifs_compound_callback(mid);
1001 	DeleteMidQEntry(mid);
1002 }
1003 
1004 /*
1005  * Return a channel (master if none) of @ses that can be used to send
1006  * regular requests.
1007  *
1008  * If we are currently binding a new channel (negprot/sess.setup),
1009  * return the new incomplete channel.
1010  */
1011 struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
1012 {
1013 	uint index = 0;
1014 
1015 	if (!ses)
1016 		return NULL;
1017 
1018 	if (!ses->binding) {
1019 		/* round robin */
1020 		if (ses->chan_count > 1) {
1021 			index = (uint)atomic_inc_return(&ses->chan_seq);
1022 			index %= ses->chan_count;
1023 		}
1024 		return ses->chans[index].server;
1025 	} else {
1026 		return cifs_ses_server(ses);
1027 	}
1028 }
1029 
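/*
 * Synchronous send/receive of a chain of up to MAX_COMPOUND requests:
 * obtain credits for every part, sign and queue the mids under srv_mutex so
 * signing order matches send order, send the chain, wait for all responses
 * (cancelling outstanding mids if interrupted), then hand each response
 * buffer back through resp_iov/resp_buf_type and release the mids.
 */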
1030 int
1031 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1032 		   struct TCP_Server_Info *server,
1033 		   const int flags, const int num_rqst, struct smb_rqst *rqst,
1034 		   int *resp_buf_type, struct kvec *resp_iov)
1035 {
1036 	int i, j, optype, rc = 0;
1037 	struct mid_q_entry *midQ[MAX_COMPOUND];
1038 	bool cancelled_mid[MAX_COMPOUND] = {false};
1039 	struct cifs_credits credits[MAX_COMPOUND] = {
1040 		{ .value = 0, .instance = 0 }
1041 	};
1042 	unsigned int instance;
1043 	char *buf;
1044 
1045 	optype = flags & CIFS_OP_MASK;
1046 
1047 	for (i = 0; i < num_rqst; i++)
1048 		resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */
1049 
1050 	if (!ses || !ses->server || !server) {
1051 		cifs_dbg(VFS, "Null session\n");
1052 		return -EIO;
1053 	}
1054 
1055 	if (server->tcpStatus == CifsExiting)
1056 		return -ENOENT;
1057 
1058 	/*
1059 	 * Wait for all the requests to become available.
1060 	 * This approach still leaves the possibility to be stuck waiting for
1061 	 * credits if the server doesn't grant credits to the outstanding
1062 	 * requests and if the client is completely idle, not generating any
1063 	 * other requests.
1064 	 * This can be handled by the eventual session reconnect.
1065 	 */
1066 	rc = wait_for_compound_request(server, num_rqst, flags,
1067 				       &instance);
1068 	if (rc)
1069 		return rc;
1070 
1071 	for (i = 0; i < num_rqst; i++) {
1072 		credits[i].value = 1;
1073 		credits[i].instance = instance;
1074 	}
1075 
1076 	/*
1077 	 * Make sure that we sign in the same order that we send on this socket
1078 	 * and avoid races inside tcp sendmsg code that could cause corruption
1079 	 * of smb data.
1080 	 */
1081 
1082 	mutex_lock(&server->srv_mutex);
1083 
1084 	/*
1085 	 * All the parts of the compound chain must use credits obtained from
1086 	 * the same session. We can not use credits obtained from the previous
1087 	 * session to send this request. Check if there were reconnects after
1088 	 * we obtained credits and return -EAGAIN in such cases to let callers
1089 	 * handle it.
1090 	 */
1091 	if (instance != server->reconnect_instance) {
1092 		mutex_unlock(&server->srv_mutex);
1093 		for (j = 0; j < num_rqst; j++)
1094 			add_credits(server, &credits[j], optype);
1095 		return -EAGAIN;
1096 	}
1097 
1098 	for (i = 0; i < num_rqst; i++) {
1099 		midQ[i] = server->ops->setup_request(ses, server, &rqst[i]);
1100 		if (IS_ERR(midQ[i])) {
1101 			revert_current_mid(server, i);
1102 			for (j = 0; j < i; j++)
1103 				cifs_delete_mid(midQ[j]);
1104 			mutex_unlock(&server->srv_mutex);
1105 
1106 			/* Update # of requests on wire to server */
1107 			for (j = 0; j < num_rqst; j++)
1108 				add_credits(server, &credits[j], optype);
1109 			return PTR_ERR(midQ[i]);
1110 		}
1111 
1112 		midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1113 		midQ[i]->optype = optype;
1114 		/*
1115 		 * Invoke callback for every part of the compound chain
1116 		 * to calculate credits properly. Wake up this thread only when
1117 		 * the last element is received.
1118 		 */
1119 		if (i < num_rqst - 1)
1120 			midQ[i]->callback = cifs_compound_callback;
1121 		else
1122 			midQ[i]->callback = cifs_compound_last_callback;
1123 	}
1124 	cifs_in_send_inc(server);
1125 	rc = smb_send_rqst(server, num_rqst, rqst, flags);
1126 	cifs_in_send_dec(server);
1127 
1128 	for (i = 0; i < num_rqst; i++)
1129 		cifs_save_when_sent(midQ[i]);
1130 
1131 	if (rc < 0) {
1132 		revert_current_mid(server, num_rqst);
1133 		server->sequence_number -= 2;
1134 	}
1135 
1136 	mutex_unlock(&server->srv_mutex);
1137 
1138 	/*
1139 	 * If sending failed for some reason or it is an oplock break that we
1140 	 * will not receive a response to - return credits back
1141 	 */
1142 	if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1143 		for (i = 0; i < num_rqst; i++)
1144 			add_credits(server, &credits[i], optype);
1145 		goto out;
1146 	}
1147 
1148 	/*
1149 	 * At this point the request is passed to the network stack - we assume
1150 	 * that any credits taken from the server structure on the client have
1151 	 * been spent and we can't return them back. Once we receive responses
1152 	 * we will collect credits granted by the server in the mid callbacks
1153 	 * and add those credits to the server structure.
1154 	 */
1155 
1156 	/*
1157 	 * Compounding is never used during session establishment.
1158 	 */
1159 	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1160 		mutex_lock(&server->srv_mutex);
1161 		smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1162 					   rqst[0].rq_nvec);
1163 		mutex_unlock(&server->srv_mutex);
1164 	}
1165 
1166 	for (i = 0; i < num_rqst; i++) {
1167 		rc = wait_for_response(server, midQ[i]);
1168 		if (rc != 0)
1169 			break;
1170 	}
1171 	if (rc != 0) {
1172 		for (; i < num_rqst; i++) {
1173 			cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1174 				 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1175 			send_cancel(server, &rqst[i], midQ[i]);
1176 			spin_lock(&GlobalMid_Lock);
1177 			midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1178 			if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1179 				midQ[i]->callback = cifs_cancelled_callback;
1180 				cancelled_mid[i] = true;
1181 				credits[i].value = 0;
1182 			}
1183 			spin_unlock(&GlobalMid_Lock);
1184 		}
1185 	}
1186 
1187 	for (i = 0; i < num_rqst; i++) {
1188 		if (rc < 0)
1189 			goto out;
1190 
1191 		rc = cifs_sync_mid_result(midQ[i], server);
1192 		if (rc != 0) {
1193 			/* mark this mid as cancelled so it is not freed below */
1194 			cancelled_mid[i] = true;
1195 			goto out;
1196 		}
1197 
1198 		if (!midQ[i]->resp_buf ||
1199 		    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1200 			rc = -EIO;
1201 			cifs_dbg(FYI, "Bad MID state?\n");
1202 			goto out;
1203 		}
1204 
1205 		buf = (char *)midQ[i]->resp_buf;
1206 		resp_iov[i].iov_base = buf;
1207 		resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1208 			server->vals->header_preamble_size;
1209 
1210 		if (midQ[i]->large_buf)
1211 			resp_buf_type[i] = CIFS_LARGE_BUFFER;
1212 		else
1213 			resp_buf_type[i] = CIFS_SMALL_BUFFER;
1214 
1215 		rc = server->ops->check_receive(midQ[i], server,
1216 						     flags & CIFS_LOG_ERROR);
1217 
1218 		/* mark it so buf will not be freed by cifs_delete_mid */
1219 		if ((flags & CIFS_NO_RSP_BUF) == 0)
1220 			midQ[i]->resp_buf = NULL;
1221 
1222 	}
1223 
1224 	/*
1225 	 * Compounding is never used during session establishment.
1226 	 */
1227 	if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1228 		struct kvec iov = {
1229 			.iov_base = resp_iov[0].iov_base,
1230 			.iov_len = resp_iov[0].iov_len
1231 		};
1232 		mutex_lock(&server->srv_mutex);
1233 		smb311_update_preauth_hash(ses, &iov, 1);
1234 		mutex_unlock(&server->srv_mutex);
1235 	}
1236 
1237 out:
1238 	/*
1239 	 * This will dequeue all mids. After this it is important that the
1240 	 * demultiplex_thread will not process any of these mids any further.
1241 	 * This is prevented above by using a noop callback that will not
1242 	 * wake this thread except for the very last PDU.
1243 	 */
1244 	for (i = 0; i < num_rqst; i++) {
1245 		if (!cancelled_mid[i])
1246 			cifs_delete_mid(midQ[i]);
1247 	}
1248 
1249 	return rc;
1250 }
1251 
1252 int
1253 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1254 	       struct TCP_Server_Info *server,
1255 	       struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1256 	       struct kvec *resp_iov)
1257 {
1258 	return compound_send_recv(xid, ses, server, flags, 1,
1259 				  rqst, resp_buf_type, resp_iov);
1260 }
1261 
1262 int
1263 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1264 	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1265 	     const int flags, struct kvec *resp_iov)
1266 {
1267 	struct smb_rqst rqst;
1268 	struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1269 	int rc;
1270 
1271 	if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1272 		new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1273 					GFP_KERNEL);
1274 		if (!new_iov) {
1275 			/* otherwise cifs_send_recv below sets resp_buf_type */
1276 			*resp_buf_type = CIFS_NO_BUFFER;
1277 			return -ENOMEM;
1278 		}
1279 	} else
1280 		new_iov = s_iov;
1281 
1282 	/* 1st iov is a RFC1001 length followed by the rest of the packet */
1283 	memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1284 
1285 	new_iov[0].iov_base = new_iov[1].iov_base;
1286 	new_iov[0].iov_len = 4;
1287 	new_iov[1].iov_base += 4;
1288 	new_iov[1].iov_len -= 4;
1289 
1290 	memset(&rqst, 0, sizeof(struct smb_rqst));
1291 	rqst.rq_iov = new_iov;
1292 	rqst.rq_nvec = n_vec + 1;
1293 
1294 	rc = cifs_send_recv(xid, ses, ses->server,
1295 			    &rqst, resp_buf_type, flags, resp_iov);
1296 	if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1297 		kfree(new_iov);
1298 	return rc;
1299 }
1300 
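/*
 * Synchronous send/receive of a single pre-marshalled legacy SMB using
 * in_buf/out_buf rather than kvecs; the caller gets back the RFC1002 length
 * of the response in *pbytes_returned.
 */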
1301 int
1302 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1303 	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1304 	    int *pbytes_returned, const int flags)
1305 {
1306 	int rc = 0;
1307 	struct mid_q_entry *midQ;
1308 	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1309 	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1310 	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1311 	struct cifs_credits credits = { .value = 1, .instance = 0 };
1312 	struct TCP_Server_Info *server;
1313 
1314 	if (ses == NULL) {
1315 		cifs_dbg(VFS, "Null smb session\n");
1316 		return -EIO;
1317 	}
1318 	server = ses->server;
1319 	if (server == NULL) {
1320 		cifs_dbg(VFS, "Null tcp session\n");
1321 		return -EIO;
1322 	}
1323 
1324 	if (server->tcpStatus == CifsExiting)
1325 		return -ENOENT;
1326 
1327 	/* Ensure that we do not send more than 50 overlapping requests
1328 	   to the same server. We may make this configurable later or
1329 	   use ses->maxReq */
1330 
1331 	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1332 		cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1333 				len);
1334 		return -EIO;
1335 	}
1336 
1337 	rc = wait_for_free_request(server, flags, &credits.instance);
1338 	if (rc)
1339 		return rc;
1340 
1341 	/* make sure that we sign in the same order that we send on this socket
1342 	   and avoid races inside tcp sendmsg code that could cause corruption
1343 	   of smb data */
1344 
1345 	mutex_lock(&server->srv_mutex);
1346 
1347 	rc = allocate_mid(ses, in_buf, &midQ);
1348 	if (rc) {
1349 		mutex_unlock(&server->srv_mutex);
1350 		/* Update # of requests on wire to server */
1351 		add_credits(server, &credits, 0);
1352 		return rc;
1353 	}
1354 
1355 	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1356 	if (rc) {
1357 		mutex_unlock(&server->srv_mutex);
1358 		goto out;
1359 	}
1360 
1361 	midQ->mid_state = MID_REQUEST_SUBMITTED;
1362 
1363 	cifs_in_send_inc(server);
1364 	rc = smb_send(server, in_buf, len);
1365 	cifs_in_send_dec(server);
1366 	cifs_save_when_sent(midQ);
1367 
1368 	if (rc < 0)
1369 		server->sequence_number -= 2;
1370 
1371 	mutex_unlock(&server->srv_mutex);
1372 
1373 	if (rc < 0)
1374 		goto out;
1375 
1376 	rc = wait_for_response(server, midQ);
1377 	if (rc != 0) {
1378 		send_cancel(server, &rqst, midQ);
1379 		spin_lock(&GlobalMid_Lock);
1380 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1381 			/* no longer considered to be "in-flight" */
1382 			midQ->callback = DeleteMidQEntry;
1383 			spin_unlock(&GlobalMid_Lock);
1384 			add_credits(server, &credits, 0);
1385 			return rc;
1386 		}
1387 		spin_unlock(&GlobalMid_Lock);
1388 	}
1389 
1390 	rc = cifs_sync_mid_result(midQ, server);
1391 	if (rc != 0) {
1392 		add_credits(server, &credits, 0);
1393 		return rc;
1394 	}
1395 
1396 	if (!midQ->resp_buf || !out_buf ||
1397 	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
1398 		rc = -EIO;
1399 		cifs_server_dbg(VFS, "Bad MID state?\n");
1400 		goto out;
1401 	}
1402 
1403 	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1404 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1405 	rc = cifs_check_receive(midQ, server, 0);
1406 out:
1407 	cifs_delete_mid(midQ);
1408 	add_credits(server, &credits, 0);
1409 
1410 	return rc;
1411 }
1412 
1413 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1414    blocking lock to return. */
1415 
1416 static int
1417 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1418 			struct smb_hdr *in_buf,
1419 			struct smb_hdr *out_buf)
1420 {
1421 	int bytes_returned;
1422 	struct cifs_ses *ses = tcon->ses;
1423 	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1424 
1425 	/* We just modify the current in_buf to change
1426 	   the type of lock from LOCKING_ANDX_SHARED_LOCK
1427 	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
1428 	   LOCKING_ANDX_CANCEL_LOCK. */
1429 
1430 	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1431 	pSMB->Timeout = 0;
1432 	pSMB->hdr.Mid = get_next_mid(ses->server);
1433 
1434 	return SendReceive(xid, ses, in_buf, out_buf,
1435 			&bytes_returned, 0);
1436 }
1437 
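/*
 * Variant of SendReceive() for blocking lock requests: the wait for the
 * response is interruptible, and if a signal arrives the blocked lock is
 * cancelled on the server (NT_CANCEL for POSIX locks sent via
 * SMB_COM_TRANSACTION2, LOCKINGX_CANCEL_LOCK otherwise) before waiting for
 * the final reply; -EACCES after such a restart is turned into -ERESTARTSYS.
 */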
1438 int
1439 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1440 	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1441 	    int *pbytes_returned)
1442 {
1443 	int rc = 0;
1444 	int rstart = 0;
1445 	struct mid_q_entry *midQ;
1446 	struct cifs_ses *ses;
1447 	unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1448 	struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1449 	struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1450 	unsigned int instance;
1451 	struct TCP_Server_Info *server;
1452 
1453 	if (tcon == NULL || tcon->ses == NULL) {
1454 		cifs_dbg(VFS, "Null smb session\n");
1455 		return -EIO;
1456 	}
1457 	ses = tcon->ses;
1458 	server = ses->server;
1459 
1460 	if (server == NULL) {
1461 		cifs_dbg(VFS, "Null tcp session\n");
1462 		return -EIO;
1463 	}
1464 
1465 	if (server->tcpStatus == CifsExiting)
1466 		return -ENOENT;
1467 
1468 	/* Ensure that we do not send more than 50 overlapping requests
1469 	   to the same server. We may make this configurable later or
1470 	   use ses->maxReq */
1471 
1472 	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1473 		cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
1474 			      len);
1475 		return -EIO;
1476 	}
1477 
1478 	rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1479 	if (rc)
1480 		return rc;
1481 
1482 	/* make sure that we sign in the same order that we send on this socket
1483 	   and avoid races inside tcp sendmsg code that could cause corruption
1484 	   of smb data */
1485 
1486 	mutex_lock(&server->srv_mutex);
1487 
1488 	rc = allocate_mid(ses, in_buf, &midQ);
1489 	if (rc) {
1490 		mutex_unlock(&server->srv_mutex);
1491 		return rc;
1492 	}
1493 
1494 	rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1495 	if (rc) {
1496 		cifs_delete_mid(midQ);
1497 		mutex_unlock(&server->srv_mutex);
1498 		return rc;
1499 	}
1500 
1501 	midQ->mid_state = MID_REQUEST_SUBMITTED;
1502 	cifs_in_send_inc(server);
1503 	rc = smb_send(server, in_buf, len);
1504 	cifs_in_send_dec(server);
1505 	cifs_save_when_sent(midQ);
1506 
1507 	if (rc < 0)
1508 		server->sequence_number -= 2;
1509 
1510 	mutex_unlock(&server->srv_mutex);
1511 
1512 	if (rc < 0) {
1513 		cifs_delete_mid(midQ);
1514 		return rc;
1515 	}
1516 
1517 	/* Wait for a reply - allow signals to interrupt. */
1518 	rc = wait_event_interruptible(server->response_q,
1519 		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1520 		((server->tcpStatus != CifsGood) &&
1521 		 (server->tcpStatus != CifsNew)));
1522 
1523 	/* Were we interrupted by a signal ? */
1524 	if ((rc == -ERESTARTSYS) &&
1525 		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1526 		((server->tcpStatus == CifsGood) ||
1527 		 (server->tcpStatus == CifsNew))) {
1528 
1529 		if (in_buf->Command == SMB_COM_TRANSACTION2) {
1530 			/* POSIX lock. We send a NT_CANCEL SMB to cause the
1531 			   blocking lock to return. */
1532 			rc = send_cancel(server, &rqst, midQ);
1533 			if (rc) {
1534 				cifs_delete_mid(midQ);
1535 				return rc;
1536 			}
1537 		} else {
1538 			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1539 			   to cause the blocking lock to return. */
1540 
1541 			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1542 
1543 			/* If we get -ENOLCK back the lock may have
1544 			   already been removed. Don't exit in this case. */
1545 			if (rc && rc != -ENOLCK) {
1546 				cifs_delete_mid(midQ);
1547 				return rc;
1548 			}
1549 		}
1550 
1551 		rc = wait_for_response(server, midQ);
1552 		if (rc) {
1553 			send_cancel(server, &rqst, midQ);
1554 			spin_lock(&GlobalMid_Lock);
1555 			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1556 				/* no longer considered to be "in-flight" */
1557 				midQ->callback = DeleteMidQEntry;
1558 				spin_unlock(&GlobalMid_Lock);
1559 				return rc;
1560 			}
1561 			spin_unlock(&GlobalMid_Lock);
1562 		}
1563 
1564 		/* We got the response - restart system call. */
1565 		rstart = 1;
1566 	}
1567 
1568 	rc = cifs_sync_mid_result(midQ, server);
1569 	if (rc != 0)
1570 		return rc;
1571 
1572 	/* rcvd frame is ok */
1573 	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1574 		rc = -EIO;
1575 		cifs_tcon_dbg(VFS, "Bad MID state?\n");
1576 		goto out;
1577 	}
1578 
1579 	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1580 	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1581 	rc = cifs_check_receive(midQ, server, 0);
1582 out:
1583 	cifs_delete_mid(midQ);
1584 	if (rstart && rc == -EACCES)
1585 		return -ERESTARTSYS;
1586 	return rc;
1587 }
1588