/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *   Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = get_mid(smb_buffer);
		temp->pid = current->pid;
		temp->command = cpu_to_le16(smb_buffer->Command);
		cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;
		temp->server = server;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = cifs_wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
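		/* don't log the blocking lock command, which is expected
		   to be slow */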
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			pr_debug(" CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}

void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @iov:	Pointer to array of kvecs
 * @n_vec:	length of kvec array
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
		size_t *sent)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear.  The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet.  In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -EAGAIN) {
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
					 ssocket);
				rc = -EAGAIN;
				break;
			}
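			/* back off exponentially: 2, 4, 8, ... ms */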
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining);
			break;
		}

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cifs_dbg(VFS, "tcp sent no data\n");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/*
		 * Advance first_vec past the kvecs that were sent in full
		 * and trim the first partially-sent one. Note that this
		 * loop reuses i; it is reset to 0 below.
		 */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}

/**
 * cifs_rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
 * @rqst: pointer to smb_rqst
 * @idx: index into the array of the page
 * @iov: pointer to struct kvec that will hold the result
 *
 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
 * The page will be kmapped and the address placed into iov_base. The length
 * is then set to rq_tailsz for the last page, or rq_pagesz otherwise.
 */
void
cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
			struct kvec *iov)
{
	/*
	 * FIXME: We could avoid this kmap altogether if we used
	 * kernel_sendpage instead of kernel_sendmsg. That will only
	 * work if signing is disabled though as sendpage inlines the
	 * page directly into the fraglist. If userspace modifies the
	 * page after we calculate the signature, then the server will
	 * reject it and may break the connection. kernel_sendmsg does
	 * an extra copy of the data and avoids that issue.
	 */
	iov->iov_base = kmap(rqst->rq_pages[idx]);

	/* if last page, don't send beyond this offset into page */
	if (idx == (rqst->rq_npages - 1))
		iov->iov_len = rqst->rq_tailsz;
	else
		iov->iov_len = rqst->rq_pagesz;
}

static unsigned long
rqst_len(struct smb_rqst *rqst)
{
	unsigned int i;
	struct kvec *iov = rqst->rq_iov;
	unsigned long buflen = 0;

	/* total up iov array first */
	for (i = 0; i < rqst->rq_nvec; i++)
		buflen += iov[i].iov_len;

	/* add in the page array if there is one */
	if (rqst->rq_npages) {
		buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
		buflen += rqst->rq_tailsz;
	}

	return buflen;
}

static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct kvec *iov = rqst->rq_iov;
	int n_vec = rqst->rq_nvec;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	unsigned long send_length;
	unsigned int i;
	size_t total_len = 0, sent;
	struct socket *ssocket = server->ssocket;
	int val = 1;

	if (ssocket == NULL)
		return -ENOTSOCK;

	/* sanity check send length */
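	/*
	 * rqst_len() counts the 4-byte RFC 1002 length field in iov[0];
	 * smb_buf_length does not include it, hence the "+ 4".
	 */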
	send_length = rqst_len(rqst);
	if (send_length != smb_buf_length + 4) {
		WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
			send_length, smb_buf_length);
		return -EIO;
	}

	cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
	dump_smb(iov[0].iov_base, iov[0].iov_len);

	/* cork the socket so the length field and SMB frame coalesce */
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	rc = smb_send_kvec(server, iov, n_vec, &sent);
	if (rc < 0)
		goto uncork;

	total_len += sent;

	/* now walk the page array and send each page in it */
	for (i = 0; i < rqst->rq_npages; i++) {
		struct kvec p_iov;

		cifs_rqst_page_to_kvec(rqst, i, &p_iov);
		rc = smb_send_kvec(server, &p_iov, 1, &sent);
		kunmap(rqst->rq_pages[i]);
		if (rc < 0)
			break;

		total_len += sent;
	}

uncork:
	/* uncork it */
	val = 0;
	kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
				(char *)&val, sizeof(val));

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
			 smb_buf_length + 4, total_len);
		/*
		 * If we have only sent part of an SMB then the next SMB could
		 * be taken as the remainder of this one. We need to kill the
		 * socket so the server throws away the partial SMB
		 */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cifs_dbg(VFS, "Error %d sending data on socket to server\n",
			 rc);
	else if (rc > 0)
		rc = 0;

	return rc;
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	int *val;

	val = server->ops->get_credits_field(server, optype);
	/* Since an echo is already inflight, no need to wait to send another */
	if (*val <= 0 && optype == CIFS_ECHO_OP)
		return -EAGAIN;
	return wait_for_free_credits(server, timeout, val);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
		      unsigned int *num, unsigned int *credits)
{
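	/*
	 * SMB1 has no MTU credit mechanism, so grant the full requested
	 * size and charge no credits.
	 */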
	*num = size;
	*credits = 0;
	return 0;
}

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
		return -EAGAIN;
	}

	if (ses->status == CifsNew) {
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
			(in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}

	if (ses->status == CifsExiting) {
		/* check if SMB session is bad because we are setting it up */
		if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
			return -EAGAIN;
		/* else ok - we are shutting down session */
	}

	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

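	/*
	 * Freezable, killable wait: the task may be frozen for suspend or
	 * killed by a fatal signal, but ordinary signals do not wake it.
	 */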
	error = wait_event_freezekillable_unsafe(server->response_q,
				    midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sign)
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return ERR_PTR(-ENOMEM);

	rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
	if (rc) {
		DeleteMidQEntry(mid);
		return ERR_PTR(rc);
	}

	return mid;
}

/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;
	unsigned int credits = 0;

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	if ((flags & CIFS_HAS_CREDITS) == 0) {
		rc = wait_for_free_request(server, timeout, optype);
		if (rc)
			return rc;
		credits = 1;
	}

	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits_and_wake_if(server, credits, optype);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);

	if (rc < 0) {
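		/*
		 * Signing consumed two sequence numbers (one for the
		 * request, one for the expected response); roll them back
		 * since nothing went out on the wire.
		 */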
		server->sequence_number -= 2;
		cifs_delete_mid(mid);
	}

	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	add_credits_and_wake_if(server, credits, optype);
	return rc;
}

/*
 * Send an SMB Request.  No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
		 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
			 __func__, mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	mutex_lock(&server->srv_mutex);
	DeleteMidQEntry(mid);
	mutex_unlock(&server->srv_mutex);
	return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
{
	return server->ops->send_cancel ?
				server->ops->send_cancel(server, buf, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* verify the signature if the server signs its responses */
	if (server->sign) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number);
		if (rc)
			cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
				 rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return ERR_PTR(rc);
	rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
	if (rc) {
		cifs_delete_mid(mid);
		return ERR_PTR(rc);
	}
	return mid;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cifs_dbg(VFS, "Null session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
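			/* no longer considered to be "in-flight" */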
			midQ->mid_flags |= MID_WAIT_CANCELLED;
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(FYI, "Bad MID state?\n");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

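	/* how many credits to return to the server once we are done */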
	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf,
			struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			&bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cifs_dbg(VFS, "Null smb session\n");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cifs_dbg(VFS, "Null tcp session\n");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
			 be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	if (rc < 0)
		ses->server->sequence_number -= 2;

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
		(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
		((ses->server->tcpStatus == CifsGood) ||
		 (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cifs_dbg(VFS, "Bad MID state?\n");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}