/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

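/*
 * Default mid callback: the mid was allocated for a synchronous request,
 * so simply wake the task that is sleeping in SendReceive* (stored in
 * callback_data at allocation time).
 */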
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

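/*
 * Allocate a mid (multiplex id) queue entry for a request that is about
 * to be sent to @server.  The entry records the command, timestamps and
 * completion callback, and is returned in state MID_REQUEST_ALLOCATED.
 */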
47 struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr * smb_buffer,struct TCP_Server_Info * server)48 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
49 {
50 struct mid_q_entry *temp;
51
52 if (server == NULL) {
53 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
54 return NULL;
55 }
56
57 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
58 memset(temp, 0, sizeof(struct mid_q_entry));
59 kref_init(&temp->refcount);
60 temp->mid = get_mid(smb_buffer);
61 temp->pid = current->pid;
62 temp->command = cpu_to_le16(smb_buffer->Command);
63 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
64 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
65 /* when mid allocated can be before when sent */
66 temp->when_alloc = jiffies;
67 temp->server = server;
68
69 /*
70 * The default is for the mid to be synchronous, so the
71 * default callback just wakes up the current task.
72 */
73 temp->callback = cifs_wake_up_task;
74 temp->callback_data = current;
75
76 atomic_inc(&midCount);
77 temp->mid_state = MID_REQUEST_ALLOCATED;
78 return temp;
79 }
80
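/*
 * Drop a reference to a mid; when the last reference goes away the entry
 * is returned to the mid mempool.  The kref_put is done under
 * GlobalMid_Lock so it cannot race with lookups of the pending mid queue.
 */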
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
                                               refcount);

        mempool_free(mid, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

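/*
 * Tear down a mid: release its response buffer, optionally log slow
 * responses (CONFIG_CIFS_STATS2), and drop the final reference.  Lock
 * commands are excluded from the slow-response check since blocking
 * locks are expected to take a long time.
 */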
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        unsigned long now;
#endif
        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        /*
         * Commands taking longer than one second are indications that
         * something is wrong, unless it is quite a slow link or server.
         */
        if (time_after(now, midEntry->when_alloc + HZ)) {
                if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                now - midEntry->when_alloc,
                                now - midEntry->when_sent,
                                now - midEntry->when_received);
                }
        }
#endif
        cifs_mid_q_entry_release(midEntry);
}

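/*
 * Unlink a mid from the pending queue and then free it.  Callers must
 * not touch the mid afterwards.
 */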
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        list_del(&mid->qhead);
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear.  The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet.  In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * Should never happen; letting the socket clear
                         * before retrying is our only obvious option here.
                         */
                        cifs_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

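/*
 * Total up the length of a request: the kvec array plus, if present, the
 * trailing page array (full pages of rq_pagesz bytes plus a final
 * partial page of rq_tailsz bytes).
 */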
static unsigned long
rqst_len(struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov = rqst->rq_iov;
        unsigned long buflen = 0;

        /* total up iov array first */
        for (i = 0; i < rqst->rq_nvec; i++)
                buflen += iov[i].iov_len;

        /* add in the page array if there is one */
        if (rqst->rq_npages) {
                buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
                buflen += rqst->rq_tailsz;
        }

        return buflen;
}

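/*
 * Send a fully marshalled request (RFC1002 length, SMB header, and any
 * data pages) on the socket.  The socket is corked around the sends so
 * the request goes out in as few TCP segments as possible; on a partial
 * send the session is marked for reconnect so the server discards the
 * partial SMB.
 */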
static int
__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct kvec *iov = rqst->rq_iov;
        int n_vec = rqst->rq_nvec;
        unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
        unsigned long send_length;
        unsigned int i;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;

        if (ssocket == NULL)
                return -ENOTSOCK;

        /* sanity check send length */
        send_length = rqst_len(rqst);
        if (send_length != smb_buf_length + 4) {
                WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
                     send_length, smb_buf_length);
                return -EIO;
        }

        if (n_vec < 2)
                return -EIO;

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
        dump_smb(iov[0].iov_base, iov[0].iov_len);
        dump_smb(iov[1].iov_base, iov[1].iov_len);

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        size = 0;
        for (i = 0; i < n_vec; i++)
                size += iov[i].iov_len;

        iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);

        rc = smb_send_kvec(server, &smb_msg, &sent);
        if (rc < 0)
                goto uncork;

        total_len += sent;

        /* now walk the page array and send each page in it */
        for (i = 0; i < rqst->rq_npages; i++) {
                size_t len = i == rqst->rq_npages - 1
                                ? rqst->rq_tailsz
                                : rqst->rq_pagesz;
                struct bio_vec bvec = {
                        .bv_page = rqst->rq_pages[i],
                        .bv_len = len
                };
                iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
                              &bvec, 1, len);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        break;

                total_len += sent;
        }

uncork:
        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         smb_buf_length + 4, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one.  We need to kill the
                 * socket so the server throws away the partial SMB.
                 */
                server->tcpStatus = CifsNeedReconnect;
        }

        if (rc < 0 && rc != -EINTR)
                cifs_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

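/*
 * Send a request, transforming (e.g. encrypting) it first if the caller
 * asked for that via CIFS_TRANSFORM_REQ.  The transform callbacks are
 * provided by the dialect-specific server->ops.
 */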
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
{
        struct smb_rqst cur_rqst;
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, rqst);

        if (!server->ops->init_transform_rq ||
            !server->ops->free_transform_rq) {
                cifs_dbg(VFS, "Encryption requested but transform callbacks are missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, &cur_rqst, rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, &cur_rqst);
        server->ops->free_transform_rq(&cur_rqst);
        return rc;
}

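/*
 * Convenience wrapper for sending a single marshalled SMB: split the
 * buffer into the 4-byte RFC1002 length field and the SMB itself, then
 * hand it to __smb_send_rqst().
 */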
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, &rqst);
}

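/*
 * Wait until a send credit is available, i.e. we are under the cap on
 * in-flight requests.  Async ops (e.g. oplock breaks) are never held
 * up; blocking ops consume no credit since they may legitimately block
 * on the server for a long time.
 */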
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
                      int *credits)
{
        int rc;

        spin_lock(&server->req_lock);
        if (timeout == CIFS_ASYNC_OP) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                *credits -= 1;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits <= 0) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable(server->request_q,
                                                 has_credits(server, credits));
                        cifs_num_waiters_dec(server);
                        if (rc)
                                return rc;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if (timeout != CIFS_BLOCKING_OP) {
                                *credits -= 1;
                                server->in_flight++;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
                      const int optype)
{
        int *val;

        val = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*val <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;
        return wait_for_free_credits(server, timeout, val);
}

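/*
 * Default implementation of the wait_mtu_credits hook for dialects that
 * have no credit-based flow control for large (multi-credit) requests:
 * grant the full size and charge no credits.
 */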
int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, unsigned int *credits)
{
        *num = size;
        *credits = 0;
        return 0;
}

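/*
 * Allocate a mid for @in_buf and add it to the server's pending queue,
 * after checking that the session and TCP connection are in a state
 * where sending this command makes sense (session setup and logoff
 * commands are allowed through while the session is coming up or going
 * down).
 */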
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

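/*
 * Sleep (freezably, killable) until the demultiplex thread moves the mid
 * out of MID_REQUEST_SUBMITTED.  A fatal signal turns into -ERESTARTSYS
 * so the caller can cancel the request.
 */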
static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags)
{
        int rc, timeout, optype;
        struct mid_q_entry *mid;
        unsigned int credits = 0;

        timeout = flags & CIFS_TIMEOUT_MASK;
        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, timeout, optype);
                if (rc)
                        return rc;
                credits = 1;
        }

        mutex_lock(&server->srv_mutex);
        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, credits, optype);
        return rc;
}

/*
 * Send an SMB request.  No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and
 * whether to log the NT STATUS code (error) before mapping it to a POSIX
 * error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RESP;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

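/*
 * Translate the final state of a synchronous mid into an errno.  On
 * success (MID_RESPONSE_RECEIVED) the mid is left alive for the caller
 * to consume; in every error case it is unlinked if necessary and freed
 * here.
 */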
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                list_del_init(&mid->qhead);
                cifs_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                        server->ops->send_cancel(server, rqst, mid) : 0;
}

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        /* convert the RFC1002 length into a more usable form */
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        /* verify the signature if the server signs its responses */
        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

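/*
 * Core synchronous send/receive path: reserve a credit, sign and send
 * the request under srv_mutex, wait for the demultiplex thread to fill
 * in the response, then validate it and hand the response buffer back to
 * the caller via @resp_iov.
 */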
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        int rc = 0;
        int timeout, optype;
        struct mid_q_entry *midQ;
        unsigned int credits = 1;
        char *buf;

        timeout = flags & CIFS_TIMEOUT_MASK;
        optype = flags & CIFS_OP_MASK;

        *resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        rc = wait_for_free_request(ses->server, timeout, optype);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        midQ = ses->server->ops->setup_request(ses, rqst);
        if (IS_ERR(midQ)) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(ses->server, 1, optype);
                return PTR_ERR(midQ);
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send_rqst(ses->server, rqst, flags);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;
        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if (timeout == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
                send_cancel(ses->server, rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        midQ->mid_flags |= MID_WAIT_CANCELLED;
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, 1, optype);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                add_credits(ses->server, 1, optype);
                return rc;
        }

        if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(FYI, "Bad MID state?\n");
                goto out;
        }

        buf = (char *)midQ->resp_buf;
        resp_iov->iov_base = buf;
        resp_iov->iov_len = get_rfc1002_length(buf) + 4;
        if (midQ->large_buf)
                *resp_buf_type = CIFS_LARGE_BUFFER;
        else
                *resp_buf_type = CIFS_SMALL_BUFFER;

        credits = ses->server->ops->get_credits(midQ);

        rc = ses->server->ops->check_receive(midQ, ses->server,
                                             flags & CIFS_LOG_ERROR);

        /* mark it so buf will not be freed by cifs_delete_mid */
        if ((flags & CIFS_NO_RESP) == 0)
                midQ->resp_buf = NULL;
out:
        cifs_delete_mid(midQ);
        add_credits(ses->server, credits, optype);

        return rc;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec *new_iov;
        int rc;

        new_iov = kmalloc(sizeof(struct kvec) * (n_vec + 1), GFP_KERNEL);
        if (!new_iov)
                return -ENOMEM;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        kfree(new_iov);
        return rc;
}

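/*
 * Legacy synchronous send/receive for callers that pass a single
 * pre-marshalled SMB buffer: send @in_buf, wait for the reply, then copy
 * it into @out_buf, which the caller must size for the largest response
 * it expects.
 */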
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int timeout)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, timeout, 0);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(ses->server, 1, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0)
                goto out;

        if (timeout == CIFS_ASYNC_OP)
                goto out;

        rc = wait_for_response(ses->server, midQ);
        if (rc != 0) {
                send_cancel(ses->server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(ses->server, 1, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                add_credits(ses->server, 1, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(ses->server, 1, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change
         * the type of lock from LOCKING_ANDX_SHARED_LOCK
         * or LOCKING_ANDX_EXCLUSIVE_LOCK to
         * LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                           &bytes_returned, 0);
}

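/*
 * Synchronous send/receive for blocking SMB1 lock requests: the wait for
 * the response is interruptible, and if a signal arrives we cancel the
 * outstanding lock (with an NT_CANCEL for POSIX locks sent via trans2,
 * or a LOCKINGX_CANCEL_LOCK for Windows-style locks) before returning.
 */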
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf, struct smb_hdr *out_buf,
                        int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;

        if (ses->server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (ses->server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&ses->server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&ses->server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(ses->server);
        rc = smb_send(ses->server, in_buf, len);
        cifs_in_send_dec(ses->server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                ses->server->sequence_number -= 2;

        mutex_unlock(&ses->server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(ses->server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((ses->server->tcpStatus != CifsGood) &&
                 (ses->server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
            (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
            ((ses->server->tcpStatus == CifsGood) ||
             (ses->server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send a NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(ses->server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(ses->server, midQ);
                if (rc) {
                        send_cancel(ses->server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, ses->server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}