/*
 * fs/cifs/transport.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org) 2006.
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include <linux/sched/signal.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "smbdirect.h"

/* Max number of iovectors we can use off the stack when sending requests. */
#define CIFS_MAX_IOV_SIZE 8

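/*
 * The default mid callback: wake up the task waiting on this mid.
 * callback_data holds the task_struct stashed by AllocMidQEntry().
 */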
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
        wake_up_process(mid->callback_data);
}

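/*
 * Allocate a mid queue entry for the given request header and server,
 * defaulting to a synchronous mid whose callback wakes the current task.
 * Returns NULL if server is NULL; the mempool allocation is assumed not to
 * fail here since GFP_NOFS allows the allocator to wait.
 */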
struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
        struct mid_q_entry *temp;

        if (server == NULL) {
                cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
                return NULL;
        }

        temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
        memset(temp, 0, sizeof(struct mid_q_entry));
        kref_init(&temp->refcount);
        temp->mid = get_mid(smb_buffer);
        temp->pid = current->pid;
        temp->command = cpu_to_le16(smb_buffer->Command);
        cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
        /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
        /* the time the mid was allocated can precede the time it is sent */
        temp->when_alloc = jiffies;
        temp->server = server;

        /*
         * The default is for the mid to be synchronous, so the
         * default callback just wakes up the current task.
         */
        get_task_struct(current);
        temp->creator = current;
        temp->callback = cifs_wake_up_task;
        temp->callback_data = current;

        atomic_inc(&midCount);
        temp->mid_state = MID_REQUEST_ALLOCATED;
        return temp;
}

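/*
 * Final kref release for a mid: let the server ops handle a response that
 * arrived for a cancelled request, update response-time statistics (under
 * CONFIG_CIFS_STATS2), drop the creator task reference and return the mid
 * to its mempool. Runs with GlobalMid_Lock held via kref_put() in
 * cifs_mid_q_entry_release().
 */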
static void _cifs_mid_q_entry_release(struct kref *refcount)
{
        struct mid_q_entry *midEntry =
                        container_of(refcount, struct mid_q_entry, refcount);
#ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
        unsigned long now;
        unsigned long roundtrip_time;
#endif
        struct TCP_Server_Info *server = midEntry->server;

        if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
            midEntry->mid_state == MID_RESPONSE_RECEIVED &&
            server->ops->handle_cancelled_mid)
                server->ops->handle_cancelled_mid(midEntry->resp_buf, server);

        midEntry->mid_state = MID_FREE;
        atomic_dec(&midCount);
        if (midEntry->large_buf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
        now = jiffies;
        if (now < midEntry->when_alloc)
                cifs_server_dbg(VFS, "invalid mid allocation time\n");
        roundtrip_time = now - midEntry->when_alloc;

        if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
                if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
                        server->slowest_cmd[smb_cmd] = roundtrip_time;
                        server->fastest_cmd[smb_cmd] = roundtrip_time;
                } else {
                        if (server->slowest_cmd[smb_cmd] < roundtrip_time)
                                server->slowest_cmd[smb_cmd] = roundtrip_time;
                        else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
                                server->fastest_cmd[smb_cmd] = roundtrip_time;
                }
                cifs_stats_inc(&server->num_cmds[smb_cmd]);
                server->time_per_cmd[smb_cmd] += roundtrip_time;
        }
        /*
         * Commands taking longer than one second (default) can be an
         * indication that something is wrong, unless it is quite a slow link
         * or a very busy server. Note that this calculation is unlikely or
         * impossible to wrap as long as slow_rsp_threshold is not set way
         * above the recommended maximum value (32767, i.e. 9 hours), and it
         * is generally harmless even if wrong since it only affects debug
         * counters - so leave it as a simple comparison rather than doing
         * multiple conversions and overflow checks.
         */
        if ((slow_rsp_threshold != 0) &&
            time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
            (midEntry->command != command)) {
                /*
                 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
                 * NB: le16_to_cpu returns unsigned so it cannot be negative below
                 */
                if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
                        cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);

                trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
                                    midEntry->when_sent, midEntry->when_received);
                if (cifsFYI & CIFS_TIMER) {
                        pr_debug(" CIFS slow rsp: cmd %d mid %llu",
                                 midEntry->command, midEntry->mid);
                        cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
                                  now - midEntry->when_alloc,
                                  now - midEntry->when_sent,
                                  now - midEntry->when_received);
                }
        }
#endif
        put_task_struct(midEntry->creator);

        mempool_free(midEntry, cifs_mid_poolp);
}

void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
        spin_unlock(&GlobalMid_Lock);
}

void DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        cifs_mid_q_entry_release(midEntry);
}

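/*
 * Unlink the mid from the pending queue (unless the demultiplex thread has
 * already removed it) and drop the queue's reference to it.
 */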
void
cifs_delete_mid(struct mid_q_entry *mid)
{
        spin_lock(&GlobalMid_Lock);
        if (!(mid->mid_flags & MID_DELETED)) {
                list_del_init(&mid->qhead);
                mid->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
}

/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server:	Server to send the data to
 * @smb_msg:	Message to send
 * @sent:	amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
              size_t *sent)
{
        int rc = 0;
        int retries = 0;
        struct socket *ssocket = server->ssocket;

        *sent = 0;

        smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
        smb_msg->msg_namelen = sizeof(struct sockaddr);
        smb_msg->msg_control = NULL;
        smb_msg->msg_controllen = 0;
        if (server->noblocksnd)
                smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
        else
                smb_msg->msg_flags = MSG_NOSIGNAL;

        while (msg_data_left(smb_msg)) {
                /*
                 * If blocking send, we try 3 times, since each can block
                 * for 5 seconds. For nonblocking we have to try more
                 * but wait increasing amounts of time allowing time for
                 * socket to clear. The overall time we wait in either
                 * case to send on the socket is about 15 seconds.
                 * Similarly we wait for 15 seconds for a response from
                 * the server in SendReceive[2] for the server to send
                 * a response back for most types of requests (except
                 * SMB Write past end of file which can be slow, and
                 * blocking lock operations). NFS waits slightly longer
                 * than CIFS, but this can make it take longer for
                 * nonresponsive servers to be detected and 15 seconds
                 * is more than enough time for modern networks to
                 * send a packet. In most cases if we fail to send
                 * after the retries we will kill the socket and
                 * reconnect which may clear the network problem.
                 */
                rc = sock_sendmsg(ssocket, smb_msg);
                if (rc == -EAGAIN) {
                        retries++;
                        if (retries >= 14 ||
                            (!server->noblocksnd && (retries > 2))) {
                                cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
                                         ssocket);
                                return -EAGAIN;
                        }
                        msleep(1 << retries);
                        continue;
                }

                if (rc < 0)
                        return rc;

                if (rc == 0) {
                        /*
                         * should never happen, letting socket clear before
                         * retrying is our only obvious option here
                         */
                        cifs_server_dbg(VFS, "tcp sent no data\n");
                        msleep(500);
                        continue;
                }

                /* send was at least partially successful */
                *sent += rc;
                retries = 0; /* in case we get ENOSPC on the next send */
        }
        return 0;
}

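/*
 * Return the total length in bytes of an smb_rqst: the iov array (skipping
 * the separate 4-byte RFC1002 length vector on SMB2+, which has no
 * preamble) plus the page array, using rq_offset and rq_tailsz for the
 * first and last pages.
 */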
unsigned long
smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        unsigned int i;
        struct kvec *iov;
        int nvec;
        unsigned long buflen = 0;

        if (server->vals->header_preamble_size == 0 &&
            rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
                iov = &rqst->rq_iov[1];
                nvec = rqst->rq_nvec - 1;
        } else {
                iov = rqst->rq_iov;
                nvec = rqst->rq_nvec;
        }

        /* total up iov array first */
        for (i = 0; i < nvec; i++)
                buflen += iov[i].iov_len;

        /*
         * Add in the page array if there is one. The caller needs to make
         * sure rq_offset and rq_tailsz are set correctly. If a buffer of
         * multiple pages ends at page boundary, rq_tailsz needs to be set to
         * PAGE_SIZE.
         */
        if (rqst->rq_npages) {
                if (rqst->rq_npages == 1)
                        buflen += rqst->rq_tailsz;
                else {
                        /*
                         * If there is more than one page, calculate the
                         * buffer length based on rq_offset and rq_tailsz
                         */
                        buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
                                        rqst->rq_offset;
                        buflen += rqst->rq_tailsz;
                }
        }

        return buflen;
}

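/*
 * Send one or more smb_rqst structures on the socket (or via smbdirect when
 * RDMA is enabled). Corks the socket, blocks all signals for the duration
 * of the send, prepends the RFC1002 length marker on SMB2+, and marks the
 * session for reconnect if only part of the data could be sent.
 */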
static int
__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                struct smb_rqst *rqst)
{
        int rc = 0;
        struct kvec *iov;
        int n_vec;
        unsigned int send_length = 0;
        unsigned int i, j;
        sigset_t mask, oldmask;
        size_t total_len = 0, sent, size;
        struct socket *ssocket = server->ssocket;
        struct msghdr smb_msg;
        int val = 1;
        __be32 rfc1002_marker;

        if (cifs_rdma_enabled(server)) {
                /* return -EAGAIN when connecting or reconnecting */
                rc = -EAGAIN;
                if (server->smbd_conn)
                        rc = smbd_send(server, num_rqst, rqst);
                goto smbd_done;
        }

        if (ssocket == NULL)
                return -EAGAIN;

        if (signal_pending(current)) {
                cifs_dbg(FYI, "signal is pending before sending any data\n");
                return -EINTR;
        }

        /* cork the socket */
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        for (j = 0; j < num_rqst; j++)
                send_length += smb_rqst_len(server, &rqst[j]);
        rfc1002_marker = cpu_to_be32(send_length);

        /*
         * We should not allow signals to interrupt the network send because
         * any partial send will cause session reconnects thus increasing
         * latency of system calls and overload a server with unnecessary
         * requests.
         */

        sigfillset(&mask);
        sigprocmask(SIG_BLOCK, &mask, &oldmask);

        /* Generate a rfc1002 marker for SMB2+ */
        if (server->vals->header_preamble_size == 0) {
                struct kvec hiov = {
                        .iov_base = &rfc1002_marker,
                        .iov_len = 4
                };
                iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;
                send_length += 4;
        }

        cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);

        for (j = 0; j < num_rqst; j++) {
                iov = rqst[j].rq_iov;
                n_vec = rqst[j].rq_nvec;

                size = 0;
                for (i = 0; i < n_vec; i++) {
                        dump_smb(iov[i].iov_base, iov[i].iov_len);
                        size += iov[i].iov_len;
                }

                iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);

                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto unmask;

                total_len += sent;

                /* now walk the page array and send each page in it */
                for (i = 0; i < rqst[j].rq_npages; i++) {
                        struct bio_vec bvec;

                        bvec.bv_page = rqst[j].rq_pages[i];
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);

                        iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
                                break;

                        total_len += sent;
                }
        }

unmask:
        sigprocmask(SIG_SETMASK, &oldmask, NULL);

        /*
         * If signal is pending but we have already sent the whole packet to
         * the server we need to return success status to allow a corresponding
         * mid entry to be kept in the pending requests queue thus allowing
         * to handle responses from the server by the client.
         *
         * If only part of the packet has been sent there is no need to hide
         * interrupt because the session will be reconnected anyway, so there
         * won't be any response from the server to handle.
         */

        if (signal_pending(current) && (total_len != send_length)) {
                cifs_dbg(FYI, "signal is pending after attempt to send\n");
                rc = -EINTR;
        }

        /* uncork it */
        val = 0;
        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
                          (char *)&val, sizeof(val));

        if ((total_len > 0) && (total_len != send_length)) {
                cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
                         send_length, total_len);
                /*
                 * If we have only sent part of an SMB then the next SMB could
                 * be taken as the remainder of this one. We need to kill the
                 * socket so the server throws away the partial SMB
                 */
                server->tcpStatus = CifsNeedReconnect;
                trace_smb3_partial_send_reconnect(server->CurrentMid,
                                                  server->hostname);
        }
smbd_done:
        if (rc < 0 && rc != -EINTR)
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
        else if (rc > 0)
                rc = 0;

        return rc;
}

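/*
 * Send a request, encrypting it first when CIFS_TRANSFORM_REQ is set: the
 * chain is wrapped in an SMB2 transform header via the server's
 * init_transform_rq op before going out on the wire.
 */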
static int
smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
              struct smb_rqst *rqst, int flags)
{
        struct kvec iov;
        struct smb2_transform_hdr tr_hdr;
        struct smb_rqst cur_rqst[MAX_COMPOUND];
        int rc;

        if (!(flags & CIFS_TRANSFORM_REQ))
                return __smb_send_rqst(server, num_rqst, rqst);

        if (num_rqst > MAX_COMPOUND - 1)
                return -ENOMEM;

        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
        memset(&tr_hdr, 0, sizeof(tr_hdr));

        iov.iov_base = &tr_hdr;
        iov.iov_len = sizeof(tr_hdr);
        cur_rqst[0].rq_iov = &iov;
        cur_rqst[0].rq_nvec = 1;

        if (!server->ops->init_transform_rq) {
                cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
                return -EIO;
        }

        rc = server->ops->init_transform_rq(server, num_rqst + 1,
                                            &cur_rqst[0], rqst);
        if (rc)
                return rc;

        rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
        smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
        return rc;
}

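/*
 * Send a single SMB1-style buffer whose first 4 bytes are the RFC1002
 * length field; it is split into two iovecs so the common send path can
 * treat the length and the body separately.
 */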
int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length)
{
        struct kvec iov[2];
        struct smb_rqst rqst = { .rq_iov = iov,
                                 .rq_nvec = 2 };

        iov[0].iov_base = smb_buffer;
        iov[0].iov_len = 4;
        iov[1].iov_base = (char *)smb_buffer + 4;
        iov[1].iov_len = smb_buf_length;

        return __smb_send_rqst(server, 1, &rqst);
}

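/*
 * Block until num_credits credits are available on the server (a negative
 * timeout means wait indefinitely), then charge them (blocking lock ops are
 * not counted) and record which reconnect instance they came from. Echoes
 * fail fast with -EAGAIN when no credits remain, CIFS_NON_BLOCKING requests
 * (oplock break responses) never wait, and the last MAX_COMPOUND credits
 * are kept in reserve for compound requests once enough are in flight.
 */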
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
                      const int timeout, const int flags,
                      unsigned int *instance)
{
        int rc;
        int *credits;
        int optype;
        long int t;

        if (timeout < 0)
                t = MAX_JIFFY_OFFSET;
        else
                t = msecs_to_jiffies(timeout);

        optype = flags & CIFS_OP_MASK;

        *instance = 0;

        credits = server->ops->get_credits_field(server, optype);
        /* Since an echo is already inflight, no need to wait to send another */
        if (*credits <= 0 && optype == CIFS_ECHO_OP)
                return -EAGAIN;

        spin_lock(&server->req_lock);
        if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
                /* oplock breaks must not be held up */
                server->in_flight++;
                if (server->in_flight > server->max_in_flight)
                        server->max_in_flight = server->in_flight;
                *credits -= 1;
                *instance = server->reconnect_instance;
                spin_unlock(&server->req_lock);
                return 0;
        }

        while (1) {
                if (*credits < num_credits) {
                        spin_unlock(&server->req_lock);
                        cifs_num_waiters_inc(server);
                        rc = wait_event_killable_timeout(server->request_q,
                                has_credits(server, credits, num_credits), t);
                        cifs_num_waiters_dec(server);
                        if (!rc) {
                                trace_smb3_credit_timeout(server->CurrentMid,
                                        server->hostname, num_credits);
                                cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                         timeout);
                                return -ENOTSUPP;
                        }
                        if (rc == -ERESTARTSYS)
                                return -ERESTARTSYS;
                        spin_lock(&server->req_lock);
                } else {
                        if (server->tcpStatus == CifsExiting) {
                                spin_unlock(&server->req_lock);
                                return -ENOENT;
                        }

                        /*
                         * For normal commands, reserve the last MAX_COMPOUND
                         * credits for compound requests.
                         * Otherwise these compounds could be permanently
                         * starved for credits by single-credit requests.
                         *
                         * To prevent spinning CPU, block this thread until
                         * there are >MAX_COMPOUND credits available.
                         * But only do this if we already have a lot of
                         * credits in flight to avoid triggering this check
                         * for servers that are slow to hand out credits on
                         * new sessions.
                         */
                        if (!optype && num_credits == 1 &&
                            server->in_flight > 2 * MAX_COMPOUND &&
                            *credits <= MAX_COMPOUND) {
                                spin_unlock(&server->req_lock);
                                cifs_num_waiters_inc(server);
                                rc = wait_event_killable_timeout(
                                        server->request_q,
                                        has_credits(server, credits,
                                                    MAX_COMPOUND + 1),
                                        t);
                                cifs_num_waiters_dec(server);
                                if (!rc) {
                                        trace_smb3_credit_timeout(
                                                server->CurrentMid,
                                                server->hostname, num_credits);
                                        cifs_server_dbg(VFS, "wait timed out after %d ms\n",
                                                 timeout);
                                        return -ENOTSUPP;
                                }
                                if (rc == -ERESTARTSYS)
                                        return -ERESTARTSYS;
                                spin_lock(&server->req_lock);
                                continue;
                        }

                        /*
                         * Can not count locking commands against total
                         * as they are allowed to block on server.
                         */

                        /* update # of requests on the wire to server */
                        if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
                                *credits -= num_credits;
                                server->in_flight += num_credits;
                                if (server->in_flight > server->max_in_flight)
                                        server->max_in_flight = server->in_flight;
                                *instance = server->reconnect_instance;
                        }
                        spin_unlock(&server->req_lock);
                        break;
                }
        }
        return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int flags,
                      unsigned int *instance)
{
        return wait_for_free_credits(server, 1, -1, flags,
                                     instance);
}

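/*
 * Like wait_for_free_request() but for a compound chain of num requests:
 * bail out early with -ENOTSUPP if too few requests are in flight to ever
 * replenish enough credits, otherwise wait up to 60 seconds.
 */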
static int
wait_for_compound_request(struct TCP_Server_Info *server, int num,
                          const int flags, unsigned int *instance)
{
        int *credits;

        credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);

        spin_lock(&server->req_lock);
        if (*credits < num) {
                /*
                 * Return immediately if not too many requests in flight since
                 * we will likely be stuck on waiting for credits.
                 */
                if (server->in_flight < num - *credits) {
                        spin_unlock(&server->req_lock);
                        return -ENOTSUPP;
                }
        }
        spin_unlock(&server->req_lock);

        return wait_for_free_credits(server, num, 60000, flags,
                                     instance);
}

int
cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
                      unsigned int *num, struct cifs_credits *credits)
{
        *num = size;
        credits->value = 0;
        credits->instance = server->reconnect_instance;
        return 0;
}

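/*
 * Allocate a mid for the request and queue it on the server's pending list,
 * first rejecting requests that are invalid for the current TCP/SMB session
 * state (exiting, needing reconnect, or mid-setup/teardown).
 */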
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
{
        if (ses->server->tcpStatus == CifsExiting) {
                return -ENOENT;
        }

        if (ses->server->tcpStatus == CifsNeedReconnect) {
                cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
                return -EAGAIN;
        }

        if (ses->status == CifsNew) {
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE))
                        return -EAGAIN;
                /* else ok - we are setting up session */
        }

        if (ses->status == CifsExiting) {
                /* check if SMB session is bad because we are setting it up */
                if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
                        return -EAGAIN;
                /* else ok - we are shutting down session */
        }

        *ppmidQ = AllocMidQEntry(in_buf, ses->server);
        if (*ppmidQ == NULL)
                return -ENOMEM;
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
        return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
        int error;

        error = wait_event_freezekillable_unsafe(server->response_q,
                                midQ->mid_state != MID_REQUEST_SUBMITTED);
        if (error < 0)
                return -ERESTARTSYS;

        return 0;
}

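/*
 * Build a mid for an async request: the first iovec must be the 4-byte
 * RFC1002 length immediately followed in memory by the SMB header. Enables
 * signing in the header flags when the server requires it and signs the
 * request before returning the mid.
 */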
struct mid_q_entry *
cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        /* enable signing if server requires it */
        if (server->sign)
                hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

        mid = AllocMidQEntry(hdr, server);
        if (mid == NULL)
                return ERR_PTR(-ENOMEM);

        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
        if (rc) {
                DeleteMidQEntry(mid);
                return ERR_PTR(rc);
        }

        return mid;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
                mid_receive_t *receive, mid_callback_t *callback,
                mid_handle_t *handle, void *cbdata, const int flags,
                const struct cifs_credits *exist_credits)
{
        int rc;
        struct mid_q_entry *mid;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        unsigned int instance;
        int optype;

        optype = flags & CIFS_OP_MASK;

        if ((flags & CIFS_HAS_CREDITS) == 0) {
                rc = wait_for_free_request(server, flags, &instance);
                if (rc)
                        return rc;
                credits.value = 1;
                credits.instance = instance;
        } else
                instance = exist_credits->instance;

        mutex_lock(&server->srv_mutex);

        /*
         * We can't use credits obtained from the previous session to send this
         * request. Check if there were reconnects after we obtained credits and
         * return -EAGAIN in such cases to let callers handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return -EAGAIN;
        }

        mid = server->ops->setup_async_request(server, rqst);
        if (IS_ERR(mid)) {
                mutex_unlock(&server->srv_mutex);
                add_credits_and_wake_if(server, &credits, optype);
                return PTR_ERR(mid);
        }

        mid->receive = receive;
        mid->callback = callback;
        mid->callback_data = cbdata;
        mid->handle = handle;
        mid->mid_state = MID_REQUEST_SUBMITTED;

        /* put it on the pending_mid_q */
        spin_lock(&GlobalMid_Lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);

        /*
         * Need to store the time in mid before calling I/O. For call_async,
         * I/O response may come back and free the mid entry on another thread.
         */
        cifs_save_when_sent(mid);
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, 1, rqst, flags);
        cifs_in_send_dec(server);

        if (rc < 0) {
                revert_current_mid(server, mid->credits);
                server->sequence_number -= 2;
                cifs_delete_mid(mid);
        }

        mutex_unlock(&server->srv_mutex);

        if (rc == 0)
                return 0;

        add_credits_and_wake_if(server, &credits, optype);
        return rc;
}

/*
 * Send an SMB Request. No response info (other than return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer and how long to wait
 * and whether to log NT STATUS code (error) before mapping it to POSIX error
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                 char *in_buf, int flags)
{
        int rc;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;

        iov[0].iov_base = in_buf;
        iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
        flags |= CIFS_NO_RSP_BUF;
        rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
        cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);

        return rc;
}

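/*
 * Translate the final state of a synchronously awaited mid into an errno.
 * On success (response received) the mid is left alive for the caller to
 * consume; on any failure it is dequeued (if needed) and released.
 */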
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
        int rc = 0;

        cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
                 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);

        spin_lock(&GlobalMid_Lock);
        switch (mid->mid_state) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
        case MID_SHUTDOWN:
                rc = -EHOSTDOWN;
                break;
        default:
                if (!(mid->mid_flags & MID_DELETED)) {
                        list_del_init(&mid->qhead);
                        mid->mid_flags |= MID_DELETED;
                }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
        }
        spin_unlock(&GlobalMid_Lock);

        DeleteMidQEntry(mid);
        return rc;
}

static inline int
send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
            struct mid_q_entry *mid)
{
        return server->ops->send_cancel ?
                                server->ops->send_cancel(server, rqst, mid) : 0;
}

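/*
 * Validate a received response: verify the SMB signature when signing is in
 * use (a failure is only logged at present) and map the server's status
 * code to a POSIX error.
 */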
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
                   bool log_error)
{
        /* convert the length into a more usable form */
        unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

        dump_smb(mid->resp_buf, min_t(u32, 92, len));

        if (server->sign) {
                struct kvec iov[2];
                int rc = 0;
                struct smb_rqst rqst = { .rq_iov = iov,
                                         .rq_nvec = 2 };

                iov[0].iov_base = mid->resp_buf;
                iov[0].iov_len = 4;
                iov[1].iov_base = (char *)mid->resp_buf + 4;
                iov[1].iov_len = len - 4;
                /* FIXME: add code to kill session */
                rc = cifs_verify_signature(&rqst, server,
                                           mid->sequence_number);
                if (rc)
                        cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
                                 rc);
        }

        /* BB special case reconnect tid and uid here? */
        return map_smb_to_linux_error(mid->resp_buf, log_error);
}

struct mid_q_entry *
cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
        int rc;
        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
        struct mid_q_entry *mid;

        if (rqst->rq_iov[0].iov_len != 4 ||
            rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
                return ERR_PTR(-EIO);

        rc = allocate_mid(ses, hdr, &mid);
        if (rc)
                return ERR_PTR(rc);
        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
        if (rc) {
                cifs_delete_mid(mid);
                return ERR_PTR(rc);
        }
        return mid;
}

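/*
 * Per-link callback for a compound chain: return the credits granted by
 * this response to the server's pool. The last link additionally wakes the
 * waiting thread; a cancelled link also releases its mid.
 */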
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
        struct TCP_Server_Info *server = mid->server;
        struct cifs_credits credits;

        credits.value = server->ops->get_credits(mid);
        credits.instance = server->reconnect_instance;

        add_credits(server, &credits, mid->optype);
}

static void
cifs_compound_last_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        cifs_wake_up_task(mid);
}

static void
cifs_cancelled_callback(struct mid_q_entry *mid)
{
        cifs_compound_callback(mid);
        DeleteMidQEntry(mid);
}

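/*
 * Send a compound chain of num_rqst requests and wait for all responses.
 * Credits for the whole chain are obtained up front (from the same
 * reconnect instance), each link returns its granted credits via the mid
 * callbacks, and interrupted waits hand their mids over to
 * cifs_cancelled_callback so the receive path can clean them up.
 */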
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
                   const int flags, const int num_rqst, struct smb_rqst *rqst,
                   int *resp_buf_type, struct kvec *resp_iov)
{
        int i, j, optype, rc = 0;
        struct mid_q_entry *midQ[MAX_COMPOUND];
        bool cancelled_mid[MAX_COMPOUND] = {false};
        struct cifs_credits credits[MAX_COMPOUND] = {
                { .value = 0, .instance = 0 }
        };
        unsigned int instance;
        char *buf;
        struct TCP_Server_Info *server;

        optype = flags & CIFS_OP_MASK;

        for (i = 0; i < num_rqst; i++)
                resp_buf_type[i] = CIFS_NO_BUFFER;  /* no response buf yet */

        if ((ses == NULL) || (ses->server == NULL)) {
                cifs_dbg(VFS, "Null session\n");
                return -EIO;
        }

        server = ses->server;
        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Wait for all the requests to become available.
         * This approach still leaves the possibility to be stuck waiting for
         * credits if the server doesn't grant credits to the outstanding
         * requests and if the client is completely idle, not generating any
         * other requests.
         * This can be handled by the eventual session reconnect.
         */
        rc = wait_for_compound_request(server, num_rqst, flags,
                                       &instance);
        if (rc)
                return rc;

        for (i = 0; i < num_rqst; i++) {
                credits[i].value = 1;
                credits[i].instance = instance;
        }

        /*
         * Make sure that we sign in the same order that we send on this socket
         * and avoid races inside tcp sendmsg code that could cause corruption
         * of smb data.
         */

        mutex_lock(&server->srv_mutex);

        /*
         * All the parts of the compound chain must use credits obtained from
         * the same session. We cannot use credits obtained from a previous
         * session to send this request. Check if there were reconnects after
         * we obtained credits and return -EAGAIN in such cases to let callers
         * handle it.
         */
        if (instance != server->reconnect_instance) {
                mutex_unlock(&server->srv_mutex);
                for (j = 0; j < num_rqst; j++)
                        add_credits(server, &credits[j], optype);
                return -EAGAIN;
        }

        for (i = 0; i < num_rqst; i++) {
                midQ[i] = server->ops->setup_request(ses, &rqst[i]);
                if (IS_ERR(midQ[i])) {
                        revert_current_mid(server, i);
                        for (j = 0; j < i; j++)
                                cifs_delete_mid(midQ[j]);
                        mutex_unlock(&server->srv_mutex);

                        /* Update # of requests on wire to server */
                        for (j = 0; j < num_rqst; j++)
                                add_credits(server, &credits[j], optype);
                        return PTR_ERR(midQ[i]);
                }

                midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
                midQ[i]->optype = optype;
                /*
                 * Invoke callback for every part of the compound chain
                 * to calculate credits properly. Wake up this thread only when
                 * the last element is received.
                 */
                if (i < num_rqst - 1)
                        midQ[i]->callback = cifs_compound_callback;
                else
                        midQ[i]->callback = cifs_compound_last_callback;
        }
        cifs_in_send_inc(server);
        rc = smb_send_rqst(server, num_rqst, rqst, flags);
        cifs_in_send_dec(server);

        for (i = 0; i < num_rqst; i++)
                cifs_save_when_sent(midQ[i]);

        if (rc < 0) {
                revert_current_mid(server, num_rqst);
                server->sequence_number -= 2;
        }

        mutex_unlock(&server->srv_mutex);

        /*
         * If sending failed for some reason or it is an oplock break that we
         * will not receive a response to - return credits back
         */
        if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
                for (i = 0; i < num_rqst; i++)
                        add_credits(server, &credits[i], optype);
                goto out;
        }

        /*
         * At this point the request is passed to the network stack - we assume
         * that any credits taken from the server structure on the client have
         * been spent and we can't return them back. Once we receive responses
         * we will collect credits granted by the server in the mid callbacks
         * and add those credits to the server structure.
         */

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
                smb311_update_preauth_hash(ses, rqst[0].rq_iov,
                                           rqst[0].rq_nvec);

        for (i = 0; i < num_rqst; i++) {
                rc = wait_for_response(server, midQ[i]);
                if (rc != 0)
                        break;
        }
        if (rc != 0) {
                for (; i < num_rqst; i++) {
                        cifs_server_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
                                 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
                        send_cancel(server, &rqst[i], midQ[i]);
                        spin_lock(&GlobalMid_Lock);
                        midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
                        if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
                                midQ[i]->callback = cifs_cancelled_callback;
                                cancelled_mid[i] = true;
                                credits[i].value = 0;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }
        }

        for (i = 0; i < num_rqst; i++) {
                if (rc < 0)
                        goto out;

                rc = cifs_sync_mid_result(midQ[i], server);
                if (rc != 0) {
                        /* mark this mid as cancelled to not free it below */
                        cancelled_mid[i] = true;
                        goto out;
                }

                if (!midQ[i]->resp_buf ||
                    midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
                        rc = -EIO;
                        cifs_dbg(FYI, "Bad MID state?\n");
                        goto out;
                }

                buf = (char *)midQ[i]->resp_buf;
                resp_iov[i].iov_base = buf;
                resp_iov[i].iov_len = midQ[i]->resp_buf_size +
                                      server->vals->header_preamble_size;

                if (midQ[i]->large_buf)
                        resp_buf_type[i] = CIFS_LARGE_BUFFER;
                else
                        resp_buf_type[i] = CIFS_SMALL_BUFFER;

                rc = server->ops->check_receive(midQ[i], server,
                                                flags & CIFS_LOG_ERROR);

                /* mark it so buf will not be freed by cifs_delete_mid */
                if ((flags & CIFS_NO_RSP_BUF) == 0)
                        midQ[i]->resp_buf = NULL;

        }

        /*
         * Compounding is never used during session establish.
         */
        if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
                struct kvec iov = {
                        .iov_base = resp_iov[0].iov_base,
                        .iov_len = resp_iov[0].iov_len
                };
                smb311_update_preauth_hash(ses, &iov, 1);
        }

out:
        /*
         * This will dequeue all mids. After this it is important that the
         * demultiplex_thread will not process any of these mids any further.
         * This is prevented above by using a noop callback that will not
         * wake this thread except for the very last PDU.
         */
        for (i = 0; i < num_rqst; i++) {
                if (!cancelled_mid[i])
                        cifs_delete_mid(midQ[i]);
        }

        return rc;
}

int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
               struct smb_rqst *rqst, int *resp_buf_type, const int flags,
               struct kvec *resp_iov)
{
        return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
                                  resp_iov);
}

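/*
 * Legacy entry point taking a raw iovec array: prepends a separate 4-byte
 * RFC1001 length vector (carving it out of the first caller iovec) and then
 * goes through the common cifs_send_recv() path.
 */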
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
             const int flags, struct kvec *resp_iov)
{
        struct smb_rqst rqst;
        struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
        int rc;

        if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
                new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
                                        GFP_KERNEL);
                if (!new_iov) {
                        /* otherwise cifs_send_recv below sets resp_buf_type */
                        *resp_buf_type = CIFS_NO_BUFFER;
                        return -ENOMEM;
                }
        } else
                new_iov = s_iov;

        /* 1st iov is a RFC1001 length followed by the rest of the packet */
        memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));

        new_iov[0].iov_base = new_iov[1].iov_base;
        new_iov[0].iov_len = 4;
        new_iov[1].iov_base += 4;
        new_iov[1].iov_len -= 4;

        memset(&rqst, 0, sizeof(struct smb_rqst));
        rqst.rq_iov = new_iov;
        rqst.rq_nvec = n_vec + 1;

        rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
        if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
                kfree(new_iov);
        return rc;
}

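/*
 * Synchronous SMB1 send/receive for a single pre-built request buffer:
 * takes one credit, queues and signs a mid under srv_mutex, sends it, waits
 * for the response and copies it into out_buf. The credit is returned on
 * every exit path; if the wait is interrupted, a cancel is sent and the mid
 * frees itself via its callback.
 */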
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int flags)
{
        int rc = 0;
        struct mid_q_entry *midQ;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        struct cifs_credits credits = { .value = 1, .instance = 0 };
        struct TCP_Server_Info *server;

        if (ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        server = ses->server;
        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(server, flags, &credits.instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                /* Update # of requests on wire to server */
                add_credits(server, &credits, 0);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;

        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0)
                goto out;

        rc = wait_for_response(server, midQ);
        if (rc != 0) {
                send_cancel(server, &rqst, midQ);
                spin_lock(&GlobalMid_Lock);
                if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                        /* no longer considered to be "in-flight" */
                        midQ->callback = DeleteMidQEntry;
                        spin_unlock(&GlobalMid_Lock);
                        add_credits(server, &credits, 0);
                        return rc;
                }
                spin_unlock(&GlobalMid_Lock);
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0) {
                add_credits(server, &credits, 0);
                return rc;
        }

        if (!midQ->resp_buf || !out_buf ||
            midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_server_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        add_credits(server, &credits, 0);

        return rc;
}

/*
 * We send a LOCKINGX_CANCEL_LOCK to cause the Windows
 * blocking lock to return.
 */
static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                 struct smb_hdr *in_buf,
                 struct smb_hdr *out_buf)
{
        int bytes_returned;
        struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

        /*
         * We just modify the current in_buf to change
         * the type of lock from LOCKING_ANDX_SHARED_LOCK
         * or LOCKING_ANDX_EXCLUSIVE_LOCK to
         * LOCKING_ANDX_CANCEL_LOCK.
         */
        pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
        pSMB->Timeout = 0;
        pSMB->hdr.Mid = get_next_mid(ses->server);

        return SendReceive(xid, ses, in_buf, out_buf,
                        &bytes_returned, 0);
}

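/*
 * Variant of SendReceive() for blocking SMB1 lock requests: waits with
 * signals enabled, and on interruption cancels the lock (NT_CANCEL for
 * POSIX/Transaction2 locks, LOCKINGX_CANCEL_LOCK otherwise) before waiting
 * again; -EACCES after a restarted wait maps to -ERESTARTSYS so the system
 * call is retried.
 */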
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf, struct smb_hdr *out_buf,
                        int *pbytes_returned)
{
        int rc = 0;
        int rstart = 0;
        struct mid_q_entry *midQ;
        struct cifs_ses *ses;
        unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
        struct kvec iov = { .iov_base = in_buf, .iov_len = len };
        struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
        unsigned int instance;
        struct TCP_Server_Info *server;

        if (tcon == NULL || tcon->ses == NULL) {
                cifs_dbg(VFS, "Null smb session\n");
                return -EIO;
        }
        ses = tcon->ses;
        server = ses->server;

        if (server == NULL) {
                cifs_dbg(VFS, "Null tcp session\n");
                return -EIO;
        }

        if (server->tcpStatus == CifsExiting)
                return -ENOENT;

        /*
         * Ensure that we do not send more than 50 overlapping requests
         * to the same server. We may make this configurable later or
         * use ses->maxReq.
         */
        if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
                         len);
                return -EIO;
        }

        rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
        if (rc)
                return rc;

        /*
         * Make sure that we sign in the same order that we send on this
         * socket and avoid races inside tcp sendmsg code that could cause
         * corruption of smb data.
         */
        mutex_lock(&server->srv_mutex);

        rc = allocate_mid(ses, in_buf, &midQ);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
        if (rc) {
                cifs_delete_mid(midQ);
                mutex_unlock(&server->srv_mutex);
                return rc;
        }

        midQ->mid_state = MID_REQUEST_SUBMITTED;
        cifs_in_send_inc(server);
        rc = smb_send(server, in_buf, len);
        cifs_in_send_dec(server);
        cifs_save_when_sent(midQ);

        if (rc < 0)
                server->sequence_number -= 2;

        mutex_unlock(&server->srv_mutex);

        if (rc < 0) {
                cifs_delete_mid(midQ);
                return rc;
        }

        /* Wait for a reply - allow signals to interrupt. */
        rc = wait_event_interruptible(server->response_q,
                (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
                ((server->tcpStatus != CifsGood) &&
                 (server->tcpStatus != CifsNew)));

        /* Were we interrupted by a signal ? */
        if ((rc == -ERESTARTSYS) &&
            (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
            ((server->tcpStatus == CifsGood) ||
             (server->tcpStatus == CifsNew))) {

                if (in_buf->Command == SMB_COM_TRANSACTION2) {
                        /*
                         * POSIX lock. We send a NT_CANCEL SMB to cause the
                         * blocking lock to return.
                         */
                        rc = send_cancel(server, &rqst, midQ);
                        if (rc) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                } else {
                        /*
                         * Windows lock. We send a LOCKINGX_CANCEL_LOCK
                         * to cause the blocking lock to return.
                         */
                        rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

                        /*
                         * If we get -ENOLCK back the lock may have
                         * already been removed. Don't exit in this case.
                         */
                        if (rc && rc != -ENOLCK) {
                                cifs_delete_mid(midQ);
                                return rc;
                        }
                }

                rc = wait_for_response(server, midQ);
                if (rc) {
                        send_cancel(server, &rqst, midQ);
                        spin_lock(&GlobalMid_Lock);
                        if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
                                /* no longer considered to be "in-flight" */
                                midQ->callback = DeleteMidQEntry;
                                spin_unlock(&GlobalMid_Lock);
                                return rc;
                        }
                        spin_unlock(&GlobalMid_Lock);
                }

                /* We got the response - restart system call. */
                rstart = 1;
        }

        rc = cifs_sync_mid_result(midQ, server);
        if (rc != 0)
                return rc;

        /* rcvd frame is ok */
        if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cifs_tcon_dbg(VFS, "Bad MID state?\n");
                goto out;
        }

        *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
        memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
        rc = cifs_check_receive(midQ, server, 0);
out:
        cifs_delete_mid(midQ);
        if (rstart && rc == -EACCES)
                return -ERESTARTSYS;
        return rc;
}