1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/bvec.h>
32 #include <linux/highmem.h>
33 #include <linux/uaccess.h>
34 #include <asm/processor.h>
35 #include <linux/mempool.h>
36 #include <linux/sched/signal.h>
37 #include "cifspdu.h"
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "cifs_debug.h"
41 #include "smb2proto.h"
42 #include "smbdirect.h"
43
44 /* Max number of iovectors we can use off the stack when sending requests. */
45 #define CIFS_MAX_IOV_SIZE 8
46
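/*
 * Default mid callback: the request was issued synchronously, so just wake
 * the task that queued it (stashed in callback_data when the mid was
 * allocated).
 */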
47 void
48 cifs_wake_up_task(struct mid_q_entry *mid)
49 {
50 wake_up_process(mid->callback_data);
51 }
52
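/*
 * Allocate and initialize a mid (multiplex id) queue entry for the given
 * request header. The entry starts out configured for a synchronous
 * caller: the default callback wakes the allocating task, which is pinned
 * with get_task_struct() until the mid is released.
 */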
53 struct mid_q_entry *
54 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
55 {
56 struct mid_q_entry *temp;
57
58 if (server == NULL) {
59 cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n");
60 return NULL;
61 }
62
63 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
64 memset(temp, 0, sizeof(struct mid_q_entry));
65 kref_init(&temp->refcount);
66 temp->mid = get_mid(smb_buffer);
67 temp->pid = current->pid;
68 temp->command = cpu_to_le16(smb_buffer->Command);
69 cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
70 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
71 /* when mid allocated can be before when sent */
72 temp->when_alloc = jiffies;
73 temp->server = server;
74
75 /*
76 * The default is for the mid to be synchronous, so the
77 * default callback just wakes up the current task.
78 */
79 get_task_struct(current);
80 temp->creator = current;
81 temp->callback = cifs_wake_up_task;
82 temp->callback_data = current;
83
84 atomic_inc(&midCount);
85 temp->mid_state = MID_REQUEST_ALLOCATED;
86 return temp;
87 }
88
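/*
 * kref release callback, run when the last reference to a mid is dropped
 * (with GlobalMid_Lock held, see cifs_mid_q_entry_release below). Lets the
 * server handle a response that arrived after the caller gave up waiting,
 * frees the response buffer, updates the CONFIG_CIFS_STATS2 counters and
 * returns the entry to the mempool.
 */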
89 static void _cifs_mid_q_entry_release(struct kref *refcount)
90 {
91 struct mid_q_entry *midEntry =
92 container_of(refcount, struct mid_q_entry, refcount);
93 #ifdef CONFIG_CIFS_STATS2
94 __le16 command = midEntry->server->vals->lock_cmd;
95 __u16 smb_cmd = le16_to_cpu(midEntry->command);
96 unsigned long now;
97 unsigned long roundtrip_time;
98 #endif
99 struct TCP_Server_Info *server = midEntry->server;
100
101 if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
102 midEntry->mid_state == MID_RESPONSE_RECEIVED &&
103 server->ops->handle_cancelled_mid)
104 server->ops->handle_cancelled_mid(midEntry->resp_buf, server);
105
106 midEntry->mid_state = MID_FREE;
107 atomic_dec(&midCount);
108 if (midEntry->large_buf)
109 cifs_buf_release(midEntry->resp_buf);
110 else
111 cifs_small_buf_release(midEntry->resp_buf);
112 #ifdef CONFIG_CIFS_STATS2
113 now = jiffies;
114 if (now < midEntry->when_alloc)
115 cifs_server_dbg(VFS, "invalid mid allocation time\n");
116 roundtrip_time = now - midEntry->when_alloc;
117
118 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
119 if (atomic_read(&server->num_cmds[smb_cmd]) == 0) {
120 server->slowest_cmd[smb_cmd] = roundtrip_time;
121 server->fastest_cmd[smb_cmd] = roundtrip_time;
122 } else {
123 if (server->slowest_cmd[smb_cmd] < roundtrip_time)
124 server->slowest_cmd[smb_cmd] = roundtrip_time;
125 else if (server->fastest_cmd[smb_cmd] > roundtrip_time)
126 server->fastest_cmd[smb_cmd] = roundtrip_time;
127 }
128 cifs_stats_inc(&server->num_cmds[smb_cmd]);
129 server->time_per_cmd[smb_cmd] += roundtrip_time;
130 }
131 /*
132 * commands taking longer than one second (default) can be indications
133 * that something is wrong, unless it is quite a slow link or a very
134 * busy server. Note that this calc is unlikely or impossible to wrap
135 * as long as slow_rsp_threshold is not set way above recommended max
136 * value (32767 ie 9 hours) and is generally harmless even if wrong
137 * since only affects debug counters - so leaving the calc as simple
138 * comparison rather than doing multiple conversions and overflow
139 * checks
140 */
141 if ((slow_rsp_threshold != 0) &&
142 time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
143 (midEntry->command != command)) {
144 /*
145 * smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command
146 * NB: le16_to_cpu returns unsigned so can not be negative below
147 */
148 if (smb_cmd < NUMBER_OF_SMB2_COMMANDS)
149 cifs_stats_inc(&server->smb2slowcmd[smb_cmd]);
150
151 trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
152 midEntry->when_sent, midEntry->when_received);
153 if (cifsFYI & CIFS_TIMER) {
154 pr_debug(" CIFS slow rsp: cmd %d mid %llu",
155 midEntry->command, midEntry->mid);
156 cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
157 now - midEntry->when_alloc,
158 now - midEntry->when_sent,
159 now - midEntry->when_received);
160 }
161 }
162 #endif
163 put_task_struct(midEntry->creator);
164
165 mempool_free(midEntry, cifs_mid_poolp);
166 }
167
168 void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
169 {
170 spin_lock(&GlobalMid_Lock);
171 kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
172 spin_unlock(&GlobalMid_Lock);
173 }
174
175 void DeleteMidQEntry(struct mid_q_entry *midEntry)
176 {
177 cifs_mid_q_entry_release(midEntry);
178 }
179
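/*
 * Remove the mid from pending_mid_q (unless the demultiplex thread already
 * did so) and drop the queue's reference to it.
 */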
180 void
181 cifs_delete_mid(struct mid_q_entry *mid)
182 {
183 spin_lock(&GlobalMid_Lock);
184 if (!(mid->mid_flags & MID_DELETED)) {
185 list_del_init(&mid->qhead);
186 mid->mid_flags |= MID_DELETED;
187 }
188 spin_unlock(&GlobalMid_Lock);
189
190 DeleteMidQEntry(mid);
191 }
192
193 /*
194 * smb_send_kvec - send an array of kvecs to the server
195 * @server: Server to send the data to
196 * @smb_msg: Message to send
197 * @sent: amount of data sent on socket is stored here
198 *
199 * Our basic "send data to server" function. Should be called with srv_mutex
200 * held. The caller is responsible for handling the results.
201 */
202 static int
203 smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
204 size_t *sent)
205 {
206 int rc = 0;
207 int retries = 0;
208 struct socket *ssocket = server->ssocket;
209
210 *sent = 0;
211
212 if (server->noblocksnd)
213 smb_msg->msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
214 else
215 smb_msg->msg_flags = MSG_NOSIGNAL;
216
217 while (msg_data_left(smb_msg)) {
218 /*
219 * If blocking send, we try 3 times, since each can block
220 * for 5 seconds. For nonblocking we have to try more
221 * but wait increasing amounts of time allowing time for
222 * socket to clear. The overall time we wait in either
223 * case to send on the socket is about 15 seconds.
224 * Similarly we wait for 15 seconds for a response from
225 * the server in SendReceive[2] for the server to send
226 * a response back for most types of requests (except
227 * SMB Write past end of file which can be slow, and
228 * blocking lock operations). NFS waits slightly longer
229 * than CIFS, but this can make it take longer for
230 * nonresponsive servers to be detected and 15 seconds
231 * is more than enough time for modern networks to
232 * send a packet. In most cases if we fail to send
233 * after the retries we will kill the socket and
234 * reconnect which may clear the network problem.
235 */
236 rc = sock_sendmsg(ssocket, smb_msg);
237 if (rc == -EAGAIN) {
238 retries++;
239 if (retries >= 14 ||
240 (!server->noblocksnd && (retries > 2))) {
241 cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
242 ssocket);
243 return -EAGAIN;
244 }
245 msleep(1 << retries);
246 continue;
247 }
248
249 if (rc < 0)
250 return rc;
251
252 if (rc == 0) {
253 /* should never happen, letting socket clear before
254 retrying is our only obvious option here */
255 cifs_server_dbg(VFS, "tcp sent no data\n");
256 msleep(500);
257 continue;
258 }
259
260 /* send was at least partially successful */
261 *sent += rc;
262 retries = 0; /* in case we get ENOSPC on the next send */
263 }
264 return 0;
265 }
266
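/*
 * Return the number of bytes the request will occupy on the wire: all kvec
 * lengths plus the page array, skipping the 4 byte RFC1002 length iovec
 * for SMB2+ (no preamble) requests since that marker is generated
 * separately at send time.
 */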
267 unsigned long
268 smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
269 {
270 unsigned int i;
271 struct kvec *iov;
272 int nvec;
273 unsigned long buflen = 0;
274
275 if (server->vals->header_preamble_size == 0 &&
276 rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
277 iov = &rqst->rq_iov[1];
278 nvec = rqst->rq_nvec - 1;
279 } else {
280 iov = rqst->rq_iov;
281 nvec = rqst->rq_nvec;
282 }
283
284 /* total up iov array first */
285 for (i = 0; i < nvec; i++)
286 buflen += iov[i].iov_len;
287
288 /*
289 * Add in the page array if there is one. The caller needs to make
290 * sure rq_offset and rq_tailsz are set correctly. If a buffer of
291 * multiple pages ends at page boundary, rq_tailsz needs to be set to
292 * PAGE_SIZE.
293 */
294 if (rqst->rq_npages) {
295 if (rqst->rq_npages == 1)
296 buflen += rqst->rq_tailsz;
297 else {
298 /*
299 * If there is more than one page, calculate the
300 * buffer length based on rq_offset and rq_tailsz
301 */
302 buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
303 rqst->rq_offset;
304 buflen += rqst->rq_tailsz;
305 }
306 }
307
308 return buflen;
309 }
310
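/*
 * Send one or more smb_rqst structures on the server socket, or via
 * smbdirect when RDMA is in use. The socket is corked for the duration,
 * signals are blocked so a send cannot be cut short halfway through, and
 * an RFC1002 length marker is prepended for SMB2+. A partial send marks
 * the session for reconnect. Must be called with srv_mutex held.
 */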
311 static int
312 __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
313 struct smb_rqst *rqst)
314 {
315 int rc;
316 struct kvec *iov;
317 int n_vec;
318 unsigned int send_length = 0;
319 unsigned int i, j;
320 sigset_t mask, oldmask;
321 size_t total_len = 0, sent, size;
322 struct socket *ssocket = server->ssocket;
323 struct msghdr smb_msg = {};
324 int val = 1;
325 __be32 rfc1002_marker;
326
327 cifs_in_send_inc(server);
328 if (cifs_rdma_enabled(server)) {
329 /* return -EAGAIN when connecting or reconnecting */
330 rc = -EAGAIN;
331 if (server->smbd_conn)
332 rc = smbd_send(server, num_rqst, rqst);
333 goto smbd_done;
334 }
335
336 rc = -EAGAIN;
337 if (ssocket == NULL)
338 goto out;
339
340 rc = -ERESTARTSYS;
341 if (fatal_signal_pending(current)) {
342 cifs_dbg(FYI, "signal pending before send request\n");
343 goto out;
344 }
345
346 rc = 0;
347 /* cork the socket */
348 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
349 (char *)&val, sizeof(val));
350
351 for (j = 0; j < num_rqst; j++)
352 send_length += smb_rqst_len(server, &rqst[j]);
353 rfc1002_marker = cpu_to_be32(send_length);
354
355 /*
356 * We should not allow signals to interrupt the network send because
357 * any partial send will cause session reconnects thus increasing
358 * latency of system calls and overload a server with unnecessary
359 * requests.
360 */
361
362 sigfillset(&mask);
363 sigprocmask(SIG_BLOCK, &mask, &oldmask);
364
365 /* Generate a rfc1002 marker for SMB2+ */
366 if (server->vals->header_preamble_size == 0) {
367 struct kvec hiov = {
368 .iov_base = &rfc1002_marker,
369 .iov_len = 4
370 };
371 iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
372 rc = smb_send_kvec(server, &smb_msg, &sent);
373 if (rc < 0)
374 goto unmask;
375
376 total_len += sent;
377 send_length += 4;
378 }
379
380 cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
381
382 for (j = 0; j < num_rqst; j++) {
383 iov = rqst[j].rq_iov;
384 n_vec = rqst[j].rq_nvec;
385
386 size = 0;
387 for (i = 0; i < n_vec; i++) {
388 dump_smb(iov[i].iov_base, iov[i].iov_len);
389 size += iov[i].iov_len;
390 }
391
392 iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
393
394 rc = smb_send_kvec(server, &smb_msg, &sent);
395 if (rc < 0)
396 goto unmask;
397
398 total_len += sent;
399
400 /* now walk the page array and send each page in it */
401 for (i = 0; i < rqst[j].rq_npages; i++) {
402 struct bio_vec bvec;
403
404 bvec.bv_page = rqst[j].rq_pages[i];
405 rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
406 &bvec.bv_offset);
407
408 iov_iter_bvec(&smb_msg.msg_iter, WRITE,
409 &bvec, 1, bvec.bv_len);
410 rc = smb_send_kvec(server, &smb_msg, &sent);
411 if (rc < 0)
412 break;
413
414 total_len += sent;
415 }
416 }
417
418 unmask:
419 sigprocmask(SIG_SETMASK, &oldmask, NULL);
420
421 /*
422 * If signal is pending but we have already sent the whole packet to
423 * the server we need to return success status to allow a corresponding
424 * mid entry to be kept in the pending requests queue, thus allowing
425 * the client to handle responses from the server.
426 *
427 * If only part of the packet has been sent there is no need to hide
428 * interrupt because the session will be reconnected anyway, so there
429 * won't be any response from the server to handle.
430 */
431
432 if (signal_pending(current) && (total_len != send_length)) {
433 cifs_dbg(FYI, "signal is pending after attempt to send\n");
434 rc = -ERESTARTSYS;
435 }
436
437 /* uncork it */
438 val = 0;
439 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
440 (char *)&val, sizeof(val));
441
442 if ((total_len > 0) && (total_len != send_length)) {
443 cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
444 send_length, total_len);
445 /*
446 * If we have only sent part of an SMB then the next SMB could
447 * be taken as the remainder of this one. We need to kill the
448 * socket so the server throws away the partial SMB
449 */
450 server->tcpStatus = CifsNeedReconnect;
451 trace_smb3_partial_send_reconnect(server->CurrentMid,
452 server->hostname);
453 }
454 smbd_done:
455 if (rc < 0 && rc != -EINTR)
456 cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
457 rc);
458 else if (rc > 0)
459 rc = 0;
460 out:
461 cifs_in_send_dec(server);
462 return rc;
463 }
464
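/*
 * Wrapper around __smb_send_rqst that handles encryption: when
 * CIFS_TRANSFORM_REQ is set, a transform header is prepended as an extra
 * request and the server's init_transform_rq callback builds the encrypted
 * copies of the chain before it is sent.
 */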
465 static int
466 smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
467 struct smb_rqst *rqst, int flags)
468 {
469 struct kvec iov;
470 struct smb2_transform_hdr *tr_hdr;
471 struct smb_rqst cur_rqst[MAX_COMPOUND];
472 int rc;
473
474 if (!(flags & CIFS_TRANSFORM_REQ))
475 return __smb_send_rqst(server, num_rqst, rqst);
476
477 if (num_rqst > MAX_COMPOUND - 1)
478 return -ENOMEM;
479
480 if (!server->ops->init_transform_rq) {
481 cifs_server_dbg(VFS, "Encryption requested but transform "
482 "callback is missing\n");
483 return -EIO;
484 }
485
486 tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
487 if (!tr_hdr)
488 return -ENOMEM;
489
490 memset(&cur_rqst[0], 0, sizeof(cur_rqst));
491 memset(&iov, 0, sizeof(iov));
492 memset(tr_hdr, 0, sizeof(*tr_hdr));
493
494 iov.iov_base = tr_hdr;
495 iov.iov_len = sizeof(*tr_hdr);
496 cur_rqst[0].rq_iov = &iov;
497 cur_rqst[0].rq_nvec = 1;
498
499 rc = server->ops->init_transform_rq(server, num_rqst + 1,
500 &cur_rqst[0], rqst);
501 if (rc)
502 goto out;
503
504 rc = __smb_send_rqst(server, num_rqst + 1, &cur_rqst[0]);
505 smb3_free_compound_rqst(num_rqst, &cur_rqst[1]);
506 out:
507 kfree(tr_hdr);
508 return rc;
509 }
510
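/*
 * Send a single, already formatted SMB1 buffer on the socket. The first
 * iovec covers the 4 byte RFC1002 length field, the second the rest of the
 * packet.
 */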
511 int
512 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
513 unsigned int smb_buf_length)
514 {
515 struct kvec iov[2];
516 struct smb_rqst rqst = { .rq_iov = iov,
517 .rq_nvec = 2 };
518
519 iov[0].iov_base = smb_buffer;
520 iov[0].iov_len = 4;
521 iov[1].iov_base = (char *)smb_buffer + 4;
522 iov[1].iov_len = smb_buf_length;
523
524 return __smb_send_rqst(server, 1, &rqst);
525 }
526
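/*
 * Block until num_credits credits of the type selected by flags are
 * available, or until the timeout (in milliseconds, negative means wait
 * forever) expires. On success the credits are deducted, in_flight is
 * updated and *instance records the reconnect_instance they were charged
 * against; blocking lock requests skip this accounting, and oplock break
 * responses are never held up.
 */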
527 static int
528 wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
529 const int timeout, const int flags,
530 unsigned int *instance)
531 {
532 long rc;
533 int *credits;
534 int optype;
535 long int t;
536
537 if (timeout < 0)
538 t = MAX_JIFFY_OFFSET;
539 else
540 t = msecs_to_jiffies(timeout);
541
542 optype = flags & CIFS_OP_MASK;
543
544 *instance = 0;
545
546 credits = server->ops->get_credits_field(server, optype);
547 /* Since an echo is already inflight, no need to wait to send another */
548 if (*credits <= 0 && optype == CIFS_ECHO_OP)
549 return -EAGAIN;
550
551 spin_lock(&server->req_lock);
552 if ((flags & CIFS_TIMEOUT_MASK) == CIFS_NON_BLOCKING) {
553 /* oplock breaks must not be held up */
554 server->in_flight++;
555 if (server->in_flight > server->max_in_flight)
556 server->max_in_flight = server->in_flight;
557 *credits -= 1;
558 *instance = server->reconnect_instance;
559 spin_unlock(&server->req_lock);
560 return 0;
561 }
562
563 while (1) {
564 if (*credits < num_credits) {
565 spin_unlock(&server->req_lock);
566 cifs_num_waiters_inc(server);
567 rc = wait_event_killable_timeout(server->request_q,
568 has_credits(server, credits, num_credits), t);
569 cifs_num_waiters_dec(server);
570 if (!rc) {
571 trace_smb3_credit_timeout(server->CurrentMid,
572 server->hostname, num_credits);
573 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
574 timeout);
575 return -ENOTSUPP;
576 }
577 if (rc == -ERESTARTSYS)
578 return -ERESTARTSYS;
579 spin_lock(&server->req_lock);
580 } else {
581 if (server->tcpStatus == CifsExiting) {
582 spin_unlock(&server->req_lock);
583 return -ENOENT;
584 }
585
586 /*
587 * For normal commands, reserve the last MAX_COMPOUND
588 * credits for compound requests.
589 * Otherwise these compounds could be permanently
590 * starved for credits by single-credit requests.
591 *
592 * To prevent spinning CPU, block this thread until
593 * there are >MAX_COMPOUND credits available.
594 * But only do this if we already have a lot of
595 * credits in flight to avoid triggering this check
596 * for servers that are slow to hand out credits on
597 * new sessions.
598 */
599 if (!optype && num_credits == 1 &&
600 server->in_flight > 2 * MAX_COMPOUND &&
601 *credits <= MAX_COMPOUND) {
602 spin_unlock(&server->req_lock);
603 cifs_num_waiters_inc(server);
604 rc = wait_event_killable_timeout(
605 server->request_q,
606 has_credits(server, credits,
607 MAX_COMPOUND + 1),
608 t);
609 cifs_num_waiters_dec(server);
610 if (!rc) {
611 trace_smb3_credit_timeout(
612 server->CurrentMid,
613 server->hostname, num_credits);
614 cifs_server_dbg(VFS, "wait timed out after %d ms\n",
615 timeout);
616 return -ENOTSUPP;
617 }
618 if (rc == -ERESTARTSYS)
619 return -ERESTARTSYS;
620 spin_lock(&server->req_lock);
621 continue;
622 }
623
624 /*
625 * Can not count locking commands against total
626 * as they are allowed to block on server.
627 */
628
629 /* update # of requests on the wire to server */
630 if ((flags & CIFS_TIMEOUT_MASK) != CIFS_BLOCKING_OP) {
631 *credits -= num_credits;
632 server->in_flight += num_credits;
633 if (server->in_flight > server->max_in_flight)
634 server->max_in_flight = server->in_flight;
635 *instance = server->reconnect_instance;
636 }
637 spin_unlock(&server->req_lock);
638 break;
639 }
640 }
641 return 0;
642 }
643
644 static int
645 wait_for_free_request(struct TCP_Server_Info *server, const int flags,
646 unsigned int *instance)
647 {
648 return wait_for_free_credits(server, 1, -1, flags,
649 instance);
650 }
651
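/*
 * Like wait_for_free_request() but for a compound chain of "num" requests.
 * If we are short of credits and nothing is in flight, fail immediately
 * rather than waiting for credits that will never arrive; otherwise wait
 * up to 60 seconds for the server to grant enough credits.
 */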
652 static int
653 wait_for_compound_request(struct TCP_Server_Info *server, int num,
654 const int flags, unsigned int *instance)
655 {
656 int *credits;
657
658 credits = server->ops->get_credits_field(server, flags & CIFS_OP_MASK);
659
660 spin_lock(&server->req_lock);
661 if (*credits < num) {
662 /*
663 * If the server is tight on resources or just gives us less
664 * credits for other reasons (e.g. requests are coming out of
665 * order and the server delays granting more credits until it
666 * processes a missing mid) and we exhausted most available
667 * credits there may be situations when we try to send
668 * a compound request but we don't have enough credits. At this
669 * point the client needs to decide if it should wait for
670 * additional credits or fail the request. If at least one
671 * request is in flight there is a high probability that the
672 * server will return enough credits to satisfy this compound
673 * request.
674 *
675 * Return immediately if no requests in flight since we will be
676 * stuck on waiting for credits.
677 */
678 if (server->in_flight == 0) {
679 spin_unlock(&server->req_lock);
680 return -ENOTSUPP;
681 }
682 }
683 spin_unlock(&server->req_lock);
684
685 return wait_for_free_credits(server, num, 60000, flags,
686 instance);
687 }
688
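/*
 * SMB1 has no large MTU credit accounting, so this "wait" is trivial:
 * grant the full requested size with a zero-value credit stamped with the
 * current reconnect instance.
 */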
689 int
690 cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
691 unsigned int *num, struct cifs_credits *credits)
692 {
693 *num = size;
694 credits->value = 0;
695 credits->instance = server->reconnect_instance;
696 return 0;
697 }
698
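/*
 * Allocate a mid for an SMB1 request and queue it on the server's
 * pending_mid_q, after checking that the TCP and SMB sessions are in a
 * state in which this command may be sent.
 */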
699 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
700 struct mid_q_entry **ppmidQ)
701 {
702 if (ses->server->tcpStatus == CifsExiting) {
703 return -ENOENT;
704 }
705
706 if (ses->server->tcpStatus == CifsNeedReconnect) {
707 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
708 return -EAGAIN;
709 }
710
711 if (ses->status == CifsNew) {
712 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
713 (in_buf->Command != SMB_COM_NEGOTIATE))
714 return -EAGAIN;
715 /* else ok - we are setting up session */
716 }
717
718 if (ses->status == CifsExiting) {
719 /* check if SMB session is bad because we are setting it up */
720 if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
721 return -EAGAIN;
722 /* else ok - we are shutting down session */
723 }
724
725 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
726 if (*ppmidQ == NULL)
727 return -ENOMEM;
728 spin_lock(&GlobalMid_Lock);
729 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
730 spin_unlock(&GlobalMid_Lock);
731 return 0;
732 }
733
734 static int
735 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
736 {
737 int error;
738
739 error = wait_event_freezekillable_unsafe(server->response_q,
740 midQ->mid_state != MID_REQUEST_SUBMITTED);
741 if (error < 0)
742 return -ERESTARTSYS;
743
744 return 0;
745 }
746
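/*
 * Build and sign a mid for an asynchronous request. rq_iov[0] must be the
 * 4 byte RFC1002 length, immediately followed in memory by the SMB header
 * in rq_iov[1]. Returns an ERR_PTR on failure.
 */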
747 struct mid_q_entry *
748 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
749 {
750 int rc;
751 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
752 struct mid_q_entry *mid;
753
754 if (rqst->rq_iov[0].iov_len != 4 ||
755 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
756 return ERR_PTR(-EIO);
757
758 /* enable signing if server requires it */
759 if (server->sign)
760 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
761
762 mid = AllocMidQEntry(hdr, server);
763 if (mid == NULL)
764 return ERR_PTR(-ENOMEM);
765
766 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
767 if (rc) {
768 DeleteMidQEntry(mid);
769 return ERR_PTR(rc);
770 }
771
772 return mid;
773 }
774
775 /*
776 * Send a SMB request and set the callback function in the mid to handle
777 * the result. Caller is responsible for dealing with timeouts.
778 */
779 int
780 cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
781 mid_receive_t *receive, mid_callback_t *callback,
782 mid_handle_t *handle, void *cbdata, const int flags,
783 const struct cifs_credits *exist_credits)
784 {
785 int rc;
786 struct mid_q_entry *mid;
787 struct cifs_credits credits = { .value = 0, .instance = 0 };
788 unsigned int instance;
789 int optype;
790
791 optype = flags & CIFS_OP_MASK;
792
793 if ((flags & CIFS_HAS_CREDITS) == 0) {
794 rc = wait_for_free_request(server, flags, &instance);
795 if (rc)
796 return rc;
797 credits.value = 1;
798 credits.instance = instance;
799 } else
800 instance = exist_credits->instance;
801
802 mutex_lock(&server->srv_mutex);
803
804 /*
805 * We can't use credits obtained from the previous session to send this
806 * request. Check if there were reconnects after we obtained credits and
807 * return -EAGAIN in such cases to let callers handle it.
808 */
809 if (instance != server->reconnect_instance) {
810 mutex_unlock(&server->srv_mutex);
811 add_credits_and_wake_if(server, &credits, optype);
812 return -EAGAIN;
813 }
814
815 mid = server->ops->setup_async_request(server, rqst);
816 if (IS_ERR(mid)) {
817 mutex_unlock(&server->srv_mutex);
818 add_credits_and_wake_if(server, &credits, optype);
819 return PTR_ERR(mid);
820 }
821
822 mid->receive = receive;
823 mid->callback = callback;
824 mid->callback_data = cbdata;
825 mid->handle = handle;
826 mid->mid_state = MID_REQUEST_SUBMITTED;
827
828 /* put it on the pending_mid_q */
829 spin_lock(&GlobalMid_Lock);
830 list_add_tail(&mid->qhead, &server->pending_mid_q);
831 spin_unlock(&GlobalMid_Lock);
832
833 /*
834 * Need to store the time in mid before calling I/O. For call_async,
835 * I/O response may come back and free the mid entry on another thread.
836 */
837 cifs_save_when_sent(mid);
838 rc = smb_send_rqst(server, 1, rqst, flags);
839
840 if (rc < 0) {
841 revert_current_mid(server, mid->credits);
842 server->sequence_number -= 2;
843 cifs_delete_mid(mid);
844 }
845
846 mutex_unlock(&server->srv_mutex);
847
848 if (rc == 0)
849 return 0;
850
851 add_credits_and_wake_if(server, &credits, optype);
852 return rc;
853 }
854
855 /*
856 *
857 * Send an SMB Request. No response info (other than return code)
858 * needs to be parsed.
859 *
860 * flags indicate the type of request buffer and how long to wait
861 * and whether to log NT STATUS code (error) before mapping it to POSIX error
862 *
863 */
864 int
865 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
866 char *in_buf, int flags)
867 {
868 int rc;
869 struct kvec iov[1];
870 struct kvec rsp_iov;
871 int resp_buf_type;
872
873 iov[0].iov_base = in_buf;
874 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
875 flags |= CIFS_NO_RSP_BUF;
876 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
877 cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
878
879 return rc;
880 }
881
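/*
 * Convert the final state of a synchronously waited-for mid into an errno.
 * A successfully received response returns 0 and leaves the mid for the
 * caller to consume; any other state releases the mid, dequeuing it first
 * if the demultiplex thread has not already done so.
 */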
882 static int
883 cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
884 {
885 int rc = 0;
886
887 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
888 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
889
890 spin_lock(&GlobalMid_Lock);
891 switch (mid->mid_state) {
892 case MID_RESPONSE_RECEIVED:
893 spin_unlock(&GlobalMid_Lock);
894 return rc;
895 case MID_RETRY_NEEDED:
896 rc = -EAGAIN;
897 break;
898 case MID_RESPONSE_MALFORMED:
899 rc = -EIO;
900 break;
901 case MID_SHUTDOWN:
902 rc = -EHOSTDOWN;
903 break;
904 default:
905 if (!(mid->mid_flags & MID_DELETED)) {
906 list_del_init(&mid->qhead);
907 mid->mid_flags |= MID_DELETED;
908 }
909 cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
910 __func__, mid->mid, mid->mid_state);
911 rc = -EIO;
912 }
913 spin_unlock(&GlobalMid_Lock);
914
915 DeleteMidQEntry(mid);
916 return rc;
917 }
918
919 static inline int
920 send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
921 struct mid_q_entry *mid)
922 {
923 return server->ops->send_cancel ?
924 server->ops->send_cancel(server, rqst, mid) : 0;
925 }
926
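/*
 * Basic checking of an SMB1 response: dump the start of the buffer, verify
 * the signature if signing is in use, and map the SMB status code to a
 * POSIX error.
 */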
927 int
928 cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
929 bool log_error)
930 {
931 unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
932
933 dump_smb(mid->resp_buf, min_t(u32, 92, len));
934
935 /* convert the length into a more usable form */
936 if (server->sign) {
937 struct kvec iov[2];
938 int rc = 0;
939 struct smb_rqst rqst = { .rq_iov = iov,
940 .rq_nvec = 2 };
941
942 iov[0].iov_base = mid->resp_buf;
943 iov[0].iov_len = 4;
944 iov[1].iov_base = (char *)mid->resp_buf + 4;
945 iov[1].iov_len = len - 4;
946 /* FIXME: add code to kill session */
947 rc = cifs_verify_signature(&rqst, server,
948 mid->sequence_number);
949 if (rc)
950 cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
951 rc);
952 }
953
954 /* BB special case reconnect tid and uid here? */
955 return map_smb_to_linux_error(mid->resp_buf, log_error);
956 }
957
958 struct mid_q_entry *
959 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
960 {
961 int rc;
962 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
963 struct mid_q_entry *mid;
964
965 if (rqst->rq_iov[0].iov_len != 4 ||
966 rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
967 return ERR_PTR(-EIO);
968
969 rc = allocate_mid(ses, hdr, &mid);
970 if (rc)
971 return ERR_PTR(rc);
972 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
973 if (rc) {
974 cifs_delete_mid(mid);
975 return ERR_PTR(rc);
976 }
977 return mid;
978 }
979
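/*
 * Callback used for every link of a compound chain: add the credits
 * granted by this response back to the server's pool so that other
 * requests can use them.
 */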
980 static void
981 cifs_compound_callback(struct mid_q_entry *mid)
982 {
983 struct TCP_Server_Info *server = mid->server;
984 struct cifs_credits credits;
985
986 credits.value = server->ops->get_credits(mid);
987 credits.instance = server->reconnect_instance;
988
989 add_credits(server, &credits, mid->optype);
990 }
991
992 static void
993 cifs_compound_last_callback(struct mid_q_entry *mid)
994 {
995 cifs_compound_callback(mid);
996 cifs_wake_up_task(mid);
997 }
998
999 static void
1000 cifs_cancelled_callback(struct mid_q_entry *mid)
1001 {
1002 cifs_compound_callback(mid);
1003 DeleteMidQEntry(mid);
1004 }
1005
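/*
 * Send a chain of up to MAX_COMPOUND requests and wait for the responses.
 * Credits are obtained up front, the chain is signed and sent under
 * srv_mutex, then each mid is waited for in turn. If the wait is
 * interrupted, the outstanding mids are cancelled and their credits are
 * reclaimed by cifs_cancelled_callback once the responses (or a reconnect)
 * eventually arrive. Response buffers are handed to the caller in
 * resp_iov/resp_buf_type unless CIFS_NO_RSP_BUF was set.
 */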
1006 int
1007 compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
1008 const int flags, const int num_rqst, struct smb_rqst *rqst,
1009 int *resp_buf_type, struct kvec *resp_iov)
1010 {
1011 int i, j, optype, rc = 0;
1012 struct mid_q_entry *midQ[MAX_COMPOUND];
1013 bool cancelled_mid[MAX_COMPOUND] = {false};
1014 struct cifs_credits credits[MAX_COMPOUND] = {
1015 { .value = 0, .instance = 0 }
1016 };
1017 unsigned int instance;
1018 char *buf;
1019 struct TCP_Server_Info *server;
1020
1021 optype = flags & CIFS_OP_MASK;
1022
1023 for (i = 0; i < num_rqst; i++)
1024 resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
1025
1026 if ((ses == NULL) || (ses->server == NULL)) {
1027 cifs_dbg(VFS, "Null session\n");
1028 return -EIO;
1029 }
1030
1031 server = ses->server;
1032 if (server->tcpStatus == CifsExiting)
1033 return -ENOENT;
1034
1035 /*
1036 * Wait for all the requests to become available.
1037 * This approach still leaves the possibility of getting stuck waiting for
1038 * credits if the server doesn't grant credits to the outstanding
1039 * requests and if the client is completely idle, not generating any
1040 * other requests.
1041 * This can be handled by the eventual session reconnect.
1042 */
1043 rc = wait_for_compound_request(server, num_rqst, flags,
1044 &instance);
1045 if (rc)
1046 return rc;
1047
1048 for (i = 0; i < num_rqst; i++) {
1049 credits[i].value = 1;
1050 credits[i].instance = instance;
1051 }
1052
1053 /*
1054 * Make sure that we sign in the same order that we send on this socket
1055 * and avoid races inside tcp sendmsg code that could cause corruption
1056 * of smb data.
1057 */
1058
1059 mutex_lock(&server->srv_mutex);
1060
1061 /*
1062 * All the parts of the compound chain must use credits obtained from the
1063 * same session. We can not use credits obtained from the previous
1064 * session to send this request. Check if there were reconnects after
1065 * we obtained credits and return -EAGAIN in such cases to let callers
1066 * handle it.
1067 */
1068 if (instance != server->reconnect_instance) {
1069 mutex_unlock(&server->srv_mutex);
1070 for (j = 0; j < num_rqst; j++)
1071 add_credits(server, &credits[j], optype);
1072 return -EAGAIN;
1073 }
1074
1075 for (i = 0; i < num_rqst; i++) {
1076 midQ[i] = server->ops->setup_request(ses, &rqst[i]);
1077 if (IS_ERR(midQ[i])) {
1078 revert_current_mid(server, i);
1079 for (j = 0; j < i; j++)
1080 cifs_delete_mid(midQ[j]);
1081 mutex_unlock(&server->srv_mutex);
1082
1083 /* Update # of requests on wire to server */
1084 for (j = 0; j < num_rqst; j++)
1085 add_credits(server, &credits[j], optype);
1086 return PTR_ERR(midQ[i]);
1087 }
1088
1089 midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
1090 midQ[i]->optype = optype;
1091 /*
1092 * Invoke callback for every part of the compound chain
1093 * to calculate credits properly. Wake up this thread only when
1094 * the last element is received.
1095 */
1096 if (i < num_rqst - 1)
1097 midQ[i]->callback = cifs_compound_callback;
1098 else
1099 midQ[i]->callback = cifs_compound_last_callback;
1100 }
1101 rc = smb_send_rqst(server, num_rqst, rqst, flags);
1102
1103 for (i = 0; i < num_rqst; i++)
1104 cifs_save_when_sent(midQ[i]);
1105
1106 if (rc < 0) {
1107 revert_current_mid(server, num_rqst);
1108 server->sequence_number -= 2;
1109 }
1110
1111 mutex_unlock(&server->srv_mutex);
1112
1113 /*
1114 * If sending failed for some reason or it is an oplock break that we
1115 * will not receive a response to - return credits back
1116 */
1117 if (rc < 0 || (flags & CIFS_NO_SRV_RSP)) {
1118 for (i = 0; i < num_rqst; i++)
1119 add_credits(server, &credits[i], optype);
1120 goto out;
1121 }
1122
1123 /*
1124 * At this point the request is passed to the network stack - we assume
1125 * that any credits taken from the server structure on the client have
1126 * been spent and we can't return them back. Once we receive responses
1127 * we will collect credits granted by the server in the mid callbacks
1128 * and add those credits to the server structure.
1129 */
1130
1131 /*
1132 * Compounding is never used during session establishment.
1133 */
1134 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1135 mutex_lock(&server->srv_mutex);
1136 smb311_update_preauth_hash(ses, rqst[0].rq_iov,
1137 rqst[0].rq_nvec);
1138 mutex_unlock(&server->srv_mutex);
1139 }
1140
1141 for (i = 0; i < num_rqst; i++) {
1142 rc = wait_for_response(server, midQ[i]);
1143 if (rc != 0)
1144 break;
1145 }
1146 if (rc != 0) {
1147 for (; i < num_rqst; i++) {
1148 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
1149 midQ[i]->mid, le16_to_cpu(midQ[i]->command));
1150 send_cancel(server, &rqst[i], midQ[i]);
1151 spin_lock(&GlobalMid_Lock);
1152 midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
1153 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
1154 midQ[i]->callback = cifs_cancelled_callback;
1155 cancelled_mid[i] = true;
1156 credits[i].value = 0;
1157 }
1158 spin_unlock(&GlobalMid_Lock);
1159 }
1160 }
1161
1162 for (i = 0; i < num_rqst; i++) {
1163 if (rc < 0)
1164 goto out;
1165
1166 rc = cifs_sync_mid_result(midQ[i], server);
1167 if (rc != 0) {
1168 /* mark this mid as cancelled to not free it below */
1169 cancelled_mid[i] = true;
1170 goto out;
1171 }
1172
1173 if (!midQ[i]->resp_buf ||
1174 midQ[i]->mid_state != MID_RESPONSE_RECEIVED) {
1175 rc = -EIO;
1176 cifs_dbg(FYI, "Bad MID state?\n");
1177 goto out;
1178 }
1179
1180 buf = (char *)midQ[i]->resp_buf;
1181 resp_iov[i].iov_base = buf;
1182 resp_iov[i].iov_len = midQ[i]->resp_buf_size +
1183 server->vals->header_preamble_size;
1184
1185 if (midQ[i]->large_buf)
1186 resp_buf_type[i] = CIFS_LARGE_BUFFER;
1187 else
1188 resp_buf_type[i] = CIFS_SMALL_BUFFER;
1189
1190 rc = server->ops->check_receive(midQ[i], server,
1191 flags & CIFS_LOG_ERROR);
1192
1193 /* mark it so buf will not be freed by cifs_delete_mid */
1194 if ((flags & CIFS_NO_RSP_BUF) == 0)
1195 midQ[i]->resp_buf = NULL;
1196
1197 }
1198
1199 /*
1200 * Compounding is never used during session establishment.
1201 */
1202 if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
1203 struct kvec iov = {
1204 .iov_base = resp_iov[0].iov_base,
1205 .iov_len = resp_iov[0].iov_len
1206 };
1207 mutex_lock(&server->srv_mutex);
1208 smb311_update_preauth_hash(ses, &iov, 1);
1209 mutex_unlock(&server->srv_mutex);
1210 }
1211
1212 out:
1213 /*
1214 * This will dequeue all mids. After this it is important that the
1215 * demultiplex_thread will not process any of these mids any further.
1216 * This is prevented above by using a noop callback that will not
1217 * wake this thread except for the very last PDU.
1218 */
1219 for (i = 0; i < num_rqst; i++) {
1220 if (!cancelled_mid[i])
1221 cifs_delete_mid(midQ[i]);
1222 }
1223
1224 return rc;
1225 }
1226
1227 int
1228 cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
1229 struct smb_rqst *rqst, int *resp_buf_type, const int flags,
1230 struct kvec *resp_iov)
1231 {
1232 return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
1233 resp_iov);
1234 }
1235
1236 int
1237 SendReceive2(const unsigned int xid, struct cifs_ses *ses,
1238 struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
1239 const int flags, struct kvec *resp_iov)
1240 {
1241 struct smb_rqst rqst;
1242 struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
1243 int rc;
1244
1245 if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
1246 new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
1247 GFP_KERNEL);
1248 if (!new_iov) {
1249 /* otherwise cifs_send_recv below sets resp_buf_type */
1250 *resp_buf_type = CIFS_NO_BUFFER;
1251 return -ENOMEM;
1252 }
1253 } else
1254 new_iov = s_iov;
1255
1256 /* 1st iov is a RFC1001 length followed by the rest of the packet */
1257 memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
1258
1259 new_iov[0].iov_base = new_iov[1].iov_base;
1260 new_iov[0].iov_len = 4;
1261 new_iov[1].iov_base += 4;
1262 new_iov[1].iov_len -= 4;
1263
1264 memset(&rqst, 0, sizeof(struct smb_rqst));
1265 rqst.rq_iov = new_iov;
1266 rqst.rq_nvec = n_vec + 1;
1267
1268 rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
1269 if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
1270 kfree(new_iov);
1271 return rc;
1272 }
1273
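/*
 * Legacy SMB1 synchronous send/receive for a single already-built request
 * buffer: obtain a credit, sign and send the request, wait for the
 * response and copy it into out_buf. *pbytes_returned is set to the
 * RFC1002 length of the response.
 */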
1274 int
1275 SendReceive(const unsigned int xid, struct cifs_ses *ses,
1276 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1277 int *pbytes_returned, const int flags)
1278 {
1279 int rc = 0;
1280 struct mid_q_entry *midQ;
1281 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1282 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1283 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1284 struct cifs_credits credits = { .value = 1, .instance = 0 };
1285 struct TCP_Server_Info *server;
1286
1287 if (ses == NULL) {
1288 cifs_dbg(VFS, "Null smb session\n");
1289 return -EIO;
1290 }
1291 server = ses->server;
1292 if (server == NULL) {
1293 cifs_dbg(VFS, "Null tcp session\n");
1294 return -EIO;
1295 }
1296
1297 if (server->tcpStatus == CifsExiting)
1298 return -ENOENT;
1299
1300 /* Ensure that we do not send more than 50 overlapping requests
1301 to the same server. We may make this configurable later or
1302 use ses->maxReq */
1303
1304 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1305 cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1306 len);
1307 return -EIO;
1308 }
1309
1310 rc = wait_for_free_request(server, flags, &credits.instance);
1311 if (rc)
1312 return rc;
1313
1314 /* make sure that we sign in the same order that we send on this socket
1315 and avoid races inside tcp sendmsg code that could cause corruption
1316 of smb data */
1317
1318 mutex_lock(&server->srv_mutex);
1319
1320 rc = allocate_mid(ses, in_buf, &midQ);
1321 if (rc) {
1322 mutex_unlock(&ses->server->srv_mutex);
1323 /* Update # of requests on wire to server */
1324 add_credits(server, &credits, 0);
1325 return rc;
1326 }
1327
1328 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1329 if (rc) {
1330 mutex_unlock(&server->srv_mutex);
1331 goto out;
1332 }
1333
1334 midQ->mid_state = MID_REQUEST_SUBMITTED;
1335
1336 rc = smb_send(server, in_buf, len);
1337 cifs_save_when_sent(midQ);
1338
1339 if (rc < 0)
1340 server->sequence_number -= 2;
1341
1342 mutex_unlock(&server->srv_mutex);
1343
1344 if (rc < 0)
1345 goto out;
1346
1347 rc = wait_for_response(server, midQ);
1348 if (rc != 0) {
1349 send_cancel(server, &rqst, midQ);
1350 spin_lock(&GlobalMid_Lock);
1351 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1352 /* no longer considered to be "in-flight" */
1353 midQ->callback = DeleteMidQEntry;
1354 spin_unlock(&GlobalMid_Lock);
1355 add_credits(server, &credits, 0);
1356 return rc;
1357 }
1358 spin_unlock(&GlobalMid_Lock);
1359 }
1360
1361 rc = cifs_sync_mid_result(midQ, server);
1362 if (rc != 0) {
1363 add_credits(server, &credits, 0);
1364 return rc;
1365 }
1366
1367 if (!midQ->resp_buf || !out_buf ||
1368 midQ->mid_state != MID_RESPONSE_RECEIVED) {
1369 rc = -EIO;
1370 cifs_server_dbg(VFS, "Bad MID state?\n");
1371 goto out;
1372 }
1373
1374 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1375 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1376 rc = cifs_check_receive(midQ, server, 0);
1377 out:
1378 cifs_delete_mid(midQ);
1379 add_credits(server, &credits, 0);
1380
1381 return rc;
1382 }
1383
1384 /* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
1385 blocking lock to return. */
1386
1387 static int
1388 send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
1389 struct smb_hdr *in_buf,
1390 struct smb_hdr *out_buf)
1391 {
1392 int bytes_returned;
1393 struct cifs_ses *ses = tcon->ses;
1394 LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
1395
1396 /* We just modify the current in_buf to change
1397 the type of lock from LOCKING_ANDX_SHARED_LOCK
1398 or LOCKING_ANDX_EXCLUSIVE_LOCK to
1399 LOCKING_ANDX_CANCEL_LOCK. */
1400
1401 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
1402 pSMB->Timeout = 0;
1403 pSMB->hdr.Mid = get_next_mid(ses->server);
1404
1405 return SendReceive(xid, ses, in_buf, out_buf,
1406 &bytes_returned, 0);
1407 }
1408
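/*
 * Synchronous send/receive for SMB1 blocking lock requests. These may
 * legitimately block on the server for a long time, so no credit is
 * charged and the wait is interruptible; if a signal arrives, a cancel
 * (NT_CANCEL or LOCKINGX_CANCEL_LOCK) is sent so that the server releases
 * the blocked request and we can pick up its response.
 */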
1409 int
1410 SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
1411 struct smb_hdr *in_buf, struct smb_hdr *out_buf,
1412 int *pbytes_returned)
1413 {
1414 int rc = 0;
1415 int rstart = 0;
1416 struct mid_q_entry *midQ;
1417 struct cifs_ses *ses;
1418 unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
1419 struct kvec iov = { .iov_base = in_buf, .iov_len = len };
1420 struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
1421 unsigned int instance;
1422 struct TCP_Server_Info *server;
1423
1424 if (tcon == NULL || tcon->ses == NULL) {
1425 cifs_dbg(VFS, "Null smb session\n");
1426 return -EIO;
1427 }
1428 ses = tcon->ses;
1429 server = ses->server;
1430
1431 if (server == NULL) {
1432 cifs_dbg(VFS, "Null tcp session\n");
1433 return -EIO;
1434 }
1435
1436 if (server->tcpStatus == CifsExiting)
1437 return -ENOENT;
1438
1439 /* Ensure that we do not send more than 50 overlapping requests
1440 to the same server. We may make this configurable later or
1441 use ses->maxReq */
1442
1443 if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
1444 cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
1445 len);
1446 return -EIO;
1447 }
1448
1449 rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
1450 if (rc)
1451 return rc;
1452
1453 /* make sure that we sign in the same order that we send on this socket
1454 and avoid races inside tcp sendmsg code that could cause corruption
1455 of smb data */
1456
1457 mutex_lock(&server->srv_mutex);
1458
1459 rc = allocate_mid(ses, in_buf, &midQ);
1460 if (rc) {
1461 mutex_unlock(&server->srv_mutex);
1462 return rc;
1463 }
1464
1465 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
1466 if (rc) {
1467 cifs_delete_mid(midQ);
1468 mutex_unlock(&server->srv_mutex);
1469 return rc;
1470 }
1471
1472 midQ->mid_state = MID_REQUEST_SUBMITTED;
1473 rc = smb_send(server, in_buf, len);
1474 cifs_save_when_sent(midQ);
1475
1476 if (rc < 0)
1477 server->sequence_number -= 2;
1478
1479 mutex_unlock(&server->srv_mutex);
1480
1481 if (rc < 0) {
1482 cifs_delete_mid(midQ);
1483 return rc;
1484 }
1485
1486 /* Wait for a reply - allow signals to interrupt. */
1487 rc = wait_event_interruptible(server->response_q,
1488 (!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
1489 ((server->tcpStatus != CifsGood) &&
1490 (server->tcpStatus != CifsNew)));
1491
1492 /* Were we interrupted by a signal ? */
1493 if ((rc == -ERESTARTSYS) &&
1494 (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
1495 ((server->tcpStatus == CifsGood) ||
1496 (server->tcpStatus == CifsNew))) {
1497
1498 if (in_buf->Command == SMB_COM_TRANSACTION2) {
1499 /* POSIX lock. We send a NT_CANCEL SMB to cause the
1500 blocking lock to return. */
1501 rc = send_cancel(server, &rqst, midQ);
1502 if (rc) {
1503 cifs_delete_mid(midQ);
1504 return rc;
1505 }
1506 } else {
1507 /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
1508 to cause the blocking lock to return. */
1509
1510 rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
1511
1512 /* If we get -ENOLCK back the lock may have
1513 already been removed. Don't exit in this case. */
1514 if (rc && rc != -ENOLCK) {
1515 cifs_delete_mid(midQ);
1516 return rc;
1517 }
1518 }
1519
1520 rc = wait_for_response(server, midQ);
1521 if (rc) {
1522 send_cancel(server, &rqst, midQ);
1523 spin_lock(&GlobalMid_Lock);
1524 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
1525 /* no longer considered to be "in-flight" */
1526 midQ->callback = DeleteMidQEntry;
1527 spin_unlock(&GlobalMid_Lock);
1528 return rc;
1529 }
1530 spin_unlock(&GlobalMid_Lock);
1531 }
1532
1533 /* We got the response - restart system call. */
1534 rstart = 1;
1535 }
1536
1537 rc = cifs_sync_mid_result(midQ, server);
1538 if (rc != 0)
1539 return rc;
1540
1541 /* rcvd frame is ok */
1542 if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
1543 rc = -EIO;
1544 cifs_tcon_dbg(VFS, "Bad MID state?\n");
1545 goto out;
1546 }
1547
1548 *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
1549 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
1550 rc = cifs_check_receive(midQ, server, 0);
1551 out:
1552 cifs_delete_mid(midQ);
1553 if (rstart && rc == -EACCES)
1554 return -ERESTARTSYS;
1555 return rc;
1556 }
1557