1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2009, 2013
5 * Etersoft, 2012
6 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Pavel Shilovsky (pshilovsky@samba.org) 2012
8 *
9 * Contains the routines for constructing the SMB2 PDUs themselves
10 *
11 */
12
13 /* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
14 /* Note that there are handle based routines which must be */
15 /* treated slightly differently for reconnection purposes since we never */
16 /* want to reuse a stale file handle and only the caller knows the file info */
17
18 #include <linux/fs.h>
19 #include <linux/kernel.h>
20 #include <linux/vfs.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/uaccess.h>
23 #include <linux/uuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/xattr.h>
26 #include <linux/netfs.h>
27 #include <trace/events/netfs.h>
28 #include "cifsglob.h"
29 #include "cifsacl.h"
30 #include "cifsproto.h"
31 #include "smb2proto.h"
32 #include "cifs_unicode.h"
33 #include "cifs_debug.h"
34 #include "ntlmssp.h"
35 #include "../common/smb2status.h"
36 #include "smb2glob.h"
37 #include "cifspdu.h"
38 #include "cifs_spnego.h"
39 #include "smbdirect.h"
40 #include "trace.h"
41 #ifdef CONFIG_CIFS_DFS_UPCALL
42 #include "dfs_cache.h"
43 #endif
44 #include "cached_dir.h"
45 #include "compress.h"
46
47 /*
48 * The following table defines the expected "StructureSize" of SMB2 requests
49 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
50 *
51 * Note that commands are defined in smb2pdu.h in le16 but the array below is
52 * indexed by command in host byte order.
53 */
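/*
 * Several of the values below are odd because, per MS-SMB2, StructureSize
 * counts one byte of the variable-length Buffer field that ends those
 * requests (e.g. SMB2_CREATE: 56 fixed bytes + 1 byte of Buffer = 57).
 */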
54 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
55 /* SMB2_NEGOTIATE */ 36,
56 /* SMB2_SESSION_SETUP */ 25,
57 /* SMB2_LOGOFF */ 4,
58 /* SMB2_TREE_CONNECT */ 9,
59 /* SMB2_TREE_DISCONNECT */ 4,
60 /* SMB2_CREATE */ 57,
61 /* SMB2_CLOSE */ 24,
62 /* SMB2_FLUSH */ 24,
63 /* SMB2_READ */ 49,
64 /* SMB2_WRITE */ 49,
65 /* SMB2_LOCK */ 48,
66 /* SMB2_IOCTL */ 57,
67 /* SMB2_CANCEL */ 4,
68 /* SMB2_ECHO */ 4,
69 /* SMB2_QUERY_DIRECTORY */ 33,
70 /* SMB2_CHANGE_NOTIFY */ 32,
71 /* SMB2_QUERY_INFO */ 41,
72 /* SMB2_SET_INFO */ 33,
73 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
74 };
75
76 int smb3_encryption_required(const struct cifs_tcon *tcon)
77 {
78 if (!tcon || !tcon->ses)
79 return 0;
80 if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
81 (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
82 return 1;
83 if (tcon->seal &&
84 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
85 return 1;
86 if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
87 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
88 return 1;
89 return 0;
90 }
91
92 static void
93 smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
94 const struct cifs_tcon *tcon,
95 struct TCP_Server_Info *server)
96 {
97 struct smb3_hdr_req *smb3_hdr;
98
99 shdr->ProtocolId = SMB2_PROTO_NUMBER;
100 shdr->StructureSize = cpu_to_le16(64);
101 shdr->Command = smb2_cmd;
102
103 if (server) {
104 /* After reconnect SMB3 must set ChannelSequence on subsequent reqs */
105 if (server->dialect >= SMB30_PROT_ID) {
106 smb3_hdr = (struct smb3_hdr_req *)shdr;
107 /*
108 * for a secondary channel, use the primary channel's
109 * channel sequence number; otherwise use this channel's own
110 */
111 if (SERVER_IS_CHAN(server))
112 smb3_hdr->ChannelSequence =
113 cpu_to_le16(server->primary_server->channel_sequence_num);
114 else
115 smb3_hdr->ChannelSequence =
116 cpu_to_le16(server->channel_sequence_num);
117 }
118 spin_lock(&server->req_lock);
119 /* Request up to 10 credits but don't go over the limit. */
120 if (server->credits >= server->max_credits)
121 shdr->CreditRequest = cpu_to_le16(0);
122 else
123 shdr->CreditRequest = cpu_to_le16(
124 min_t(int, server->max_credits -
125 server->credits, 10));
126 spin_unlock(&server->req_lock);
127 } else {
128 shdr->CreditRequest = cpu_to_le16(2);
129 }
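/*
 * In other words, the header asks for min(max_credits - credits, 10)
 * extra credits, or none once the configured cap has been reached;
 * e.g. holding 8189 credits of an 8192-credit cap requests only 3 more.
 */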
130 shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
131
132 if (!tcon)
133 goto out;
134
135 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
136 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
137 if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
138 shdr->CreditCharge = cpu_to_le16(1);
139 /* else CreditCharge MBZ */
140
141 shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
142 /* Uid is not converted */
143 if (tcon->ses)
144 shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
145
146 /*
147 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
148 * to pass the path on the Open SMB prefixed by \\server\share.
149 * Not sure when we would need to do the augmented path (if ever) and
150 * setting this flag breaks the SMB2 open operation since it is
151 * illegal to send an empty path name (without \\server\share prefix)
152 * when the DFS flag is set in the SMB open header. We could
153 * consider setting the flag on all operations other than open
154 * but it is safer to not set it for now.
155 */
156 /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
157 shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
158
159 if (server && server->sign && !smb3_encryption_required(tcon))
160 shdr->Flags |= SMB2_FLAGS_SIGNED;
161 out:
162 return;
163 }
164
165 /* helper function for code reuse */
166 static int
167 cifs_chan_skip_or_disable(struct cifs_ses *ses,
168 struct TCP_Server_Info *server,
169 bool from_reconnect)
170 {
171 struct TCP_Server_Info *pserver;
172 unsigned int chan_index;
173
174 if (SERVER_IS_CHAN(server)) {
175 cifs_dbg(VFS,
176 "server %s does not support multichannel anymore. Skip secondary channel\n",
177 ses->server->hostname);
178
179 spin_lock(&ses->chan_lock);
180 chan_index = cifs_ses_get_chan_index(ses, server);
181 if (chan_index == CIFS_INVAL_CHAN_INDEX) {
182 spin_unlock(&ses->chan_lock);
183 goto skip_terminate;
184 }
185
186 ses->chans[chan_index].server = NULL;
187 server->terminate = true;
188 spin_unlock(&ses->chan_lock);
189
190 /*
191 * the above reference of server by channel
192 * needs to be dropped without holding chan_lock
193 * as cifs_put_tcp_session takes a higher lock
194 * i.e. cifs_tcp_ses_lock
195 */
196 cifs_put_tcp_session(server, from_reconnect);
197
198 cifs_signal_cifsd_for_reconnect(server, false);
199
200 /* mark primary server as needing reconnect */
201 pserver = server->primary_server;
202 cifs_signal_cifsd_for_reconnect(pserver, false);
203 skip_terminate:
204 return -EHOSTDOWN;
205 }
206
207 cifs_server_dbg(VFS,
208 "server does not support multichannel anymore. Disable all other channels\n");
209 cifs_disable_secondary_channels(ses);
210
211
212 return 0;
213 }
214
215 static int
216 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
217 struct TCP_Server_Info *server, bool from_reconnect)
218 {
219 int rc = 0;
220 struct nls_table *nls_codepage = NULL;
221 struct cifs_ses *ses;
222 int xid;
223
224 /*
225 * SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
226 * check for tcp and smb session status done differently
227 * for those three - in the calling routine.
228 */
229 if (tcon == NULL)
230 return 0;
231
232 /*
233 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
234 * cifs_tree_connect().
235 */
236 if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
237 return 0;
238
239 spin_lock(&tcon->tc_lock);
240 if (tcon->status == TID_EXITING) {
241 /*
242 * only tree disconnect allowed when disconnecting ...
243 */
244 if (smb2_command != SMB2_TREE_DISCONNECT) {
245 spin_unlock(&tcon->tc_lock);
246 cifs_dbg(FYI, "can not send cmd %d while umounting\n",
247 smb2_command);
248 return -ENODEV;
249 }
250 }
251 spin_unlock(&tcon->tc_lock);
252
253 ses = tcon->ses;
254 if (!ses)
255 return -EIO;
256 spin_lock(&ses->ses_lock);
257 if (ses->ses_status == SES_EXITING) {
258 spin_unlock(&ses->ses_lock);
259 return -EIO;
260 }
261 spin_unlock(&ses->ses_lock);
262 if (!ses->server || !server)
263 return -EIO;
264
265 spin_lock(&server->srv_lock);
266 if (server->tcpStatus == CifsNeedReconnect) {
267 /*
268 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
269 * here since they are implicitly done when session drops.
270 */
271 switch (smb2_command) {
272 /*
273 * BB Should we keep oplock break and add flush to exceptions?
274 */
275 case SMB2_TREE_DISCONNECT:
276 case SMB2_CANCEL:
277 case SMB2_CLOSE:
278 case SMB2_OPLOCK_BREAK:
279 spin_unlock(&server->srv_lock);
280 return -EAGAIN;
281 }
282 }
283
284 /* if server is marked for termination, cifsd will cleanup */
285 if (server->terminate) {
286 spin_unlock(&server->srv_lock);
287 return -EHOSTDOWN;
288 }
289 spin_unlock(&server->srv_lock);
290
291 again:
292 rc = cifs_wait_for_server_reconnect(server, tcon->retry);
293 if (rc)
294 return rc;
295
296 spin_lock(&ses->chan_lock);
297 if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
298 spin_unlock(&ses->chan_lock);
299 return 0;
300 }
301 spin_unlock(&ses->chan_lock);
302 cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
303 tcon->ses->chans_need_reconnect,
304 tcon->need_reconnect);
305
306 mutex_lock(&ses->session_mutex);
307 /*
308 * if this is called by delayed work and the channel has been disabled
309 * in parallel, the delayed work can still be executing; in that case
310 * there's a chance that this channel may not exist anymore
311 */
312 spin_lock(&server->srv_lock);
313 if (server->tcpStatus == CifsExiting) {
314 spin_unlock(&server->srv_lock);
315 mutex_unlock(&ses->session_mutex);
316 rc = -EHOSTDOWN;
317 goto out;
318 }
319
320 /*
321 * Recheck after acquiring the mutex. If another thread is negotiating
322 * and the server never sends an answer the socket will be closed
323 * and tcpStatus set to reconnect.
324 */
325 if (server->tcpStatus == CifsNeedReconnect) {
326 spin_unlock(&server->srv_lock);
327 mutex_unlock(&ses->session_mutex);
328
329 if (tcon->retry)
330 goto again;
331
332 rc = -EHOSTDOWN;
333 goto out;
334 }
335 spin_unlock(&server->srv_lock);
336
337 nls_codepage = ses->local_nls;
338
339 /*
340 * need to prevent multiple threads trying to simultaneously
341 * reconnect the same SMB session
342 */
343 spin_lock(&ses->ses_lock);
344 spin_lock(&ses->chan_lock);
345 if (!cifs_chan_needs_reconnect(ses, server) &&
346 ses->ses_status == SES_GOOD) {
347 spin_unlock(&ses->chan_lock);
348 spin_unlock(&ses->ses_lock);
349 /* this means that we only need to tree connect */
350 if (tcon->need_reconnect)
351 goto skip_sess_setup;
352
353 mutex_unlock(&ses->session_mutex);
354 goto out;
355 }
356 spin_unlock(&ses->chan_lock);
357 spin_unlock(&ses->ses_lock);
358
359 rc = cifs_negotiate_protocol(0, ses, server);
360 if (!rc) {
361 /*
362 * if server stopped supporting multichannel
363 * and the first channel reconnected, disable all the others.
364 */
365 if (ses->chan_count > 1 &&
366 !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
367 rc = cifs_chan_skip_or_disable(ses, server,
368 from_reconnect);
369 if (rc) {
370 mutex_unlock(&ses->session_mutex);
371 goto out;
372 }
373 }
374
375 rc = cifs_setup_session(0, ses, server, nls_codepage);
376 if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
377 /*
378 * Try the alternate password for the next reconnect (e.g. key
379 * rotation could be enabled on the server) if an alternate
380 * password is available and the current password is expired,
381 * but do not swap on non-password-related errors like host down
382 */
383 if (ses->password2)
384 swap(ses->password2, ses->password);
385 }
386
387 if ((rc == -EACCES) && !tcon->retry) {
388 mutex_unlock(&ses->session_mutex);
389 rc = -EHOSTDOWN;
390 goto failed;
391 } else if (rc) {
392 mutex_unlock(&ses->session_mutex);
393 goto out;
394 }
395 } else {
396 mutex_unlock(&ses->session_mutex);
397 goto out;
398 }
399
400 skip_sess_setup:
401 if (!tcon->need_reconnect) {
402 mutex_unlock(&ses->session_mutex);
403 goto out;
404 }
405 cifs_mark_open_files_invalid(tcon);
406 if (tcon->use_persistent)
407 tcon->need_reopen_files = true;
408
409 rc = cifs_tree_connect(0, tcon, nls_codepage);
410
411 cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
412 if (rc) {
413 /* If sess reconnected but tcon didn't, something strange ... */
414 mutex_unlock(&ses->session_mutex);
415 cifs_dbg(VFS, "reconnect tcon failed rc = %d\n", rc);
416 goto out;
417 }
418
419 spin_lock(&ses->ses_lock);
420 if (ses->flags & CIFS_SES_FLAG_SCALE_CHANNELS) {
421 spin_unlock(&ses->ses_lock);
422 mutex_unlock(&ses->session_mutex);
423 goto skip_add_channels;
424 }
425 ses->flags |= CIFS_SES_FLAG_SCALE_CHANNELS;
426 spin_unlock(&ses->ses_lock);
427
428 if (!rc &&
429 (server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
430 server->ops->query_server_interfaces) {
431 /*
432 * query server network interfaces, in case they change.
433 * Also mark the session as pending this update while the query
434 * is in progress. This will be used to avoid calling
435 * smb2_reconnect recursively.
436 */
437 ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
438 xid = get_xid();
439 rc = server->ops->query_server_interfaces(xid, tcon, false);
440 free_xid(xid);
441 ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
442
443 if (!tcon->ipc && !tcon->dummy)
444 queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
445 (SMB_INTERFACE_POLL_INTERVAL * HZ));
446
447 mutex_unlock(&ses->session_mutex);
448
449 if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
450 /*
451 * some servers, such as the Azure SMB server, do not advertise
452 * that multichannel has been disabled via the server
453 * capabilities but instead return STATUS_NOT_IMPLEMENTED.
454 * Treat this as the server not supporting multichannel.
455 */
456
457 rc = cifs_chan_skip_or_disable(ses, server,
458 from_reconnect);
459 goto skip_add_channels;
460 } else if (rc)
461 cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
462 __func__, rc);
463
464 if (ses->chan_max > ses->chan_count &&
465 ses->iface_count &&
466 !SERVER_IS_CHAN(server)) {
467 if (ses->chan_count == 1)
468 cifs_server_dbg(VFS, "supports multichannel now\n");
469
470 cifs_try_adding_channels(ses);
471 }
472 } else {
473 mutex_unlock(&ses->session_mutex);
474 }
475
476 skip_add_channels:
477 spin_lock(&ses->ses_lock);
478 ses->flags &= ~CIFS_SES_FLAG_SCALE_CHANNELS;
479 spin_unlock(&ses->ses_lock);
480
481 if (smb2_command != SMB2_INTERNAL_CMD)
482 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
483
484 atomic_inc(&tconInfoReconnectCount);
485 out:
486 /*
487 * Check if handle based operation so we know whether we can continue
488 * or not without returning to caller to reset file handle.
489 */
490 /*
491 * BB Is flush done by server on drop of tcp session? Should we special
492 * case it and skip above?
493 */
494 switch (smb2_command) {
495 case SMB2_FLUSH:
496 case SMB2_READ:
497 case SMB2_WRITE:
498 case SMB2_LOCK:
499 case SMB2_QUERY_DIRECTORY:
500 case SMB2_CHANGE_NOTIFY:
501 case SMB2_QUERY_INFO:
502 case SMB2_SET_INFO:
503 rc = -EAGAIN;
504 }
505 failed:
506 return rc;
507 }
508
509 static void
510 fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
511 struct TCP_Server_Info *server,
512 void *buf,
513 unsigned int *total_len)
514 {
515 struct smb2_pdu *spdu = buf;
516 /* look up word count, i.e. StructureSize, from the table */
517 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
518
519 /*
520 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
521 * largest operations (Create)
522 */
523 memset(buf, 0, 256);
524
525 smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
526 spdu->StructureSize2 = cpu_to_le16(parmsize);
527
528 *total_len = parmsize + sizeof(struct smb2_hdr);
529 }
530
531 /*
532 * Allocate and return pointer to an SMB request hdr, and set basic
533 * SMB information in the SMB header. If the return code is zero, this
534 * function must have filled in request_buf pointer.
535 */
536 static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
537 struct TCP_Server_Info *server,
538 void **request_buf, unsigned int *total_len)
539 {
540 /* BB eventually switch this to SMB2 specific small buf size */
541 switch (smb2_command) {
542 case SMB2_SET_INFO:
543 case SMB2_QUERY_INFO:
544 *request_buf = cifs_buf_get();
545 break;
546 default:
547 *request_buf = cifs_small_buf_get();
548 break;
549 }
550 if (*request_buf == NULL) {
551 /* BB should we add a retry in here if not a writepage? */
552 return -ENOMEM;
553 }
554
555 fill_small_buf(smb2_command, tcon, server,
556 (struct smb2_hdr *)(*request_buf),
557 total_len);
558
559 if (tcon != NULL) {
560 uint16_t com_code = le16_to_cpu(smb2_command);
561 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
562 cifs_stats_inc(&tcon->num_smbs_sent);
563 }
564
565 return 0;
566 }
567
568 static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
569 struct TCP_Server_Info *server,
570 void **request_buf, unsigned int *total_len)
571 {
572 int rc;
573
574 rc = smb2_reconnect(smb2_command, tcon, server, false);
575 if (rc)
576 return rc;
577
578 return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
579 total_len);
580 }
581
582 static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
583 struct TCP_Server_Info *server,
584 void **request_buf, unsigned int *total_len)
585 {
586 /*
587 * Skip reconnect in one of the following cases:
588 * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
589 * 2. For FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
590 * smb2_reconnect (indicated by CIFS_SES_FLAG_SCALE_CHANNELS ses flag)
591 */
592 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
593 (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
594 (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
595 return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
596 request_buf, total_len);
597
598 return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
599 request_buf, total_len);
600 }
601
602 /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
603
604 static void
605 build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
606 {
607 pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
608 pneg_ctxt->DataLength = cpu_to_le16(38);
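/* 38 = HashAlgorithmCount (2) + SaltLength (2) + one HashAlgorithm (2) + 32-byte Salt */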
609 pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
610 pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
611 get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
612 pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
613 }
614
615 static void
616 build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
617 {
618 pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
619 pneg_ctxt->DataLength =
620 cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
621 - sizeof(struct smb2_neg_context));
622 pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
623 pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
624 pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
625 pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
626 }
627
628 static unsigned int
629 build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
630 {
631 unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
632 unsigned short num_algs = 1; /* number of signing algorithms sent */
633
634 pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
635 /*
636 * Context Data length must be rounded to multiple of 8 for some servers
637 */
638 pneg_ctxt->DataLength = cpu_to_le16(ALIGN(sizeof(struct smb2_signing_capabilities) -
639 sizeof(struct smb2_neg_context) +
640 (num_algs * sizeof(u16)), 8));
641 pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
642 pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
643
644 ctxt_len += sizeof(__le16) * num_algs;
645 ctxt_len = ALIGN(ctxt_len, 8);
646 return ctxt_len;
647 /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
648 }
649
650 static void
651 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
652 {
653 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
654 if (require_gcm_256) {
655 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
656 pneg_ctxt->CipherCount = cpu_to_le16(1);
657 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
658 } else if (enable_gcm_256) {
659 pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
660 pneg_ctxt->CipherCount = cpu_to_le16(3);
661 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
662 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
663 pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
664 } else {
665 pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
666 pneg_ctxt->CipherCount = cpu_to_le16(2);
667 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
668 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
669 }
670 }
671
672 static unsigned int
673 build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
674 {
675 struct nls_table *cp = load_nls_default();
676
677 pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
678
679 /* copy up to max of first 100 bytes of server name to NetName field */
680 pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
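/* cifs_strtoUTF16() returns the length in 16-bit Unicode characters, so doubling it gives the NetName length in bytes */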
681 /* context size is DataLength + minimal smb2_neg_context */
682 return ALIGN(le16_to_cpu(pneg_ctxt->DataLength) + sizeof(struct smb2_neg_context), 8);
683 }
684
685 static void
686 build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
687 {
688 pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
689 pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
690 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
691 pneg_ctxt->Name[0] = 0x93;
692 pneg_ctxt->Name[1] = 0xAD;
693 pneg_ctxt->Name[2] = 0x25;
694 pneg_ctxt->Name[3] = 0x50;
695 pneg_ctxt->Name[4] = 0x9C;
696 pneg_ctxt->Name[5] = 0xB4;
697 pneg_ctxt->Name[6] = 0x11;
698 pneg_ctxt->Name[7] = 0xE7;
699 pneg_ctxt->Name[8] = 0xB4;
700 pneg_ctxt->Name[9] = 0x23;
701 pneg_ctxt->Name[10] = 0x83;
702 pneg_ctxt->Name[11] = 0xDE;
703 pneg_ctxt->Name[12] = 0x96;
704 pneg_ctxt->Name[13] = 0x8B;
705 pneg_ctxt->Name[14] = 0xCD;
706 pneg_ctxt->Name[15] = 0x7C;
707 }
708
709 static void
710 assemble_neg_contexts(struct smb2_negotiate_req *req,
711 struct TCP_Server_Info *server, unsigned int *total_len)
712 {
713 unsigned int ctxt_len, neg_context_count;
714 struct TCP_Server_Info *pserver;
715 char *pneg_ctxt;
716 char *hostname;
717
718 if (*total_len > 200) {
719 /* In case length corrupted don't want to overrun smb buffer */
720 cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
721 return;
722 }
723
724 /*
725 * round up total_len of fixed part of SMB3 negotiate request to 8
726 * byte boundary before adding negotiate contexts
727 */
728 *total_len = ALIGN(*total_len, 8);
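/*
 * The pad bytes introduced by this alignment are already zero from the
 * memset() in fill_small_buf(), and SMB3.1.1 requires each negotiate
 * context to start at an 8-byte aligned offset.
 */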
729
730 pneg_ctxt = (*total_len) + (char *)req;
731 req->NegotiateContextOffset = cpu_to_le32(*total_len);
732
733 build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
734 ctxt_len = ALIGN(sizeof(struct smb2_preauth_neg_context), 8);
735 *total_len += ctxt_len;
736 pneg_ctxt += ctxt_len;
737
738 build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
739 ctxt_len = ALIGN(sizeof(struct smb2_encryption_neg_context), 8);
740 *total_len += ctxt_len;
741 pneg_ctxt += ctxt_len;
742
743 /*
744 * secondary channels don't have the hostname field populated;
745 * use the hostname field of the primary channel instead
746 */
747 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
748 cifs_server_lock(pserver);
749 hostname = pserver->hostname;
750 if (hostname && (hostname[0] != 0)) {
751 ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
752 hostname);
753 *total_len += ctxt_len;
754 pneg_ctxt += ctxt_len;
755 neg_context_count = 3;
756 } else
757 neg_context_count = 2;
758 cifs_server_unlock(pserver);
759
760 build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
761 *total_len += sizeof(struct smb2_posix_neg_context);
762 pneg_ctxt += sizeof(struct smb2_posix_neg_context);
763 neg_context_count++;
764
765 if (server->compression.requested) {
766 build_compression_ctxt((struct smb2_compression_capabilities_context *)
767 pneg_ctxt);
768 ctxt_len = ALIGN(sizeof(struct smb2_compression_capabilities_context), 8);
769 *total_len += ctxt_len;
770 pneg_ctxt += ctxt_len;
771 neg_context_count++;
772 }
773
774 if (enable_negotiate_signing) {
775 ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
776 pneg_ctxt);
777 *total_len += ctxt_len;
778 pneg_ctxt += ctxt_len;
779 neg_context_count++;
780 }
781
782 /* check for and add transport_capabilities and signing capabilities */
783 req->NegotiateContextCount = cpu_to_le16(neg_context_count);
784
785 }
786
787 /* If invalid preauth context warn but use what we requested, SHA-512 */
788 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
789 {
790 unsigned int len = le16_to_cpu(ctxt->DataLength);
791
792 /*
793 * Caller checked that DataLength remains within SMB boundary. We still
794 * need to confirm that one HashAlgorithms member is accounted for.
795 */
796 if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
797 pr_warn_once("server sent bad preauth context\n");
798 return;
799 } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
800 pr_warn_once("server sent invalid SaltLength\n");
801 return;
802 }
803 if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
804 pr_warn_once("Invalid SMB3 hash algorithm count\n");
805 if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
806 pr_warn_once("unknown SMB3 hash algorithm\n");
807 }
808
809 static void decode_compress_ctx(struct TCP_Server_Info *server,
810 struct smb2_compression_capabilities_context *ctxt)
811 {
812 unsigned int len = le16_to_cpu(ctxt->DataLength);
813 __le16 alg;
814
815 server->compression.enabled = false;
816
817 /*
818 * Caller checked that DataLength remains within SMB boundary. We still
819 * need to confirm that one CompressionAlgorithms member is accounted
820 * for.
821 */
822 if (len < 10) {
823 pr_warn_once("server sent bad compression cntxt\n");
824 return;
825 }
826
827 if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
828 pr_warn_once("invalid SMB3 compress algorithm count\n");
829 return;
830 }
831
832 alg = ctxt->CompressionAlgorithms[0];
833
834 /* 'NONE' (0) compressor type is never negotiated */
835 if (alg == 0 || le16_to_cpu(alg) > 3) {
836 pr_warn_once("invalid compression algorithm '%u'\n", alg);
837 return;
838 }
839
840 server->compression.alg = alg;
841 server->compression.enabled = true;
842 }
843
844 static int decode_encrypt_ctx(struct TCP_Server_Info *server,
845 struct smb2_encryption_neg_context *ctxt)
846 {
847 unsigned int len = le16_to_cpu(ctxt->DataLength);
848
849 cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
850 /*
851 * Caller checked that DataLength remains within SMB boundary. We still
852 * need to confirm that one Cipher flexible array member is accounted
853 * for.
854 */
855 if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
856 pr_warn_once("server sent bad crypto ctxt len\n");
857 return -EINVAL;
858 }
859
860 if (le16_to_cpu(ctxt->CipherCount) != 1) {
861 pr_warn_once("Invalid SMB3.11 cipher count\n");
862 return -EINVAL;
863 }
864 cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
865 if (require_gcm_256) {
866 if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
867 cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
868 return -EOPNOTSUPP;
869 }
870 } else if (ctxt->Ciphers[0] == 0) {
871 /*
872 * e.g. if the server only supported AES256_CCM (very unlikely),
873 * or supported no encryption types, or had them all disabled.
874 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount
875 * requested encryption ("seal") the checks done later during
876 * tree connection will return the proper rc; but if seal was
877 * not requested by the client, since the server is allowed to
878 * return 0 to indicate no supported cipher, we can't fail here
879 */
880 server->cipher_type = 0;
881 server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
882 pr_warn_once("Server does not support requested encryption types\n");
883 return 0;
884 } else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
885 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
886 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
887 /* server returned a cipher we didn't ask for */
888 pr_warn_once("Invalid SMB3.11 cipher returned\n");
889 return -EINVAL;
890 }
891 server->cipher_type = ctxt->Ciphers[0];
892 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
893 return 0;
894 }
895
896 static void decode_signing_ctx(struct TCP_Server_Info *server,
897 struct smb2_signing_capabilities *pctxt)
898 {
899 unsigned int len = le16_to_cpu(pctxt->DataLength);
900
901 /*
902 * Caller checked that DataLength remains within SMB boundary. We still
903 * need to confirm that one SigningAlgorithms flexible array member is
904 * accounted for.
905 */
906 if ((len < 4) || (len > 16)) {
907 pr_warn_once("server sent bad signing negcontext\n");
908 return;
909 }
910 if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
911 pr_warn_once("Invalid signing algorithm count\n");
912 return;
913 }
914 if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
915 pr_warn_once("unknown signing algorithm\n");
916 return;
917 }
918
919 server->signing_negotiated = true;
920 server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
921 cifs_dbg(FYI, "signing algorithm %d chosen\n",
922 server->signing_algorithm);
923 }
924
925
926 static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
927 struct TCP_Server_Info *server,
928 unsigned int len_of_smb)
929 {
930 struct smb2_neg_context *pctx;
931 unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
932 unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
933 unsigned int len_of_ctxts, i;
934 int rc = 0;
935
936 cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
937 if (len_of_smb <= offset) {
938 cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
939 return -EINVAL;
940 }
941
942 len_of_ctxts = len_of_smb - offset;
943
944 for (i = 0; i < ctxt_cnt; i++) {
945 int clen;
946 /* check that offset is not beyond end of SMB */
947 if (len_of_ctxts < sizeof(struct smb2_neg_context))
948 break;
949
950 pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
951 clen = sizeof(struct smb2_neg_context)
952 + le16_to_cpu(pctx->DataLength);
953 /*
954 * 2.2.4 SMB2 NEGOTIATE Response
955 * Subsequent negotiate contexts MUST appear at the first 8-byte
956 * aligned offset following the previous negotiate context.
957 */
958 if (i + 1 != ctxt_cnt)
959 clen = ALIGN(clen, 8);
960 if (clen > len_of_ctxts)
961 break;
962
963 if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
964 decode_preauth_context(
965 (struct smb2_preauth_neg_context *)pctx);
966 else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
967 rc = decode_encrypt_ctx(server,
968 (struct smb2_encryption_neg_context *)pctx);
969 else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
970 decode_compress_ctx(server,
971 (struct smb2_compression_capabilities_context *)pctx);
972 else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
973 server->posix_ext_supported = true;
974 else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
975 decode_signing_ctx(server,
976 (struct smb2_signing_capabilities *)pctx);
977 else
978 cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
979 le16_to_cpu(pctx->ContextType));
980 if (rc)
981 break;
982
983 offset += clen;
984 len_of_ctxts -= clen;
985 }
986 return rc;
987 }
988
989 static struct create_posix *
990 create_posix_buf(umode_t mode)
991 {
992 struct create_posix *buf;
993
994 buf = kzalloc(sizeof(struct create_posix),
995 GFP_KERNEL);
996 if (!buf)
997 return NULL;
998
999 buf->ccontext.DataOffset =
1000 cpu_to_le16(offsetof(struct create_posix, Mode));
1001 buf->ccontext.DataLength = cpu_to_le32(4);
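/* 4 bytes of context data: just the le32 Mode field below */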
1002 buf->ccontext.NameOffset =
1003 cpu_to_le16(offsetof(struct create_posix, Name));
1004 buf->ccontext.NameLength = cpu_to_le16(16);
1005
1006 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
1007 buf->Name[0] = 0x93;
1008 buf->Name[1] = 0xAD;
1009 buf->Name[2] = 0x25;
1010 buf->Name[3] = 0x50;
1011 buf->Name[4] = 0x9C;
1012 buf->Name[5] = 0xB4;
1013 buf->Name[6] = 0x11;
1014 buf->Name[7] = 0xE7;
1015 buf->Name[8] = 0xB4;
1016 buf->Name[9] = 0x23;
1017 buf->Name[10] = 0x83;
1018 buf->Name[11] = 0xDE;
1019 buf->Name[12] = 0x96;
1020 buf->Name[13] = 0x8B;
1021 buf->Name[14] = 0xCD;
1022 buf->Name[15] = 0x7C;
1023 buf->Mode = cpu_to_le32(mode);
1024 cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
1025 return buf;
1026 }
1027
1028 static int
1029 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
1030 {
1031 unsigned int num = *num_iovec;
1032
1033 iov[num].iov_base = create_posix_buf(mode);
1034 if (mode == ACL_NO_MODE)
1035 cifs_dbg(FYI, "%s: no mode\n", __func__);
1036 if (iov[num].iov_base == NULL)
1037 return -ENOMEM;
1038 iov[num].iov_len = sizeof(struct create_posix);
1039 *num_iovec = num + 1;
1040 return 0;
1041 }
1042
1043
1044 /*
1045 *
1046 * SMB2 Worker functions follow:
1047 *
1048 * The general structure of the worker functions is:
1049 * 1) Call smb2_init (assembles SMB2 header)
1050 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
1051 * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
1052 * 4) Decode SMB2 command specific fields in the fixed length area
1053 * 5) Decode variable length data area (if any for this SMB2 command type)
1054 * 6) Call free smb buffer
1055 * 7) return
1056 *
1057 */
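/*
 * SMB2_negotiate() below is a representative example of this pattern:
 * smb2_plain_req_init() covers steps 1-2, cifs_send_recv() handles step 3,
 * the dialect and negotiate-context parsing implements steps 4-5, and
 * free_rsp_buf() releases the response buffer for step 6.
 */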
1058
1059 int
1060 SMB2_negotiate(const unsigned int xid,
1061 struct cifs_ses *ses,
1062 struct TCP_Server_Info *server)
1063 {
1064 struct smb_rqst rqst;
1065 struct smb2_negotiate_req *req;
1066 struct smb2_negotiate_rsp *rsp;
1067 struct kvec iov[1];
1068 struct kvec rsp_iov;
1069 int rc;
1070 int resp_buftype;
1071 int blob_offset, blob_length;
1072 char *security_blob;
1073 int flags = CIFS_NEG_OP;
1074 unsigned int total_len;
1075
1076 cifs_dbg(FYI, "Negotiate protocol\n");
1077
1078 if (!server) {
1079 WARN(1, "%s: server is NULL!\n", __func__);
1080 return -EIO;
1081 }
1082
1083 rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
1084 (void **) &req, &total_len);
1085 if (rc)
1086 return rc;
1087
1088 req->hdr.SessionId = 0;
1089
1090 memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1091 memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
1092
1093 if (strcmp(server->vals->version_string,
1094 SMB3ANY_VERSION_STRING) == 0) {
1095 req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1096 req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1097 req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1098 req->DialectCount = cpu_to_le16(3);
1099 total_len += 6;
1100 } else if (strcmp(server->vals->version_string,
1101 SMBDEFAULT_VERSION_STRING) == 0) {
1102 req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1103 req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1104 req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1105 req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1106 req->DialectCount = cpu_to_le16(4);
1107 total_len += 8;
1108 } else {
1109 /* otherwise send specific dialect */
1110 req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
1111 req->DialectCount = cpu_to_le16(1);
1112 total_len += 2;
1113 }
1114
1115 /* only one of SMB2 signing flags may be set in SMB2 request */
1116 if (ses->sign)
1117 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1118 else if (global_secflags & CIFSSEC_MAY_SIGN)
1119 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1120 else
1121 req->SecurityMode = 0;
1122
1123 req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
1124 if (ses->chan_max > 1)
1125 req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1126
1127 /* ClientGUID must be zero for SMB2.02 dialect */
1128 if (server->vals->protocol_id == SMB20_PROT_ID)
1129 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
1130 else {
1131 memcpy(req->ClientGUID, server->client_guid,
1132 SMB2_CLIENT_GUID_SIZE);
1133 if ((server->vals->protocol_id == SMB311_PROT_ID) ||
1134 (strcmp(server->vals->version_string,
1135 SMB3ANY_VERSION_STRING) == 0) ||
1136 (strcmp(server->vals->version_string,
1137 SMBDEFAULT_VERSION_STRING) == 0))
1138 assemble_neg_contexts(req, server, &total_len);
1139 }
1140 iov[0].iov_base = (char *)req;
1141 iov[0].iov_len = total_len;
1142
1143 memset(&rqst, 0, sizeof(struct smb_rqst));
1144 rqst.rq_iov = iov;
1145 rqst.rq_nvec = 1;
1146
1147 rc = cifs_send_recv(xid, ses, server,
1148 &rqst, &resp_buftype, flags, &rsp_iov);
1149 cifs_small_buf_release(req);
1150 rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
1151 /*
1152 * No tcon so can't do
1153 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1154 */
1155 if (rc == -EOPNOTSUPP) {
1156 cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
1157 goto neg_exit;
1158 } else if (rc != 0)
1159 goto neg_exit;
1160
1161 rc = -EIO;
1162 if (strcmp(server->vals->version_string,
1163 SMB3ANY_VERSION_STRING) == 0) {
1164 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
1165 cifs_server_dbg(VFS,
1166 "SMB2 dialect returned but not requested\n");
1167 goto neg_exit;
1168 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
1169 cifs_server_dbg(VFS,
1170 "SMB2.1 dialect returned but not requested\n");
1171 goto neg_exit;
1172 } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
1173 /* ops set to 3.0 by default, so update for the negotiated dialect */
1174 server->ops = &smb311_operations;
1175 server->vals = &smb311_values;
1176 }
1177 } else if (strcmp(server->vals->version_string,
1178 SMBDEFAULT_VERSION_STRING) == 0) {
1179 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
1180 cifs_server_dbg(VFS,
1181 "SMB2 dialect returned but not requested\n");
1182 goto neg_exit;
1183 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
1184 /* ops set to 3.0 by default, so update for the negotiated dialect */
1185 server->ops = &smb21_operations;
1186 server->vals = &smb21_values;
1187 } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
1188 server->ops = &smb311_operations;
1189 server->vals = &smb311_values;
1190 }
1191 } else if (le16_to_cpu(rsp->DialectRevision) !=
1192 server->vals->protocol_id) {
1193 /* if requested single dialect ensure returned dialect matched */
1194 cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
1195 le16_to_cpu(rsp->DialectRevision));
1196 goto neg_exit;
1197 }
1198
1199 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
1200
1201 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
1202 cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
1203 else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
1204 cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
1205 else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
1206 cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
1207 else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
1208 cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
1209 else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
1210 cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
1211 else {
1212 cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
1213 le16_to_cpu(rsp->DialectRevision));
1214 goto neg_exit;
1215 }
1216
1217 rc = 0;
1218 server->dialect = le16_to_cpu(rsp->DialectRevision);
1219
1220 /*
1221 * Keep a copy of the hash after negprot. This hash will be
1222 * the starting hash value for all sessions made from this
1223 * server.
1224 */
1225 memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
1226 SMB2_PREAUTH_HASH_SIZE);
1227
1228 /* SMB2 only has an extended negflavor */
1229 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
1230 /* set it to the maximum buffer size value we can send with 1 credit */
1231 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
1232 SMB2_MAX_BUFFER_SIZE);
1233 server->max_read = le32_to_cpu(rsp->MaxReadSize);
1234 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
1235 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
1236 if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
1237 cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
1238 server->sec_mode);
1239 server->capabilities = le32_to_cpu(rsp->Capabilities);
1240 /* Internal types */
1241 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
1242
1243 /*
1244 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context
1245 * Set the cipher type manually.
1246 */
1247 if ((server->dialect == SMB30_PROT_ID ||
1248 server->dialect == SMB302_PROT_ID) &&
1249 (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
1250 server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
1251
1252 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
1253 (struct smb2_hdr *)rsp);
1254 /*
1255 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
1256 * for us will be
1257 * ses->sectype = RawNTLMSSP;
1258 * but for the time being this is our only auth choice so it doesn't matter.
1259 * We just found a server which sets blob length to zero expecting raw.
1260 */
1261 if (blob_length == 0) {
1262 cifs_dbg(FYI, "missing security blob on negprot\n");
1263 server->sec_ntlmssp = true;
1264 }
1265
1266 rc = cifs_enable_signing(server, ses->sign);
1267 if (rc)
1268 goto neg_exit;
1269 if (blob_length) {
1270 rc = decode_negTokenInit(security_blob, blob_length, server);
1271 if (rc == 1)
1272 rc = 0;
1273 else if (rc == 0)
1274 rc = -EIO;
1275 }
1276
1277 if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
1278 if (rsp->NegotiateContextCount)
1279 rc = smb311_decode_neg_context(rsp, server,
1280 rsp_iov.iov_len);
1281 else
1282 cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
1283 }
1284
1285 if (server->cipher_type && !rc)
1286 rc = smb3_crypto_aead_allocate(server);
1287 neg_exit:
1288 free_rsp_buf(resp_buftype, rsp);
1289 return rc;
1290 }
1291
1292 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1293 {
1294 int rc;
1295 struct validate_negotiate_info_req *pneg_inbuf;
1296 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
1297 u32 rsplen;
1298 u32 inbuflen; /* max of 4 dialects */
1299 struct TCP_Server_Info *server = tcon->ses->server;
1300
1301 cifs_dbg(FYI, "validate negotiate\n");
1302
1303 /* In SMB3.11 preauth integrity supersedes validate negotiate */
1304 if (server->dialect == SMB311_PROT_ID)
1305 return 0;
1306
1307 /*
1308 * validation ioctl must be signed, so no point sending this if we
1309 * cannot sign it (i.e. are not a known user). Even if signing is not
1310 * required (enabled but not negotiated), in those cases we selectively
1311 * sign just this, the first and only signed request on a connection.
1312 * Having validation of negotiate info helps reduce attack vectors.
1313 */
1314 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
1315 return 0; /* validation requires signing */
1316
1317 if (tcon->ses->user_name == NULL) {
1318 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1319 return 0; /* validation requires signing */
1320 }
1321
1322 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
1323 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
1324
1325 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1326 if (!pneg_inbuf)
1327 return -ENOMEM;
1328
1329 pneg_inbuf->Capabilities =
1330 cpu_to_le32(server->vals->req_capabilities);
1331 if (tcon->ses->chan_max > 1)
1332 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1333
1334 memcpy(pneg_inbuf->Guid, server->client_guid,
1335 SMB2_CLIENT_GUID_SIZE);
1336
1337 if (tcon->ses->sign)
1338 pneg_inbuf->SecurityMode =
1339 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1340 else if (global_secflags & CIFSSEC_MAY_SIGN)
1341 pneg_inbuf->SecurityMode =
1342 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1343 else
1344 pneg_inbuf->SecurityMode = 0;
1345
1346
1347 if (strcmp(server->vals->version_string,
1348 SMB3ANY_VERSION_STRING) == 0) {
1349 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1350 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1351 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1352 pneg_inbuf->DialectCount = cpu_to_le16(3);
1353 /* SMB 2.1 not included so subtract one dialect from len */
1354 inbuflen = sizeof(*pneg_inbuf) -
1355 (sizeof(pneg_inbuf->Dialects[0]));
1356 } else if (strcmp(server->vals->version_string,
1357 SMBDEFAULT_VERSION_STRING) == 0) {
1358 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1359 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1360 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1361 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1362 pneg_inbuf->DialectCount = cpu_to_le16(4);
1363 /* structure is big enough for 4 dialects */
1364 inbuflen = sizeof(*pneg_inbuf);
1365 } else {
1366 /* otherwise specific dialect was requested */
1367 pneg_inbuf->Dialects[0] =
1368 cpu_to_le16(server->vals->protocol_id);
1369 pneg_inbuf->DialectCount = cpu_to_le16(1);
1370 /* structure is big enough for 4 dialects, sending only 1 */
1371 inbuflen = sizeof(*pneg_inbuf) -
1372 sizeof(pneg_inbuf->Dialects[0]) * 3;
1373 }
1374
1375 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1376 FSCTL_VALIDATE_NEGOTIATE_INFO,
1377 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1378 (char **)&pneg_rsp, &rsplen);
1379 if (rc == -EOPNOTSUPP) {
1380 /*
1381 * Old Windows versions or Netapp SMB server can return
1382 * not supported error. Client should accept it.
1383 */
1384 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
1385 rc = 0;
1386 goto out_free_inbuf;
1387 } else if (rc != 0) {
1388 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1389 rc);
1390 rc = -EIO;
1391 goto out_free_inbuf;
1392 }
1393
1394 rc = -EIO;
1395 if (rsplen != sizeof(*pneg_rsp)) {
1396 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1397 rsplen);
1398
1399 /* relax check since Mac returns max bufsize allowed on ioctl */
1400 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
1401 goto out_free_rsp;
1402 }
1403
1404 /* check validate negotiate info response matches what we got earlier */
1405 if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
1406 goto vneg_out;
1407
1408 if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
1409 goto vneg_out;
1410
1411 /* do not validate server guid because not saved at negprot time yet */
1412
1413 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
1414 SMB2_LARGE_FILES) != server->capabilities)
1415 goto vneg_out;
1416
1417 /* validate negotiate successful */
1418 rc = 0;
1419 cifs_dbg(FYI, "validate negotiate info successful\n");
1420 goto out_free_rsp;
1421
1422 vneg_out:
1423 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
1424 out_free_rsp:
1425 kfree(pneg_rsp);
1426 out_free_inbuf:
1427 kfree(pneg_inbuf);
1428 return rc;
1429 }
1430
1431 enum securityEnum
1432 smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1433 {
1434 switch (requested) {
1435 case Kerberos:
1436 case RawNTLMSSP:
1437 return requested;
1438 case NTLMv2:
1439 return RawNTLMSSP;
1440 case Unspecified:
1441 if (server->sec_ntlmssp &&
1442 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1443 return RawNTLMSSP;
1444 if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) &&
1445 (global_secflags & CIFSSEC_MAY_KRB5))
1446 return Kerberos;
1447 fallthrough;
1448 default:
1449 return Unspecified;
1450 }
1451 }
1452
1453 struct SMB2_sess_data {
1454 unsigned int xid;
1455 struct cifs_ses *ses;
1456 struct TCP_Server_Info *server;
1457 struct nls_table *nls_cp;
1458 void (*func)(struct SMB2_sess_data *);
1459 int result;
1460 u64 previous_session;
1461
1462 /* we will send the SMB in three pieces:
1463 * a fixed length beginning part, an optional
1464 * SPNEGO blob (which can be zero length), and a
1465 * last part which will include the strings
1466 * and rest of bcc area. This allows us to avoid
1467 * a large 17K buffer allocation
1468 */
1469 int buf0_type;
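/* iov[0]: the fixed-length request; iov[1]: the security blob (SPNEGO or raw NTLMSSP) */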
1470 struct kvec iov[2];
1471 };
1472
1473 static int
1474 SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1475 {
1476 int rc;
1477 struct cifs_ses *ses = sess_data->ses;
1478 struct TCP_Server_Info *server = sess_data->server;
1479 struct smb2_sess_setup_req *req;
1480 unsigned int total_len;
1481 bool is_binding = false;
1482
1483 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1484 (void **) &req,
1485 &total_len);
1486 if (rc)
1487 return rc;
1488
1489 spin_lock(&ses->ses_lock);
1490 is_binding = (ses->ses_status == SES_GOOD);
1491 spin_unlock(&ses->ses_lock);
1492
1493 if (is_binding) {
1494 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1495 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1496 req->PreviousSessionId = 0;
1497 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1498 cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
1499 } else {
1500 /* First session, not a reauthenticate */
1501 req->hdr.SessionId = 0;
1502 /*
1503 * if reconnect, we need to send previous sess id
1504 * otherwise it is 0
1505 */
1506 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
1507 req->Flags = 0; /* MBZ */
1508 cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
1509 sess_data->previous_session);
1510 }
1511
1512 /* enough to enable echos and oplocks and one max size write */
1513 if (server->credits >= server->max_credits)
1514 req->hdr.CreditRequest = cpu_to_le16(0);
1515 else
1516 req->hdr.CreditRequest = cpu_to_le16(
1517 min_t(int, server->max_credits -
1518 server->credits, 130));
1519
1520 /* only one of SMB2 signing flags may be set in SMB2 request */
1521 if (server->sign)
1522 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1523 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1524 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1525 else
1526 req->SecurityMode = 0;
1527
1528 #ifdef CONFIG_CIFS_DFS_UPCALL
1529 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1530 #else
1531 req->Capabilities = 0;
1532 #endif /* DFS_UPCALL */
1533
1534 req->Channel = 0; /* MBZ */
1535
1536 sess_data->iov[0].iov_base = (char *)req;
1537 /* 1 for pad */
1538 sess_data->iov[0].iov_len = total_len - 1;
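/*
 * total_len counts the single pad byte of the variable-length Buffer
 * (StructureSize 25 = 24 fixed bytes + 1); the security blob is sent
 * separately in iov[1], so that byte is trimmed here.
 */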
1539 /*
1540 * This variable will be used to clear the buffer
1541 * allocated above in case of any error in the calling function.
1542 */
1543 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1544
1545 return 0;
1546 }
1547
1548 static void
1549 SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1550 {
1551 struct kvec *iov = sess_data->iov;
1552
1553 /* iov[1] is already freed by caller */
1554 if (sess_data->buf0_type != CIFS_NO_BUFFER && iov[0].iov_base)
1555 memzero_explicit(iov[0].iov_base, iov[0].iov_len);
1556
1557 free_rsp_buf(sess_data->buf0_type, iov[0].iov_base);
1558 sess_data->buf0_type = CIFS_NO_BUFFER;
1559 }
1560
1561 static int
1562 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1563 {
1564 int rc;
1565 struct smb_rqst rqst;
1566 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
1567 struct kvec rsp_iov = { NULL, 0 };
1568
1569 /* Testing shows that buffer offset must be at location of Buffer[0] */
1570 req->SecurityBufferOffset =
1571 cpu_to_le16(sizeof(struct smb2_sess_setup_req));
1572 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1573
1574 memset(&rqst, 0, sizeof(struct smb_rqst));
1575 rqst.rq_iov = sess_data->iov;
1576 rqst.rq_nvec = 2;
1577
1578 /* BB add code to build os and lm fields */
1579 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
1580 sess_data->server,
1581 &rqst,
1582 &sess_data->buf0_type,
1583 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
1584 cifs_small_buf_release(sess_data->iov[0].iov_base);
1585 if (rc == 0)
1586 sess_data->ses->expired_pwd = false;
1587 else if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
1588 if (sess_data->ses->expired_pwd == false)
1589 trace_smb3_key_expired(sess_data->server->hostname,
1590 sess_data->ses->user_name,
1591 sess_data->server->conn_id,
1592 &sess_data->server->dstaddr, rc);
1593 sess_data->ses->expired_pwd = true;
1594 }
1595
1596 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
1597
1598 return rc;
1599 }
1600
1601 static int
1602 SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1603 {
1604 int rc = 0;
1605 struct cifs_ses *ses = sess_data->ses;
1606 struct TCP_Server_Info *server = sess_data->server;
1607
1608 cifs_server_lock(server);
1609 if (server->ops->generate_signingkey) {
1610 rc = server->ops->generate_signingkey(ses, server);
1611 if (rc) {
1612 cifs_dbg(FYI,
1613 "SMB3 session key generation failed\n");
1614 cifs_server_unlock(server);
1615 return rc;
1616 }
1617 }
1618 if (!server->session_estab) {
1619 server->sequence_number = 0x2;
1620 server->session_estab = true;
1621 }
1622 cifs_server_unlock(server);
1623
1624 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
1625 return rc;
1626 }
1627
1628 #ifdef CONFIG_CIFS_UPCALL
1629 static void
1630 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1631 {
1632 int rc;
1633 struct cifs_ses *ses = sess_data->ses;
1634 struct TCP_Server_Info *server = sess_data->server;
1635 struct cifs_spnego_msg *msg;
1636 struct key *spnego_key = NULL;
1637 struct smb2_sess_setup_rsp *rsp = NULL;
1638 bool is_binding = false;
1639
1640 rc = SMB2_sess_alloc_buffer(sess_data);
1641 if (rc)
1642 goto out;
1643
1644 spnego_key = cifs_get_spnego_key(ses, server);
1645 if (IS_ERR(spnego_key)) {
1646 rc = PTR_ERR(spnego_key);
1647 if (rc == -ENOKEY)
1648 cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
1649 spnego_key = NULL;
1650 goto out;
1651 }
1652
1653 msg = spnego_key->payload.data[0];
1654 /*
1655 * check version field to make sure that cifs.upcall is
1656 * sending us a response in an expected form
1657 */
1658 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
1659 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1660 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
1661 rc = -EKEYREJECTED;
1662 goto out_put_spnego_key;
1663 }
1664
1665 spin_lock(&ses->ses_lock);
1666 is_binding = (ses->ses_status == SES_GOOD);
1667 spin_unlock(&ses->ses_lock);
1668
1669 /* keep session key if binding */
1670 if (!is_binding) {
1671 kfree_sensitive(ses->auth_key.response);
1672 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1673 GFP_KERNEL);
1674 if (!ses->auth_key.response) {
1675 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
1676 msg->sesskey_len);
1677 rc = -ENOMEM;
1678 goto out_put_spnego_key;
1679 }
1680 ses->auth_key.len = msg->sesskey_len;
1681 }
1682
1683 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1684 sess_data->iov[1].iov_len = msg->secblob_len;
1685
1686 rc = SMB2_sess_sendreceive(sess_data);
1687 if (rc)
1688 goto out_put_spnego_key;
1689
1690 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1691 /* keep session id and flags if binding */
1692 if (!is_binding) {
1693 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1694 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1695 }
1696
1697 rc = SMB2_sess_establish_session(sess_data);
1698 out_put_spnego_key:
1699 key_invalidate(spnego_key);
1700 key_put(spnego_key);
1701 if (rc) {
1702 kfree_sensitive(ses->auth_key.response);
1703 ses->auth_key.response = NULL;
1704 ses->auth_key.len = 0;
1705 }
1706 out:
1707 sess_data->result = rc;
1708 sess_data->func = NULL;
1709 SMB2_sess_free_buffer(sess_data);
1710 }
1711 #else
1712 static void
1713 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1714 {
1715 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1716 sess_data->result = -EOPNOTSUPP;
1717 sess_data->func = NULL;
1718 }
1719 #endif
1720
1721 static void
1722 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
1723
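/*
 * First half of raw NTLMSSP session setup: send the NEGOTIATE blob and
 * expect STATUS_MORE_PROCESSING_REQUIRED with a CHALLENGE in the response,
 * which is decoded and saved in the session.  On success the state machine
 * advances to the authenticate step below.
 */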
1724 static void
1725 SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1726 {
1727 int rc;
1728 struct cifs_ses *ses = sess_data->ses;
1729 struct TCP_Server_Info *server = sess_data->server;
1730 struct smb2_sess_setup_rsp *rsp = NULL;
1731 unsigned char *ntlmssp_blob = NULL;
1732 bool use_spnego = false; /* else use raw ntlmssp */
1733 u16 blob_length = 0;
1734 bool is_binding = false;
1735
1736 /*
1737 	 * If memory allocation is successful, the caller of this function
1738 	 * frees it.
1739 */
1740 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1741 if (!ses->ntlmssp) {
1742 rc = -ENOMEM;
1743 goto out_err;
1744 }
1745 ses->ntlmssp->sesskey_per_smbsess = true;
1746
1747 rc = SMB2_sess_alloc_buffer(sess_data);
1748 if (rc)
1749 goto out_err;
1750
1751 rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
1752 &blob_length, ses, server,
1753 sess_data->nls_cp);
1754 if (rc)
1755 goto out;
1756
1757 if (use_spnego) {
1758 /* BB eventually need to add this */
1759 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1760 rc = -EOPNOTSUPP;
1761 goto out;
1762 }
1763 sess_data->iov[1].iov_base = ntlmssp_blob;
1764 sess_data->iov[1].iov_len = blob_length;
1765
1766 rc = SMB2_sess_sendreceive(sess_data);
1767 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1768
1769 /* If true, rc here is expected and not an error */
1770 if (sess_data->buf0_type != CIFS_NO_BUFFER &&
1771 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
1772 rc = 0;
1773
1774 if (rc)
1775 goto out;
1776
1777 if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
1778 le16_to_cpu(rsp->SecurityBufferOffset)) {
1779 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
1780 le16_to_cpu(rsp->SecurityBufferOffset));
1781 rc = -EIO;
1782 goto out;
1783 }
1784 rc = decode_ntlmssp_challenge(rsp->Buffer,
1785 le16_to_cpu(rsp->SecurityBufferLength), ses);
1786 if (rc)
1787 goto out;
1788
1789 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1790
1791 spin_lock(&ses->ses_lock);
1792 is_binding = (ses->ses_status == SES_GOOD);
1793 spin_unlock(&ses->ses_lock);
1794
1795 /* keep existing ses id and flags if binding */
1796 if (!is_binding) {
1797 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1798 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1799 }
1800
1801 out:
1802 kfree_sensitive(ntlmssp_blob);
1803 SMB2_sess_free_buffer(sess_data);
1804 if (!rc) {
1805 sess_data->result = 0;
1806 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1807 return;
1808 }
1809 out_err:
1810 kfree_sensitive(ses->ntlmssp);
1811 ses->ntlmssp = NULL;
1812 sess_data->result = rc;
1813 sess_data->func = NULL;
1814 }
1815
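/*
 * Second half of raw NTLMSSP session setup: build and send the AUTHENTICATE
 * blob for the same session id, then derive the signing keys.  ses->ntlmssp
 * is freed here whether or not the authentication succeeded.
 */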
1816 static void
1817 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1818 {
1819 int rc;
1820 struct cifs_ses *ses = sess_data->ses;
1821 struct TCP_Server_Info *server = sess_data->server;
1822 struct smb2_sess_setup_req *req;
1823 struct smb2_sess_setup_rsp *rsp = NULL;
1824 unsigned char *ntlmssp_blob = NULL;
1825 bool use_spnego = false; /* else use raw ntlmssp */
1826 u16 blob_length = 0;
1827 bool is_binding = false;
1828
1829 rc = SMB2_sess_alloc_buffer(sess_data);
1830 if (rc)
1831 goto out;
1832
1833 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
1834 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1835
1836 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
1837 ses, server,
1838 sess_data->nls_cp);
1839 if (rc) {
1840 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1841 goto out;
1842 }
1843
1844 if (use_spnego) {
1845 /* BB eventually need to add this */
1846 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1847 rc = -EOPNOTSUPP;
1848 goto out;
1849 }
1850 sess_data->iov[1].iov_base = ntlmssp_blob;
1851 sess_data->iov[1].iov_len = blob_length;
1852
1853 rc = SMB2_sess_sendreceive(sess_data);
1854 if (rc)
1855 goto out;
1856
1857 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1858
1859 spin_lock(&ses->ses_lock);
1860 is_binding = (ses->ses_status == SES_GOOD);
1861 spin_unlock(&ses->ses_lock);
1862
1863 /* keep existing ses id and flags if binding */
1864 if (!is_binding) {
1865 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1866 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1867 }
1868
1869 rc = SMB2_sess_establish_session(sess_data);
1870 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1871 if (ses->server->dialect < SMB30_PROT_ID) {
1872 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1873 /*
1874 * The session id is opaque in terms of endianness, so we can't
1875 	 * print it as a long long. We dump it as we got it on the wire.
1876 */
1877 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1878 &ses->Suid);
1879 cifs_dbg(VFS, "Session Key %*ph\n",
1880 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1881 cifs_dbg(VFS, "Signing Key %*ph\n",
1882 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1883 }
1884 #endif
1885 out:
1886 kfree_sensitive(ntlmssp_blob);
1887 SMB2_sess_free_buffer(sess_data);
1888 kfree_sensitive(ses->ntlmssp);
1889 ses->ntlmssp = NULL;
1890 sess_data->result = rc;
1891 sess_data->func = NULL;
1892 }
1893
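/*
 * Pick the authentication mechanism (Kerberos or raw NTLMSSP) based on what
 * was negotiated and what the mount requested, and set the first handler of
 * the session setup state machine accordingly.
 */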
1894 static int
1895 SMB2_select_sec(struct SMB2_sess_data *sess_data)
1896 {
1897 int type;
1898 struct cifs_ses *ses = sess_data->ses;
1899 struct TCP_Server_Info *server = sess_data->server;
1900
1901 type = smb2_select_sectype(server, ses->sectype);
1902 cifs_dbg(FYI, "sess setup type %d\n", type);
1903 if (type == Unspecified) {
1904 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
1905 return -EINVAL;
1906 }
1907
1908 switch (type) {
1909 case Kerberos:
1910 sess_data->func = SMB2_auth_kerberos;
1911 break;
1912 case RawNTLMSSP:
1913 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1914 break;
1915 default:
1916 cifs_dbg(VFS, "secType %d not supported!\n", type);
1917 return -EOPNOTSUPP;
1918 }
1919
1920 return 0;
1921 }
1922
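/*
 * Session setup is driven as a small state machine: SMB2_select_sec() picks
 * the initial handler, and each handler either chains to the next step or
 * clears sess_data->func to stop, leaving its status in sess_data->result.
 */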
1923 int
1924 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1925 struct TCP_Server_Info *server,
1926 const struct nls_table *nls_cp)
1927 {
1928 int rc = 0;
1929 struct SMB2_sess_data *sess_data;
1930
1931 cifs_dbg(FYI, "Session Setup\n");
1932
1933 if (!server) {
1934 WARN(1, "%s: server is NULL!\n", __func__);
1935 return -EIO;
1936 }
1937
1938 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1939 if (!sess_data)
1940 return -ENOMEM;
1941
1942 sess_data->xid = xid;
1943 sess_data->ses = ses;
1944 sess_data->server = server;
1945 sess_data->buf0_type = CIFS_NO_BUFFER;
1946 sess_data->nls_cp = (struct nls_table *) nls_cp;
1947 sess_data->previous_session = ses->Suid;
1948
1949 rc = SMB2_select_sec(sess_data);
1950 if (rc)
1951 goto out;
1952
1953 /*
1954 * Initialize the session hash with the server one.
1955 */
1956 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
1957 SMB2_PREAUTH_HASH_SIZE);
1958
1959 while (sess_data->func)
1960 sess_data->func(sess_data);
1961
1962 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
1963 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
1964 rc = sess_data->result;
1965 out:
1966 kfree_sensitive(sess_data);
1967 return rc;
1968 }
1969
1970 int
1971 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
1972 {
1973 struct smb_rqst rqst;
1974 struct smb2_logoff_req *req; /* response is also trivial struct */
1975 int rc = 0;
1976 struct TCP_Server_Info *server;
1977 int flags = 0;
1978 unsigned int total_len;
1979 struct kvec iov[1];
1980 struct kvec rsp_iov;
1981 int resp_buf_type;
1982
1983 cifs_dbg(FYI, "disconnect session %p\n", ses);
1984
1985 if (ses && (ses->server))
1986 server = ses->server;
1987 else
1988 return -EIO;
1989
1990 /* no need to send SMB logoff if uid already closed due to reconnect */
1991 spin_lock(&ses->chan_lock);
1992 if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
1993 spin_unlock(&ses->chan_lock);
1994 goto smb2_session_already_dead;
1995 }
1996 spin_unlock(&ses->chan_lock);
1997
1998 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
1999 (void **) &req, &total_len);
2000 if (rc)
2001 return rc;
2002
2003 	/* since no tcon, smb2_init cannot do this, so do it here */
2004 req->hdr.SessionId = cpu_to_le64(ses->Suid);
2005
2006 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
2007 flags |= CIFS_TRANSFORM_REQ;
2008 else if (server->sign)
2009 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2010
2011 flags |= CIFS_NO_RSP_BUF;
2012
2013 iov[0].iov_base = (char *)req;
2014 iov[0].iov_len = total_len;
2015
2016 memset(&rqst, 0, sizeof(struct smb_rqst));
2017 rqst.rq_iov = iov;
2018 rqst.rq_nvec = 1;
2019
2020 rc = cifs_send_recv(xid, ses, ses->server,
2021 &rqst, &resp_buf_type, flags, &rsp_iov);
2022 cifs_small_buf_release(req);
2023 /*
2024 * No tcon so can't do
2025 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
2026 */
2027
2028 smb2_session_already_dead:
2029 return rc;
2030 }
2031
2032 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
2033 {
2034 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
2035 }
2036
2037 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
2038
2039 /* These are similar values to what Windows uses */
2040 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
2041 {
2042 tcon->max_chunks = 256;
2043 tcon->max_bytes_chunk = 1048576;
2044 tcon->max_bytes_copy = 16777216;
2045 }
2046
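/*
 * Send an SMB2 TREE_CONNECT for the given UNC path ("\\server\share").
 * On success the share type, flags, capabilities and TreeId from the
 * response are cached in the tcon, and the negotiate exchange is validated
 * if the server ops support it.
 */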
2047 int
2048 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
2049 struct cifs_tcon *tcon, const struct nls_table *cp)
2050 {
2051 struct smb_rqst rqst;
2052 struct smb2_tree_connect_req *req;
2053 struct smb2_tree_connect_rsp *rsp = NULL;
2054 struct kvec iov[2];
2055 struct kvec rsp_iov = { NULL, 0 };
2056 int rc = 0;
2057 int resp_buftype;
2058 int unc_path_len;
2059 __le16 *unc_path = NULL;
2060 int flags = 0;
2061 unsigned int total_len;
2062 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2063
2064 cifs_dbg(FYI, "TCON\n");
2065
2066 if (!server || !tree)
2067 return -EIO;
2068
2069 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
2070 if (unc_path == NULL)
2071 return -ENOMEM;
2072
2073 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp);
2074 if (unc_path_len <= 0) {
2075 kfree(unc_path);
2076 return -EINVAL;
2077 }
2078 unc_path_len *= 2;
2079
2080 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
2081 tcon->tid = 0;
2082 atomic_set(&tcon->num_remote_opens, 0);
2083 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
2084 (void **) &req, &total_len);
2085 if (rc) {
2086 kfree(unc_path);
2087 return rc;
2088 }
2089
2090 if (smb3_encryption_required(tcon))
2091 flags |= CIFS_TRANSFORM_REQ;
2092
2093 iov[0].iov_base = (char *)req;
2094 /* 1 for pad */
2095 iov[0].iov_len = total_len - 1;
2096
2097 /* Testing shows that buffer offset must be at location of Buffer[0] */
2098 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req));
2099 req->PathLength = cpu_to_le16(unc_path_len);
2100 iov[1].iov_base = unc_path;
2101 iov[1].iov_len = unc_path_len;
2102
2103 /*
2104 	 * An SMB 3.1.1 tree connect request must be signed if not encrypted (see
2105 	 * MS-SMB2 3.2.4.1.1), unless the user is guest or anonymous (see MS-SMB2
2106 	 * 3.2.5.3.1). Samba servers don't always set the flag, so also check for a null user.
2107 */
2108 if ((server->dialect == SMB311_PROT_ID) &&
2109 !smb3_encryption_required(tcon) &&
2110 !(ses->session_flags &
2111 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
2112 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
2113 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
2114
2115 memset(&rqst, 0, sizeof(struct smb_rqst));
2116 rqst.rq_iov = iov;
2117 rqst.rq_nvec = 2;
2118
2119 	/* Need 64 credits for a max size write, so ask for more in case we don't have them yet */
2120 if (server->credits >= server->max_credits)
2121 req->hdr.CreditRequest = cpu_to_le16(0);
2122 else
2123 req->hdr.CreditRequest = cpu_to_le16(
2124 min_t(int, server->max_credits -
2125 server->credits, 64));
2126
2127 rc = cifs_send_recv(xid, ses, server,
2128 &rqst, &resp_buftype, flags, &rsp_iov);
2129 cifs_small_buf_release(req);
2130 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
2131 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
2132 if ((rc != 0) || (rsp == NULL)) {
2133 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
2134 tcon->need_reconnect = true;
2135 goto tcon_error_exit;
2136 }
2137
2138 switch (rsp->ShareType) {
2139 case SMB2_SHARE_TYPE_DISK:
2140 cifs_dbg(FYI, "connection to disk share\n");
2141 break;
2142 case SMB2_SHARE_TYPE_PIPE:
2143 tcon->pipe = true;
2144 cifs_dbg(FYI, "connection to pipe share\n");
2145 break;
2146 case SMB2_SHARE_TYPE_PRINT:
2147 tcon->print = true;
2148 cifs_dbg(FYI, "connection to printer\n");
2149 break;
2150 default:
2151 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
2152 rc = -EOPNOTSUPP;
2153 goto tcon_error_exit;
2154 }
2155
2156 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
2157 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
2158 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
2159 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
2160 strscpy(tcon->tree_name, tree, sizeof(tcon->tree_name));
2161
2162 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
2163 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
2164 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
2165
2166 if (tcon->seal &&
2167 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
2168 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
2169
2170 init_copy_chunk_defaults(tcon);
2171 if (server->ops->validate_negotiate)
2172 rc = server->ops->validate_negotiate(xid, tcon);
2173 if (rc == 0) /* See MS-SMB2 2.2.10 and 3.2.5.5 */
2174 if (tcon->share_flags & SMB2_SHAREFLAG_ISOLATED_TRANSPORT)
2175 server->nosharesock = true;
2176 tcon_exit:
2177
2178 free_rsp_buf(resp_buftype, rsp);
2179 kfree(unc_path);
2180 return rc;
2181
2182 tcon_error_exit:
2183 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
2184 cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree);
2185 goto tcon_exit;
2186 }
2187
2188 int
2189 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
2190 {
2191 struct smb_rqst rqst;
2192 struct smb2_tree_disconnect_req *req; /* response is trivial */
2193 int rc = 0;
2194 struct cifs_ses *ses = tcon->ses;
2195 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2196 int flags = 0;
2197 unsigned int total_len;
2198 struct kvec iov[1];
2199 struct kvec rsp_iov;
2200 int resp_buf_type;
2201
2202 cifs_dbg(FYI, "Tree Disconnect\n");
2203
2204 if (!ses || !(ses->server))
2205 return -EIO;
2206
2207 trace_smb3_tdis_enter(xid, tcon->tid, ses->Suid, tcon->tree_name);
2208 spin_lock(&ses->chan_lock);
2209 if ((tcon->need_reconnect) ||
2210 (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
2211 spin_unlock(&ses->chan_lock);
2212 return 0;
2213 }
2214 spin_unlock(&ses->chan_lock);
2215
2216 invalidate_all_cached_dirs(tcon);
2217
2218 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, server,
2219 (void **) &req,
2220 &total_len);
2221 if (rc)
2222 return rc;
2223
2224 if (smb3_encryption_required(tcon))
2225 flags |= CIFS_TRANSFORM_REQ;
2226
2227 flags |= CIFS_NO_RSP_BUF;
2228
2229 iov[0].iov_base = (char *)req;
2230 iov[0].iov_len = total_len;
2231
2232 memset(&rqst, 0, sizeof(struct smb_rqst));
2233 rqst.rq_iov = iov;
2234 rqst.rq_nvec = 1;
2235
2236 rc = cifs_send_recv(xid, ses, server,
2237 &rqst, &resp_buf_type, flags, &rsp_iov);
2238 cifs_small_buf_release(req);
2239 if (rc) {
2240 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
2241 trace_smb3_tdis_err(xid, tcon->tid, ses->Suid, rc);
2242 }
2243 trace_smb3_tdis_done(xid, tcon->tid, ses->Suid);
2244
2245 return rc;
2246 }
2247
2248
2249 static struct create_durable *
2250 create_durable_buf(void)
2251 {
2252 struct create_durable *buf;
2253
2254 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
2255 if (!buf)
2256 return NULL;
2257
2258 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2259 (struct create_durable, Data));
2260 buf->ccontext.DataLength = cpu_to_le32(16);
2261 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2262 (struct create_durable, Name));
2263 buf->ccontext.NameLength = cpu_to_le16(4);
2264 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
2265 buf->Name[0] = 'D';
2266 buf->Name[1] = 'H';
2267 buf->Name[2] = 'n';
2268 buf->Name[3] = 'Q';
2269 return buf;
2270 }
2271
2272 static struct create_durable *
2273 create_reconnect_durable_buf(struct cifs_fid *fid)
2274 {
2275 struct create_durable *buf;
2276
2277 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
2278 if (!buf)
2279 return NULL;
2280
2281 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2282 (struct create_durable, Data));
2283 buf->ccontext.DataLength = cpu_to_le32(16);
2284 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2285 (struct create_durable, Name));
2286 buf->ccontext.NameLength = cpu_to_le16(4);
2287 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
2288 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
2289 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
2290 buf->Name[0] = 'D';
2291 buf->Name[1] = 'H';
2292 buf->Name[2] = 'n';
2293 buf->Name[3] = 'C';
2294 return buf;
2295 }
2296
2297 static void
2298 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
2299 {
2300 struct create_disk_id_rsp *pdisk_id = (struct create_disk_id_rsp *)cc;
2301
2302 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2303 pdisk_id->DiskFileId, pdisk_id->VolumeId);
2304 buf->IndexNumber = pdisk_id->DiskFileId;
2305 }
2306
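/*
 * The POSIX create context response carries, in order: nlink (4 bytes),
 * reparse tag (4 bytes), mode (4 bytes), then the owner and group SIDs back
 * to back.  Extract them into the create_posix_rsp provided by the caller.
 */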
2307 static void
2308 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2309 struct create_posix_rsp *posix)
2310 {
2311 int sid_len;
2312 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2313 u8 *end = beg + le32_to_cpu(cc->DataLength);
2314 u8 *sid;
2315
2316 memset(posix, 0, sizeof(*posix));
2317
2318 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2319 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2320 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2321
2322 sid = beg + 12;
2323 sid_len = posix_info_sid_size(sid, end);
2324 if (sid_len < 0) {
2325 cifs_dbg(VFS, "bad owner sid in posix create response\n");
2326 return;
2327 }
2328 memcpy(&posix->owner, sid, sid_len);
2329
2330 sid = sid + sid_len;
2331 sid_len = posix_info_sid_size(sid, end);
2332 if (sid_len < 0) {
2333 cifs_dbg(VFS, "bad group sid in posix create response\n");
2334 return;
2335 }
2336 memcpy(&posix->group, sid, sid_len);
2337
2338 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2339 posix->nlink, posix->mode, posix->reparse_tag);
2340 }
2341
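/*
 * Walk the chain of create contexts returned in a CREATE response, following
 * the Next offsets with overflow-checked bounds, and pull out the pieces we
 * understand: lease state, on-disk file id and the SMB3.1.1 POSIX context.
 */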
2342 int smb2_parse_contexts(struct TCP_Server_Info *server,
2343 struct kvec *rsp_iov,
2344 __u16 *epoch,
2345 char *lease_key, __u8 *oplock,
2346 struct smb2_file_all_info *buf,
2347 struct create_posix_rsp *posix)
2348 {
2349 struct smb2_create_rsp *rsp = rsp_iov->iov_base;
2350 struct create_context *cc;
2351 size_t rem, off, len;
2352 size_t doff, dlen;
2353 size_t noff, nlen;
2354 char *name;
2355 static const char smb3_create_tag_posix[] = {
2356 0x93, 0xAD, 0x25, 0x50, 0x9C,
2357 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2358 0xDE, 0x96, 0x8B, 0xCD, 0x7C
2359 };
2360
2361 *oplock = 0;
2362
2363 off = le32_to_cpu(rsp->CreateContextsOffset);
2364 rem = le32_to_cpu(rsp->CreateContextsLength);
2365 if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len)
2366 return -EINVAL;
2367 cc = (struct create_context *)((u8 *)rsp + off);
2368
2369 /* Initialize inode number to 0 in case no valid data in qfid context */
2370 if (buf)
2371 buf->IndexNumber = 0;
2372
2373 while (rem >= sizeof(*cc)) {
2374 doff = le16_to_cpu(cc->DataOffset);
2375 dlen = le32_to_cpu(cc->DataLength);
2376 if (check_add_overflow(doff, dlen, &len) || len > rem)
2377 return -EINVAL;
2378
2379 noff = le16_to_cpu(cc->NameOffset);
2380 nlen = le16_to_cpu(cc->NameLength);
2381 if (noff + nlen > doff)
2382 return -EINVAL;
2383
2384 name = (char *)cc + noff;
2385 switch (nlen) {
2386 case 4:
2387 if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) {
2388 *oplock = server->ops->parse_lease_buf(cc, epoch,
2389 lease_key);
2390 } else if (buf &&
2391 !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) {
2392 parse_query_id_ctxt(cc, buf);
2393 }
2394 break;
2395 case 16:
2396 if (posix && !memcmp(name, smb3_create_tag_posix, 16))
2397 parse_posix_ctxt(cc, buf, posix);
2398 break;
2399 default:
2400 cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n",
2401 __func__, nlen, dlen);
2402 if (IS_ENABLED(CONFIG_CIFS_DEBUG2))
2403 cifs_dump_mem("context data: ", cc, dlen);
2404 break;
2405 }
2406
2407 off = le32_to_cpu(cc->Next);
2408 if (!off)
2409 break;
2410 if (check_sub_overflow(rem, off, &rem))
2411 return -EINVAL;
2412 cc = (struct create_context *)((u8 *)cc + off);
2413 }
2414
2415 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2416 *oplock = rsp->OplockLevel;
2417
2418 return 0;
2419 }
2420
2421 static int
2422 add_lease_context(struct TCP_Server_Info *server,
2423 struct smb2_create_req *req,
2424 struct kvec *iov,
2425 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
2426 {
2427 unsigned int num = *num_iovec;
2428
2429 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
2430 if (iov[num].iov_base == NULL)
2431 return -ENOMEM;
2432 iov[num].iov_len = server->vals->create_lease_size;
2433 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2434 *num_iovec = num + 1;
2435 return 0;
2436 }
2437
2438 static struct create_durable_v2 *
2439 create_durable_v2_buf(struct cifs_open_parms *oparms)
2440 {
2441 struct cifs_fid *pfid = oparms->fid;
2442 struct create_durable_v2 *buf;
2443
2444 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
2445 if (!buf)
2446 return NULL;
2447
2448 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2449 (struct create_durable_v2, dcontext));
2450 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
2451 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2452 (struct create_durable_v2, Name));
2453 buf->ccontext.NameLength = cpu_to_le16(4);
2454
2455 /*
2456 * NB: Handle timeout defaults to 0, which allows server to choose
2457 * (most servers default to 120 seconds) and most clients default to 0.
2458 * This can be overridden at mount ("handletimeout=") if the user wants
2459 * a different persistent (or resilient) handle timeout for all opens
2460 * on a particular SMB3 mount.
2461 */
2462 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
2463 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2464
2465 /* for replay, we should not overwrite the existing create guid */
2466 if (!oparms->replay) {
2467 generate_random_uuid(buf->dcontext.CreateGuid);
2468 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2469 } else
2470 memcpy(buf->dcontext.CreateGuid, pfid->create_guid, 16);
2471
2472 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2473 buf->Name[0] = 'D';
2474 buf->Name[1] = 'H';
2475 buf->Name[2] = '2';
2476 buf->Name[3] = 'Q';
2477 return buf;
2478 }
2479
2480 static struct create_durable_handle_reconnect_v2 *
2481 create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2482 {
2483 struct create_durable_handle_reconnect_v2 *buf;
2484
2485 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2486 GFP_KERNEL);
2487 if (!buf)
2488 return NULL;
2489
2490 buf->ccontext.DataOffset =
2491 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2492 dcontext));
2493 buf->ccontext.DataLength =
2494 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2495 buf->ccontext.NameOffset =
2496 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2497 Name));
2498 buf->ccontext.NameLength = cpu_to_le16(4);
2499
2500 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2501 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2502 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2503 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2504
2505 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2506 buf->Name[0] = 'D';
2507 buf->Name[1] = 'H';
2508 buf->Name[2] = '2';
2509 buf->Name[3] = 'C';
2510 return buf;
2511 }
2512
2513 static int
2514 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
2515 struct cifs_open_parms *oparms)
2516 {
2517 unsigned int num = *num_iovec;
2518
2519 iov[num].iov_base = create_durable_v2_buf(oparms);
2520 if (iov[num].iov_base == NULL)
2521 return -ENOMEM;
2522 iov[num].iov_len = sizeof(struct create_durable_v2);
2523 *num_iovec = num + 1;
2524 return 0;
2525 }
2526
2527 static int
2528 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2529 struct cifs_open_parms *oparms)
2530 {
2531 unsigned int num = *num_iovec;
2532
2533 /* indicate that we don't need to relock the file */
2534 oparms->reconnect = false;
2535
2536 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2537 if (iov[num].iov_base == NULL)
2538 return -ENOMEM;
2539 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2540 *num_iovec = num + 1;
2541 return 0;
2542 }
2543
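/*
 * Add the appropriate durable handle create context: the v2 ("DH2Q"/"DH2C")
 * variants when persistent handles are in use, otherwise the original v1
 * ("DHnQ"/"DHnC") ones, using the reconnect form when reopening an existing
 * handle after a reconnect.
 */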
2544 static int
2545 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2546 struct cifs_open_parms *oparms, bool use_persistent)
2547 {
2548 unsigned int num = *num_iovec;
2549
2550 if (use_persistent) {
2551 if (oparms->reconnect)
2552 return add_durable_reconnect_v2_context(iov, num_iovec,
2553 oparms);
2554 else
2555 return add_durable_v2_context(iov, num_iovec, oparms);
2556 }
2557
2558 if (oparms->reconnect) {
2559 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2560 /* indicate that we don't need to relock the file */
2561 oparms->reconnect = false;
2562 } else
2563 iov[num].iov_base = create_durable_buf();
2564 if (iov[num].iov_base == NULL)
2565 return -ENOMEM;
2566 iov[num].iov_len = sizeof(struct create_durable);
2567 *num_iovec = num + 1;
2568 return 0;
2569 }
2570
2571 /* See MS-SMB2 2.2.13.2.7 */
2572 static struct crt_twarp_ctxt *
2573 create_twarp_buf(__u64 timewarp)
2574 {
2575 struct crt_twarp_ctxt *buf;
2576
2577 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2578 if (!buf)
2579 return NULL;
2580
2581 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2582 (struct crt_twarp_ctxt, Timestamp));
2583 buf->ccontext.DataLength = cpu_to_le32(8);
2584 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2585 (struct crt_twarp_ctxt, Name));
2586 buf->ccontext.NameLength = cpu_to_le16(4);
2587 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2588 buf->Name[0] = 'T';
2589 buf->Name[1] = 'W';
2590 buf->Name[2] = 'r';
2591 buf->Name[3] = 'p';
2592 buf->Timestamp = cpu_to_le64(timewarp);
2593 return buf;
2594 }
2595
2596 /* See MS-SMB2 2.2.13.2.7 */
2597 static int
2598 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2599 {
2600 unsigned int num = *num_iovec;
2601
2602 iov[num].iov_base = create_twarp_buf(timewarp);
2603 if (iov[num].iov_base == NULL)
2604 return -ENOMEM;
2605 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2606 *num_iovec = num + 1;
2607 return 0;
2608 }
2609
2610 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
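/*
 * The special S-1-5-88-1-<uid> and S-1-5-88-2-<gid> SIDs encode the caller's
 * fsuid/fsgid directly in the security descriptor, which is how POSIX
 * owner/group ids are conveyed when the mount requests owner info from ACLs
 * (CIFS_MOUNT_UID_FROM_ACL).
 */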
2611 static void setup_owner_group_sids(char *buf)
2612 {
2613 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2614
2615 /* Populate the user ownership fields S-1-5-88-1 */
2616 sids->owner.Revision = 1;
2617 sids->owner.NumAuth = 3;
2618 sids->owner.Authority[5] = 5;
2619 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2620 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2621 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2622
2623 /* Populate the group ownership fields S-1-5-88-2 */
2624 sids->group.Revision = 1;
2625 sids->group.NumAuth = 3;
2626 sids->group.Authority[5] = 5;
2627 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2628 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2629 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
2630
2631 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
2632 }
2633
2634 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
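/*
 * Build the security descriptor create context ("SecD"): a self-relative SD
 * followed by optional owner/group SIDs and a DACL containing an ACE with
 * the mode encoded in a special SID, optionally an owner ACE, and an ACE
 * granting access to authenticated users.
 */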
2635 static struct crt_sd_ctxt *
2636 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
2637 {
2638 struct crt_sd_ctxt *buf;
2639 __u8 *ptr, *aclptr;
2640 unsigned int acelen, acl_size, ace_count;
2641 unsigned int owner_offset = 0;
2642 unsigned int group_offset = 0;
2643 struct smb3_acl acl = {};
2644
2645 *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8);
2646
2647 if (set_owner) {
2648 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
2649 *len += sizeof(struct owner_group_sids);
2650 }
2651
2652 buf = kzalloc(*len, GFP_KERNEL);
2653 if (buf == NULL)
2654 return buf;
2655
2656 ptr = (__u8 *)&buf[1];
2657 if (set_owner) {
2658 /* offset fields are from beginning of security descriptor not of create context */
2659 owner_offset = ptr - (__u8 *)&buf->sd;
2660 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
2661 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
2662 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
2663
2664 setup_owner_group_sids(ptr);
2665 ptr += sizeof(struct owner_group_sids);
2666 } else {
2667 buf->sd.OffsetOwner = 0;
2668 buf->sd.OffsetGroup = 0;
2669 }
2670
2671 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
2672 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
2673 buf->ccontext.NameLength = cpu_to_le16(4);
2674 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2675 buf->Name[0] = 'S';
2676 buf->Name[1] = 'e';
2677 buf->Name[2] = 'c';
2678 buf->Name[3] = 'D';
2679 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */
2680
2681 /*
2682 	 * The ACL is "self-relative", i.e. it is stored in a contiguous block of
2683 	 * memory, and "DP", i.e. the DACL is present
2684 */
2685 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2686
2687 /* offset owner, group and Sbz1 and SACL are all zero */
2688 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2689 	/* Skip the ACL for now; we will copy it into buf later. */
2690 aclptr = ptr;
2691 ptr += sizeof(struct smb3_acl);
2692
2693 /* create one ACE to hold the mode embedded in reserved special SID */
2694 acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode);
2695 ptr += acelen;
2696 acl_size = acelen + sizeof(struct smb3_acl);
2697 ace_count = 1;
2698
2699 if (set_owner) {
2700 /* we do not need to reallocate buffer to add the two more ACEs. plenty of space */
2701 acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr);
2702 ptr += acelen;
2703 acl_size += acelen;
2704 ace_count += 1;
2705 }
2706
2707 /* and one more ACE to allow access for authenticated users */
2708 acelen = setup_authusers_ACE((struct smb_ace *)ptr);
2709 ptr += acelen;
2710 acl_size += acelen;
2711 ace_count += 1;
2712
2713 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2714 acl.AclSize = cpu_to_le16(acl_size);
2715 acl.AceCount = cpu_to_le16(ace_count);
2716 /* acl.Sbz1 and Sbz2 MBZ so are not set here, but initialized above */
2717 memcpy(aclptr, &acl, sizeof(struct smb3_acl));
2718
2719 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2720 *len = round_up((unsigned int)(ptr - (__u8 *)buf), 8);
2721
2722 return buf;
2723 }
2724
2725 static int
2726 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
2727 {
2728 unsigned int num = *num_iovec;
2729 unsigned int len = 0;
2730
2731 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
2732 if (iov[num].iov_base == NULL)
2733 return -ENOMEM;
2734 iov[num].iov_len = len;
2735 *num_iovec = num + 1;
2736 return 0;
2737 }
2738
2739 static struct crt_query_id_ctxt *
2740 create_query_id_buf(void)
2741 {
2742 struct crt_query_id_ctxt *buf;
2743
2744 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2745 if (!buf)
2746 return NULL;
2747
2748 buf->ccontext.DataOffset = cpu_to_le16(0);
2749 buf->ccontext.DataLength = cpu_to_le32(0);
2750 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2751 (struct crt_query_id_ctxt, Name));
2752 buf->ccontext.NameLength = cpu_to_le16(4);
2753 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2754 buf->Name[0] = 'Q';
2755 buf->Name[1] = 'F';
2756 buf->Name[2] = 'i';
2757 buf->Name[3] = 'd';
2758 return buf;
2759 }
2760
2761 /* See MS-SMB2 2.2.13.2.9 */
2762 static int
2763 add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2764 {
2765 unsigned int num = *num_iovec;
2766
2767 iov[num].iov_base = create_query_id_buf();
2768 if (iov[num].iov_base == NULL)
2769 return -ENOMEM;
2770 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2771 *num_iovec = num + 1;
2772 return 0;
2773 }
2774
2775 static void add_ea_context(struct cifs_open_parms *oparms,
2776 struct kvec *rq_iov, unsigned int *num_iovs)
2777 {
2778 struct kvec *iov = oparms->ea_cctx;
2779
2780 if (iov && iov->iov_base && iov->iov_len) {
2781 rq_iov[(*num_iovs)++] = *iov;
2782 memset(iov, 0, sizeof(*iov));
2783 }
2784 }
2785
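/*
 * For DFS operations the CREATE request must carry the full
 * "server\share\path" name (without the leading backslashes); build that
 * UTF-16 string here, rounded up to the 8-byte alignment required by
 * MS-SMB2 2.2.13.
 */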
2786 static int
2787 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2788 const char *treename, const __le16 *path)
2789 {
2790 int treename_len, path_len;
2791 struct nls_table *cp;
2792 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2793
2794 /*
2795 * skip leading "\\"
2796 */
2797 treename_len = strlen(treename);
2798 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2799 return -EINVAL;
2800
2801 treename += 2;
2802 treename_len -= 2;
2803
2804 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2805
2806 /* make room for one path separator only if @path isn't empty */
2807 *out_len = treename_len + (path[0] ? 1 : 0) + path_len;
2808
2809 /*
2810 * final path needs to be 8-byte aligned as specified in
2811 * MS-SMB2 2.2.13 SMB2 CREATE Request.
2812 */
2813 *out_size = round_up(*out_len * sizeof(__le16), 8);
2814 *out_path = kzalloc(*out_size + sizeof(__le16) /* null */, GFP_KERNEL);
2815 if (!*out_path)
2816 return -ENOMEM;
2817
2818 cp = load_nls_default();
2819 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2820
2821 /* Do not append the separator if the path is empty */
2822 if (path[0] != cpu_to_le16(0x0000)) {
2823 UniStrcat((wchar_t *)*out_path, (wchar_t *)sep);
2824 UniStrcat((wchar_t *)*out_path, (wchar_t *)path);
2825 }
2826
2827 unload_nls(cp);
2828
2829 return 0;
2830 }
2831
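/*
 * Create a directory using the SMB3.1.1 POSIX create context to carry the
 * requested mode, then close the handle immediately.  The request can be
 * replayed on another channel if the server returns a replayable error.
 */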
2832 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2833 umode_t mode, struct cifs_tcon *tcon,
2834 const char *full_path,
2835 struct cifs_sb_info *cifs_sb)
2836 {
2837 struct smb_rqst rqst;
2838 struct smb2_create_req *req;
2839 struct smb2_create_rsp *rsp = NULL;
2840 struct cifs_ses *ses = tcon->ses;
2841 struct kvec iov[3]; /* make sure at least one for each open context */
2842 struct kvec rsp_iov = {NULL, 0};
2843 int resp_buftype;
2844 int uni_path_len;
2845 __le16 *copy_path = NULL;
2846 int copy_size;
2847 int rc = 0;
2848 unsigned int n_iov = 2;
2849 __u32 file_attributes = 0;
2850 char *pc_buf = NULL;
2851 int flags = 0;
2852 unsigned int total_len;
2853 __le16 *utf16_path = NULL;
2854 struct TCP_Server_Info *server;
2855 int retries = 0, cur_sleep = 1;
2856
2857 replay_again:
2858 /* reinitialize for possible replay */
2859 flags = 0;
2860 n_iov = 2;
2861 server = cifs_pick_channel(ses);
2862
2863 cifs_dbg(FYI, "mkdir\n");
2864
2865 /* resource #1: path allocation */
2866 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2867 if (!utf16_path)
2868 return -ENOMEM;
2869
2870 if (!ses || !server) {
2871 rc = -EIO;
2872 goto err_free_path;
2873 }
2874
2875 /* resource #2: request */
2876 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2877 (void **) &req, &total_len);
2878 if (rc)
2879 goto err_free_path;
2880
2881
2882 if (smb3_encryption_required(tcon))
2883 flags |= CIFS_TRANSFORM_REQ;
2884
2885 req->ImpersonationLevel = IL_IMPERSONATION;
2886 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2887 /* File attributes ignored on open (used in create though) */
2888 req->FileAttributes = cpu_to_le32(file_attributes);
2889 req->ShareAccess = FILE_SHARE_ALL_LE;
2890 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2891 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2892
2893 iov[0].iov_base = (char *)req;
2894 /* -1 since last byte is buf[0] which is sent below (path) */
2895 iov[0].iov_len = total_len - 1;
2896
2897 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2898
2899 /* [MS-SMB2] 2.2.13 NameOffset:
2900 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2901 * the SMB2 header, the file name includes a prefix that will
2902 * be processed during DFS name normalization as specified in
2903 * section 3.3.5.9. Otherwise, the file name is relative to
2904 * the share that is identified by the TreeId in the SMB2
2905 * header.
2906 */
2907 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2908 int name_len;
2909
2910 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2911 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2912 &name_len,
2913 tcon->tree_name, utf16_path);
2914 if (rc)
2915 goto err_free_req;
2916
2917 req->NameLength = cpu_to_le16(name_len * 2);
2918 uni_path_len = copy_size;
2919 /* free before overwriting resource */
2920 kfree(utf16_path);
2921 utf16_path = copy_path;
2922 } else {
2923 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
2924 /* MUST set path len (NameLength) to 0 opening root of share */
2925 req->NameLength = cpu_to_le16(uni_path_len - 2);
2926 if (uni_path_len % 8 != 0) {
2927 copy_size = roundup(uni_path_len, 8);
2928 copy_path = kzalloc(copy_size, GFP_KERNEL);
2929 if (!copy_path) {
2930 rc = -ENOMEM;
2931 goto err_free_req;
2932 }
2933 memcpy((char *)copy_path, (const char *)utf16_path,
2934 uni_path_len);
2935 uni_path_len = copy_size;
2936 /* free before overwriting resource */
2937 kfree(utf16_path);
2938 utf16_path = copy_path;
2939 }
2940 }
2941
2942 iov[1].iov_len = uni_path_len;
2943 iov[1].iov_base = utf16_path;
2944 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2945
2946 if (tcon->posix_extensions) {
2947 /* resource #3: posix buf */
2948 rc = add_posix_context(iov, &n_iov, mode);
2949 if (rc)
2950 goto err_free_req;
2951 req->CreateContextsOffset = cpu_to_le32(
2952 sizeof(struct smb2_create_req) +
2953 iov[1].iov_len);
2954 le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
2955 pc_buf = iov[n_iov-1].iov_base;
2956 }
2957
2958
2959 memset(&rqst, 0, sizeof(struct smb_rqst));
2960 rqst.rq_iov = iov;
2961 rqst.rq_nvec = n_iov;
2962
2963 /* no need to inc num_remote_opens because we close it just below */
2964 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
2965 FILE_WRITE_ATTRIBUTES);
2966
2967 if (retries)
2968 smb2_set_replay(server, &rqst);
2969
2970 /* resource #4: response buffer */
2971 rc = cifs_send_recv(xid, ses, server,
2972 &rqst, &resp_buftype, flags, &rsp_iov);
2973 if (rc) {
2974 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2975 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
2976 CREATE_NOT_FILE,
2977 FILE_WRITE_ATTRIBUTES, rc);
2978 goto err_free_rsp_buf;
2979 }
2980
2981 /*
2982 * Although unlikely to be possible for rsp to be null and rc not set,
2983 * adding check below is slightly safer long term (and quiets Coverity
2984 * warning)
2985 */
2986 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
2987 if (rsp == NULL) {
2988 rc = -EIO;
2989 kfree(pc_buf);
2990 goto err_free_req;
2991 }
2992
2993 trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
2994 CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES);
2995
2996 SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
2997
2998 /* Eventually save off posix specific response info and timestamps */
2999
3000 err_free_rsp_buf:
3001 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3002 kfree(pc_buf);
3003 err_free_req:
3004 cifs_small_buf_release(req);
3005 err_free_path:
3006 kfree(utf16_path);
3007
3008 if (is_replayable_error(rc) &&
3009 smb2_should_replay(tcon, &retries, &cur_sleep))
3010 goto replay_again;
3011
3012 return rc;
3013 }
3014
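/*
 * Build the CREATE request in rqst: iov[0] is the fixed part, iov[1] the
 * UTF-16 path (padded to 8 bytes), and any remaining iovecs carry create
 * contexts (lease, durable handle, POSIX, timewarp, security descriptor,
 * query-on-disk-id, EAs) chained together via their Next fields.
 */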
3015 int
3016 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3017 struct smb_rqst *rqst, __u8 *oplock,
3018 struct cifs_open_parms *oparms, __le16 *path)
3019 {
3020 struct smb2_create_req *req;
3021 unsigned int n_iov = 2;
3022 __u32 file_attributes = 0;
3023 int copy_size;
3024 int uni_path_len;
3025 unsigned int total_len;
3026 struct kvec *iov = rqst->rq_iov;
3027 __le16 *copy_path;
3028 int rc;
3029
3030 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
3031 (void **) &req, &total_len);
3032 if (rc)
3033 return rc;
3034
3035 iov[0].iov_base = (char *)req;
3036 /* -1 since last byte is buf[0] which is sent below (path) */
3037 iov[0].iov_len = total_len - 1;
3038
3039 if (oparms->create_options & CREATE_OPTION_READONLY)
3040 file_attributes |= ATTR_READONLY;
3041 if (oparms->create_options & CREATE_OPTION_SPECIAL)
3042 file_attributes |= ATTR_SYSTEM;
3043
3044 req->ImpersonationLevel = IL_IMPERSONATION;
3045 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
3046 /* File attributes ignored on open (used in create though) */
3047 req->FileAttributes = cpu_to_le32(file_attributes);
3048 req->ShareAccess = FILE_SHARE_ALL_LE;
3049
3050 req->CreateDisposition = cpu_to_le32(oparms->disposition);
3051 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
3052 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
3053
3054 /* [MS-SMB2] 2.2.13 NameOffset:
3055 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
3056 * the SMB2 header, the file name includes a prefix that will
3057 * be processed during DFS name normalization as specified in
3058 * section 3.3.5.9. Otherwise, the file name is relative to
3059 * the share that is identified by the TreeId in the SMB2
3060 * header.
3061 */
3062 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
3063 int name_len;
3064
3065 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
3066 		rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
3067 &name_len,
3068 tcon->tree_name, path);
3069 if (rc)
3070 return rc;
3071 req->NameLength = cpu_to_le16(name_len * 2);
3072 uni_path_len = copy_size;
3073 path = copy_path;
3074 } else {
3075 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
3076 /* MUST set path len (NameLength) to 0 opening root of share */
3077 req->NameLength = cpu_to_le16(uni_path_len - 2);
3078 copy_size = round_up(uni_path_len, 8);
3079 copy_path = kzalloc(copy_size, GFP_KERNEL);
3080 if (!copy_path)
3081 return -ENOMEM;
3082 memcpy((char *)copy_path, (const char *)path,
3083 uni_path_len);
3084 uni_path_len = copy_size;
3085 path = copy_path;
3086 }
3087
3088 iov[1].iov_len = uni_path_len;
3089 iov[1].iov_base = path;
3090
3091 if ((!server->oplocks) || (tcon->no_lease))
3092 *oplock = SMB2_OPLOCK_LEVEL_NONE;
3093
3094 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3095 *oplock == SMB2_OPLOCK_LEVEL_NONE)
3096 req->RequestedOplockLevel = *oplock;
3097 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3098 (oparms->create_options & CREATE_NOT_FILE))
3099 req->RequestedOplockLevel = *oplock; /* no srv lease support */
3100 else {
3101 rc = add_lease_context(server, req, iov, &n_iov,
3102 oparms->fid->lease_key, oplock);
3103 if (rc)
3104 return rc;
3105 }
3106
3107 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3108 rc = add_durable_context(iov, &n_iov, oparms,
3109 tcon->use_persistent);
3110 if (rc)
3111 return rc;
3112 }
3113
3114 if (tcon->posix_extensions) {
3115 rc = add_posix_context(iov, &n_iov, oparms->mode);
3116 if (rc)
3117 return rc;
3118 }
3119
3120 if (tcon->snapshot_time) {
3121 cifs_dbg(FYI, "adding snapshot context\n");
3122 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
3123 if (rc)
3124 return rc;
3125 }
3126
3127 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
3128 bool set_mode;
3129 bool set_owner;
3130
3131 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
3132 (oparms->mode != ACL_NO_MODE))
3133 set_mode = true;
3134 else {
3135 set_mode = false;
3136 oparms->mode = ACL_NO_MODE;
3137 }
3138
3139 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
3140 set_owner = true;
3141 else
3142 set_owner = false;
3143
3144 if (set_owner | set_mode) {
3145 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
3146 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
3147 if (rc)
3148 return rc;
3149 }
3150 }
3151
3152 add_query_id_context(iov, &n_iov);
3153 add_ea_context(oparms, iov, &n_iov);
3154
3155 if (n_iov > 2) {
3156 /*
3157 * We have create contexts behind iov[1] (the file
3158 * name), point at them from the main create request
3159 */
3160 req->CreateContextsOffset = cpu_to_le32(
3161 sizeof(struct smb2_create_req) +
3162 iov[1].iov_len);
3163 req->CreateContextsLength = 0;
3164
3165 for (unsigned int i = 2; i < (n_iov-1); i++) {
3166 struct kvec *v = &iov[i];
3167 size_t len = v->iov_len;
3168 struct create_context *cctx =
3169 (struct create_context *)v->iov_base;
3170
3171 cctx->Next = cpu_to_le32(len);
3172 le32_add_cpu(&req->CreateContextsLength, len);
3173 }
3174 le32_add_cpu(&req->CreateContextsLength,
3175 iov[n_iov-1].iov_len);
3176 }
3177
3178 rqst->rq_nvec = n_iov;
3179 return 0;
3180 }
3181
3182 /* rq_iov[0] is the request and is released by cifs_small_buf_release().
3183 * All other vectors are freed by kfree().
3184 */
3185 void
3186 SMB2_open_free(struct smb_rqst *rqst)
3187 {
3188 int i;
3189
3190 if (rqst && rqst->rq_iov) {
3191 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
3192 for (i = 1; i < rqst->rq_nvec; i++)
3193 if (rqst->rq_iov[i].iov_base != smb2_padding)
3194 kfree(rqst->rq_iov[i].iov_base);
3195 }
3196 }
3197
3198 int
3199 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3200 __u8 *oplock, struct smb2_file_all_info *buf,
3201 struct create_posix_rsp *posix,
3202 struct kvec *err_iov, int *buftype)
3203 {
3204 struct smb_rqst rqst;
3205 struct smb2_create_rsp *rsp = NULL;
3206 struct cifs_tcon *tcon = oparms->tcon;
3207 struct cifs_ses *ses = tcon->ses;
3208 struct TCP_Server_Info *server;
3209 struct kvec iov[SMB2_CREATE_IOV_SIZE];
3210 struct kvec rsp_iov = {NULL, 0};
3211 int resp_buftype = CIFS_NO_BUFFER;
3212 int rc = 0;
3213 int flags = 0;
3214 int retries = 0, cur_sleep = 1;
3215
3216 replay_again:
3217 /* reinitialize for possible replay */
3218 flags = 0;
3219 server = cifs_pick_channel(ses);
3220 oparms->replay = !!(retries);
3221
3222 cifs_dbg(FYI, "create/open\n");
3223 if (!ses || !server)
3224 return -EIO;
3225
3226 if (smb3_encryption_required(tcon))
3227 flags |= CIFS_TRANSFORM_REQ;
3228
3229 memset(&rqst, 0, sizeof(struct smb_rqst));
3230 memset(&iov, 0, sizeof(iov));
3231 rqst.rq_iov = iov;
3232 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
3233
3234 rc = SMB2_open_init(tcon, server,
3235 &rqst, oplock, oparms, path);
3236 if (rc)
3237 goto creat_exit;
3238
3239 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
3240 oparms->create_options, oparms->desired_access);
3241
3242 if (retries)
3243 smb2_set_replay(server, &rqst);
3244
3245 rc = cifs_send_recv(xid, ses, server,
3246 &rqst, &resp_buftype, flags,
3247 &rsp_iov);
3248 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
3249
3250 if (rc != 0) {
3251 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
3252 if (err_iov && rsp) {
3253 *err_iov = rsp_iov;
3254 *buftype = resp_buftype;
3255 resp_buftype = CIFS_NO_BUFFER;
3256 rsp = NULL;
3257 }
3258 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
3259 oparms->create_options, oparms->desired_access, rc);
3260 if (rc == -EREMCHG) {
3261 pr_warn_once("server share %s deleted\n",
3262 tcon->tree_name);
3263 tcon->need_reconnect = true;
3264 }
3265 goto creat_exit;
3266 } else if (rsp == NULL) /* unlikely to happen, but safer to check */
3267 goto creat_exit;
3268 else
3269 trace_smb3_open_done(xid, rsp->PersistentFileId, tcon->tid, ses->Suid,
3270 oparms->create_options, oparms->desired_access);
3271
3272 atomic_inc(&tcon->num_remote_opens);
3273 oparms->fid->persistent_fid = rsp->PersistentFileId;
3274 oparms->fid->volatile_fid = rsp->VolatileFileId;
3275 oparms->fid->access = oparms->desired_access;
3276 #ifdef CONFIG_CIFS_DEBUG2
3277 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
3278 #endif /* CIFS_DEBUG2 */
3279
3280 if (buf) {
3281 buf->CreationTime = rsp->CreationTime;
3282 buf->LastAccessTime = rsp->LastAccessTime;
3283 buf->LastWriteTime = rsp->LastWriteTime;
3284 buf->ChangeTime = rsp->ChangeTime;
3285 buf->AllocationSize = rsp->AllocationSize;
3286 buf->EndOfFile = rsp->EndofFile;
3287 buf->Attributes = rsp->FileAttributes;
3288 buf->NumberOfLinks = cpu_to_le32(1);
3289 buf->DeletePending = 0;
3290 }
3291
3292
3293 rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch,
3294 oparms->fid->lease_key, oplock, buf, posix);
3295 creat_exit:
3296 SMB2_open_free(&rqst);
3297 free_rsp_buf(resp_buftype, rsp);
3298
3299 if (is_replayable_error(rc) &&
3300 smb2_should_replay(tcon, &retries, &cur_sleep))
3301 goto replay_again;
3302
3303 return rc;
3304 }
3305
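/*
 * Build an SMB2 IOCTL/FSCTL request: any input data is copied into a second
 * iovec, and the credit charge is computed from the larger of the input
 * length and the maximum response size the caller will accept.
 */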
3306 int
3307 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3308 struct smb_rqst *rqst,
3309 u64 persistent_fid, u64 volatile_fid, u32 opcode,
3310 char *in_data, u32 indatalen,
3311 __u32 max_response_size)
3312 {
3313 struct smb2_ioctl_req *req;
3314 struct kvec *iov = rqst->rq_iov;
3315 unsigned int total_len;
3316 int rc;
3317 char *in_data_buf;
3318
3319 rc = smb2_ioctl_req_init(opcode, tcon, server,
3320 (void **) &req, &total_len);
3321 if (rc)
3322 return rc;
3323
3324 if (indatalen) {
3325 unsigned int len;
3326
3327 if (WARN_ON_ONCE(smb3_encryption_required(tcon) &&
3328 (check_add_overflow(total_len - 1,
3329 ALIGN(indatalen, 8), &len) ||
3330 len > MAX_CIFS_SMALL_BUFFER_SIZE))) {
3331 cifs_small_buf_release(req);
3332 return -EIO;
3333 }
3334 /*
3335 * indatalen is usually small at a couple of bytes max, so
3336 * just allocate through generic pool
3337 */
3338 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
3339 if (!in_data_buf) {
3340 cifs_small_buf_release(req);
3341 return -ENOMEM;
3342 }
3343 }
3344
3345 req->CtlCode = cpu_to_le32(opcode);
3346 req->PersistentFileId = persistent_fid;
3347 req->VolatileFileId = volatile_fid;
3348
3349 iov[0].iov_base = (char *)req;
3350 /*
3351 * If no input data, the size of ioctl struct in
3352 * protocol spec still includes a 1 byte data buffer,
3353 * but if input data passed to ioctl, we do not
3354 * want to double count this, so we do not send
3355 * the dummy one byte of data in iovec[0] if sending
3356 * input data (in iovec[1]).
3357 */
3358 if (indatalen) {
3359 req->InputCount = cpu_to_le32(indatalen);
3360 /* do not set InputOffset if no input data */
3361 req->InputOffset =
3362 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
3363 rqst->rq_nvec = 2;
3364 iov[0].iov_len = total_len - 1;
3365 iov[1].iov_base = in_data_buf;
3366 iov[1].iov_len = indatalen;
3367 } else {
3368 rqst->rq_nvec = 1;
3369 iov[0].iov_len = total_len;
3370 }
3371
3372 req->OutputOffset = 0;
3373 req->OutputCount = 0; /* MBZ */
3374
3375 /*
3376 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
3377 * We could increase the default MaxOutputResponse, but that could
3378 * require more credits. Windows typically sets this smaller, but for
3379 * some ioctls it may be useful to allow the server to send more.
3380 * There is no point limiting what the server can send as long as it
3381 * fits in one credit. We cannot handle more than CIFS_MAX_BUF_SIZE
3382 * yet, but may want to increase this limit in the future.
3383 * Note that for snapshot queries, servers such as Azure expect the
3384 * first query to be minimal in size (it is just used to get the
3385 * number/size of previous versions), so the response size must be
3386 * specified as EXACTLY sizeof(struct snapshot_array), which is 16
3387 * when rounded up to a multiple of eight bytes. Currently that is
3388 * the only case where we set max response size smaller.
3389 */
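/*
 * Credit charge example (sketch): with the default 16K CIFSMaxBufSize and
 * no input data, DIV_ROUND_UP(16384, SMB2_MAX_BUFFER_SIZE) below charges a
 * single credit; a hypothetical 1M max_response_size would charge 16
 * credits (assuming SMB2_MAX_BUFFER_SIZE of 64K).
 */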
3390 req->MaxOutputResponse = cpu_to_le32(max_response_size);
3391 req->hdr.CreditCharge =
3392 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3393 SMB2_MAX_BUFFER_SIZE));
3394 /* always an FSCTL (for now) */
3395 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3396
3397 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3398 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
3399 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
3400
3401 return 0;
3402 }
3403
3404 void
3405 SMB2_ioctl_free(struct smb_rqst *rqst)
3406 {
3407 int i;
3408
3409 if (rqst && rqst->rq_iov) {
3410 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3411 for (i = 1; i < rqst->rq_nvec; i++)
3412 if (rqst->rq_iov[i].iov_base != smb2_padding)
3413 kfree(rqst->rq_iov[i].iov_base);
3414 }
3415 }
3416
3417
3418 /*
3419 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3420 */
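/*
 * Synchronous ioctl worker.  On success, when @out_data and @plen are
 * supplied, the output buffer is returned in a kmalloc'ed *out_data
 * (caller must kfree it) and its length in *plen.
 */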
3421 int
3422 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3423 u64 volatile_fid, u32 opcode, char *in_data, u32 indatalen,
3424 u32 max_out_data_len, char **out_data,
3425 u32 *plen /* returned data len */)
3426 {
3427 struct smb_rqst rqst;
3428 struct smb2_ioctl_rsp *rsp = NULL;
3429 struct cifs_ses *ses;
3430 struct TCP_Server_Info *server;
3431 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
3432 struct kvec rsp_iov = {NULL, 0};
3433 int resp_buftype = CIFS_NO_BUFFER;
3434 int rc = 0;
3435 int flags = 0;
3436 int retries = 0, cur_sleep = 1;
3437
3438 if (!tcon)
3439 return -EIO;
3440
3441 ses = tcon->ses;
3442 if (!ses)
3443 return -EIO;
3444
3445 replay_again:
3446 /* reinitialize for possible replay */
3447 flags = 0;
3448 server = cifs_pick_channel(ses);
3449
3450 if (!server)
3451 return -EIO;
3452
3453 cifs_dbg(FYI, "SMB2 IOCTL\n");
3454
3455 if (out_data != NULL)
3456 *out_data = NULL;
3457
3458 /* zero out returned data len, in case of error */
3459 if (plen)
3460 *plen = 0;
3461
3462 if (smb3_encryption_required(tcon))
3463 flags |= CIFS_TRANSFORM_REQ;
3464
3465 memset(&rqst, 0, sizeof(struct smb_rqst));
3466 memset(&iov, 0, sizeof(iov));
3467 rqst.rq_iov = iov;
3468 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
3469
3470 rc = SMB2_ioctl_init(tcon, server,
3471 &rqst, persistent_fid, volatile_fid, opcode,
3472 in_data, indatalen, max_out_data_len);
3473 if (rc)
3474 goto ioctl_exit;
3475
3476 if (retries)
3477 smb2_set_replay(server, &rqst);
3478
3479 rc = cifs_send_recv(xid, ses, server,
3480 &rqst, &resp_buftype, flags,
3481 &rsp_iov);
3482 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
3483
3484 if (rc != 0)
3485 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3486 ses->Suid, 0, opcode, rc);
3487
3488 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
3489 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3490 goto ioctl_exit;
3491 } else if (rc == -EINVAL) {
3492 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3493 (opcode != FSCTL_SRV_COPYCHUNK)) {
3494 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3495 goto ioctl_exit;
3496 }
3497 } else if (rc == -E2BIG) {
3498 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3499 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3500 goto ioctl_exit;
3501 }
3502 }
3503
3504 /* check if caller wants to look at return data or just return rc */
3505 if ((plen == NULL) || (out_data == NULL))
3506 goto ioctl_exit;
3507
3508 /*
3509 * It should not be possible for rsp to be NULL while rc is unset,
3510 * but the check below is slightly safer long term (and quiets a
3511 * Coverity warning).
3512 */
3513 if (rsp == NULL) {
3514 rc = -EIO;
3515 goto ioctl_exit;
3516 }
3517
3518 *plen = le32_to_cpu(rsp->OutputCount);
3519
3520 /* We check for obvious errors in the output buffer length and offset */
3521 if (*plen == 0)
3522 goto ioctl_exit; /* server returned no data */
3523 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
3524 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
3525 *plen = 0;
3526 rc = -EIO;
3527 goto ioctl_exit;
3528 }
3529
3530 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
3531 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
3532 le32_to_cpu(rsp->OutputOffset));
3533 *plen = 0;
3534 rc = -EIO;
3535 goto ioctl_exit;
3536 }
3537
3538 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3539 *plen, GFP_KERNEL);
3540 if (*out_data == NULL) {
3541 rc = -ENOMEM;
3542 goto ioctl_exit;
3543 }
3544
3545 ioctl_exit:
3546 SMB2_ioctl_free(&rqst);
3547 free_rsp_buf(resp_buftype, rsp);
3548
3549 if (is_replayable_error(rc) &&
3550 smb2_should_replay(tcon, &retries, &cur_sleep))
3551 goto replay_again;
3552
3553 return rc;
3554 }
3555
3556 /*
3557 * Individual callers to ioctl worker function follow
3558 */
3559
3560 int
3561 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3562 u64 persistent_fid, u64 volatile_fid)
3563 {
3564 int rc;
3565 struct compress_ioctl fsctl_input;
3566 char *ret_data = NULL;
3567
3568 fsctl_input.CompressionState =
3569 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
3570
3571 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3572 FSCTL_SET_COMPRESSION,
3573 (char *)&fsctl_input /* data input */,
3574 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3575 &ret_data /* out data */, NULL);
3576
3577 cifs_dbg(FYI, "set compression rc %d\n", rc);
3578
3579 return rc;
3580 }
3581
3582 int
3583 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3584 struct smb_rqst *rqst,
3585 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
3586 {
3587 struct smb2_close_req *req;
3588 struct kvec *iov = rqst->rq_iov;
3589 unsigned int total_len;
3590 int rc;
3591
3592 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3593 (void **) &req, &total_len);
3594 if (rc)
3595 return rc;
3596
3597 req->PersistentFileId = persistent_fid;
3598 req->VolatileFileId = volatile_fid;
3599 if (query_attrs)
3600 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3601 else
3602 req->Flags = 0;
3603 iov[0].iov_base = (char *)req;
3604 iov[0].iov_len = total_len;
3605
3606 return 0;
3607 }
3608
3609 void
3610 SMB2_close_free(struct smb_rqst *rqst)
3611 {
3612 if (rqst && rqst->rq_iov)
3613 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3614 }
3615
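/*
 * Close the handle.  If @pbuf is non-NULL, SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB
 * is set so the server returns the final attributes (timestamps, sizes) in
 * the close response, which are then copied into @pbuf.
 */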
3616 int
3617 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3618 u64 persistent_fid, u64 volatile_fid,
3619 struct smb2_file_network_open_info *pbuf)
3620 {
3621 struct smb_rqst rqst;
3622 struct smb2_close_rsp *rsp = NULL;
3623 struct cifs_ses *ses = tcon->ses;
3624 struct TCP_Server_Info *server;
3625 struct kvec iov[1];
3626 struct kvec rsp_iov;
3627 int resp_buftype = CIFS_NO_BUFFER;
3628 int rc = 0;
3629 int flags = 0;
3630 bool query_attrs = false;
3631 int retries = 0, cur_sleep = 1;
3632
3633 replay_again:
3634 /* reinitialize for possible replay */
3635 flags = 0;
3636 query_attrs = false;
3637 server = cifs_pick_channel(ses);
3638
3639 cifs_dbg(FYI, "Close\n");
3640
3641 if (!ses || !server)
3642 return -EIO;
3643
3644 if (smb3_encryption_required(tcon))
3645 flags |= CIFS_TRANSFORM_REQ;
3646
3647 memset(&rqst, 0, sizeof(struct smb_rqst));
3648 memset(&iov, 0, sizeof(iov));
3649 rqst.rq_iov = iov;
3650 rqst.rq_nvec = 1;
3651
3652 /* check whether we need to ask the server to return timestamps in the close response */
3653 if (pbuf)
3654 query_attrs = true;
3655
3656 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
3657 rc = SMB2_close_init(tcon, server,
3658 &rqst, persistent_fid, volatile_fid,
3659 query_attrs);
3660 if (rc)
3661 goto close_exit;
3662
3663 if (retries)
3664 smb2_set_replay(server, &rqst);
3665
3666 rc = cifs_send_recv(xid, ses, server,
3667 &rqst, &resp_buftype, flags, &rsp_iov);
3668 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
3669
3670 if (rc != 0) {
3671 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
3672 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3673 rc);
3674 goto close_exit;
3675 } else {
3676 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3677 ses->Suid);
3678 if (pbuf)
3679 memcpy(&pbuf->network_open_info,
3680 &rsp->network_open_info,
3681 sizeof(pbuf->network_open_info));
3682 atomic_dec(&tcon->num_remote_opens);
3683 }
3684
3685 close_exit:
3686 SMB2_close_free(&rqst);
3687 free_rsp_buf(resp_buftype, rsp);
3688
3689 /* retry close in a worker thread if this one is interrupted */
3690 if (is_interrupt_error(rc)) {
3691 int tmp_rc;
3692
3693 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3694 volatile_fid);
3695 if (tmp_rc)
3696 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3697 persistent_fid, tmp_rc);
3698 }
3699
3700 if (is_replayable_error(rc) &&
3701 smb2_should_replay(tcon, &retries, &cur_sleep))
3702 goto replay_again;
3703
3704 return rc;
3705 }
3706
3707 int
3708 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3709 u64 persistent_fid, u64 volatile_fid)
3710 {
3711 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3712 }
3713
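/*
 * Sanity check that the variable length data described by (@offset,
 * @buffer_length) lies entirely within the received response iov and is at
 * least @min_buf_size bytes long.
 */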
3714 int
3715 smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3716 struct kvec *iov, unsigned int min_buf_size)
3717 {
3718 unsigned int smb_len = iov->iov_len;
3719 char *end_of_smb = smb_len + (char *)iov->iov_base;
3720 char *begin_of_buf = offset + (char *)iov->iov_base;
3721 char *end_of_buf = begin_of_buf + buffer_length;
3722
3723
3724 if (buffer_length < min_buf_size) {
3725 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3726 buffer_length, min_buf_size);
3727 return -EINVAL;
3728 }
3729
3730 /* check if beyond RFC1001 maximum length */
3731 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
3732 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3733 buffer_length, smb_len);
3734 return -EINVAL;
3735 }
3736
3737 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
3738 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
3739 return -EINVAL;
3740 }
3741
3742 return 0;
3743 }
3744
3745 /*
3746 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3747 * Caller must free buffer.
3748 */
3749 int
3750 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3751 struct kvec *iov, unsigned int minbufsize,
3752 char *data)
3753 {
3754 char *begin_of_buf = offset + (char *)iov->iov_base;
3755 int rc;
3756
3757 if (!data)
3758 return -EINVAL;
3759
3760 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
3761 if (rc)
3762 return rc;
3763
3764 memcpy(data, begin_of_buf, minbufsize);
3765
3766 return 0;
3767 }
3768
3769 int
3770 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3771 struct smb_rqst *rqst,
3772 u64 persistent_fid, u64 volatile_fid,
3773 u8 info_class, u8 info_type, u32 additional_info,
3774 size_t output_len, size_t input_len, void *input)
3775 {
3776 struct smb2_query_info_req *req;
3777 struct kvec *iov = rqst->rq_iov;
3778 unsigned int total_len;
3779 size_t len;
3780 int rc;
3781
3782 if (unlikely(check_add_overflow(input_len, sizeof(*req), &len) ||
3783 len > CIFSMaxBufSize))
3784 return -EINVAL;
3785
3786 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3787 (void **) &req, &total_len);
3788 if (rc)
3789 return rc;
3790
3791 req->InfoType = info_type;
3792 req->FileInfoClass = info_class;
3793 req->PersistentFileId = persistent_fid;
3794 req->VolatileFileId = volatile_fid;
3795 req->AdditionalInformation = cpu_to_le32(additional_info);
3796
3797 req->OutputBufferLength = cpu_to_le32(output_len);
3798 if (input_len) {
3799 req->InputBufferLength = cpu_to_le32(input_len);
3800 /* total_len for an smb query request is never close to the le16 max */
3801 req->InputBufferOffset = cpu_to_le16(total_len - 1);
3802 memcpy(req->Buffer, input, input_len);
3803 }
3804
3805 iov[0].iov_base = (char *)req;
3806 /* 1 for Buffer */
3807 iov[0].iov_len = len;
3808 return 0;
3809 }
3810
3811 void
3812 SMB2_query_info_free(struct smb_rqst *rqst)
3813 {
3814 if (rqst && rqst->rq_iov)
3815 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
3816 }
3817
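/*
 * Common worker for SMB2 QUERY_INFO.  When @dlen is supplied and *data is
 * NULL, a buffer of the returned length is kmalloc'ed on behalf of the
 * caller (who must free it); otherwise the validated response is copied
 * into the caller's buffer.
 */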
3818 static int
3819 query_info(const unsigned int xid, struct cifs_tcon *tcon,
3820 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3821 u32 additional_info, size_t output_len, size_t min_len, void **data,
3822 u32 *dlen)
3823 {
3824 struct smb_rqst rqst;
3825 struct smb2_query_info_rsp *rsp = NULL;
3826 struct kvec iov[1];
3827 struct kvec rsp_iov;
3828 int rc = 0;
3829 int resp_buftype = CIFS_NO_BUFFER;
3830 struct cifs_ses *ses = tcon->ses;
3831 struct TCP_Server_Info *server;
3832 int flags = 0;
3833 bool allocated = false;
3834 int retries = 0, cur_sleep = 1;
3835
3836 cifs_dbg(FYI, "Query Info\n");
3837
3838 if (!ses)
3839 return -EIO;
3840
3841 replay_again:
3842 /* reinitialize for possible replay */
3843 flags = 0;
3844 allocated = false;
3845 server = cifs_pick_channel(ses);
3846
3847 if (!server)
3848 return -EIO;
3849
3850 if (smb3_encryption_required(tcon))
3851 flags |= CIFS_TRANSFORM_REQ;
3852
3853 memset(&rqst, 0, sizeof(struct smb_rqst));
3854 memset(&iov, 0, sizeof(iov));
3855 rqst.rq_iov = iov;
3856 rqst.rq_nvec = 1;
3857
3858 rc = SMB2_query_info_init(tcon, server,
3859 &rqst, persistent_fid, volatile_fid,
3860 info_class, info_type, additional_info,
3861 output_len, 0, NULL);
3862 if (rc)
3863 goto qinf_exit;
3864
3865 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3866 ses->Suid, info_class, (__u32)info_type);
3867
3868 if (retries)
3869 smb2_set_replay(server, &rqst);
3870
3871 rc = cifs_send_recv(xid, ses, server,
3872 &rqst, &resp_buftype, flags, &rsp_iov);
3873 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
3874
3875 if (rc) {
3876 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
3877 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3878 ses->Suid, info_class, (__u32)info_type, rc);
3879 goto qinf_exit;
3880 }
3881
3882 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3883 ses->Suid, info_class, (__u32)info_type);
3884
3885 if (dlen) {
3886 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3887 if (!*data) {
3888 *data = kmalloc(*dlen, GFP_KERNEL);
3889 if (!*data) {
3890 cifs_tcon_dbg(VFS,
3891 "Error %d allocating memory for acl\n",
3892 rc);
3893 *dlen = 0;
3894 rc = -ENOMEM;
3895 goto qinf_exit;
3896 }
3897 allocated = true;
3898 }
3899 }
3900
3901 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3902 le32_to_cpu(rsp->OutputBufferLength),
3903 &rsp_iov, dlen ? *dlen : min_len, *data);
3904 if (rc && allocated) {
3905 kfree(*data);
3906 *data = NULL;
3907 *dlen = 0;
3908 }
3909
3910 qinf_exit:
3911 SMB2_query_info_free(&rqst);
3912 free_rsp_buf(resp_buftype, rsp);
3913
3914 if (is_replayable_error(rc) &&
3915 smb2_should_replay(tcon, &retries, &cur_sleep))
3916 goto replay_again;
3917
3918 return rc;
3919 }
3920
3921 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3922 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
3923 {
3924 return query_info(xid, tcon, persistent_fid, volatile_fid,
3925 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
3926 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3927 sizeof(struct smb2_file_all_info), (void **)&data,
3928 NULL);
3929 }
3930
3931 #if 0
3932 /* currently unused, as now we are doing compounding instead (see smb311_posix_query_path_info) */
3933 int
3934 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3935 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
3936 {
3937 size_t output_len = sizeof(struct smb311_posix_qinfo) +
3938 (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
3939 *plen = 0;
3940
3941 return query_info(xid, tcon, persistent_fid, volatile_fid,
3942 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3943 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
3944 /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
3945 }
3946 #endif
3947
3948 int
3949 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
3950 u64 persistent_fid, u64 volatile_fid,
3951 void **data, u32 *plen, u32 extra_info)
3952 {
3953 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
3954 extra_info;
3955 *plen = 0;
3956
3957 return query_info(xid, tcon, persistent_fid, volatile_fid,
3958 0, SMB2_O_INFO_SECURITY, additional_info,
3959 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
3960 }
3961
3962 int
3963 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3964 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
3965 {
3966 return query_info(xid, tcon, persistent_fid, volatile_fid,
3967 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
3968 sizeof(struct smb2_file_internal_info),
3969 sizeof(struct smb2_file_internal_info),
3970 (void **)&uniqueid, NULL);
3971 }
3972
3973 /*
3974 * A CHANGE_NOTIFY request is sent to get notifications of changes to a
3975 * directory.  See MS-SMB2 2.2.35 and 2.2.36.
3976 */
3977
3978 static int
3979 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
3980 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3981 u64 persistent_fid, u64 volatile_fid,
3982 u32 completion_filter, bool watch_tree)
3983 {
3984 struct smb2_change_notify_req *req;
3985 struct kvec *iov = rqst->rq_iov;
3986 unsigned int total_len;
3987 int rc;
3988
3989 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
3990 (void **) &req, &total_len);
3991 if (rc)
3992 return rc;
3993
3994 req->PersistentFileId = persistent_fid;
3995 req->VolatileFileId = volatile_fid;
3996 /* See note 354 of MS-SMB2, 64K max */
3997 req->OutputBufferLength =
3998 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
3999 req->CompletionFilter = cpu_to_le32(completion_filter);
4000 if (watch_tree)
4001 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
4002 else
4003 req->Flags = 0;
4004
4005 iov[0].iov_base = (char *)req;
4006 iov[0].iov_len = total_len;
4007
4008 return 0;
4009 }
4010
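/*
 * On success *out_data holds a kmemdup'ed copy of the returned
 * FILE_NOTIFY_INFORMATION buffer (the caller must kfree it) and *plen,
 * if supplied, is set to its length.
 */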
4011 int
4012 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
4013 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
4014 u32 completion_filter, u32 max_out_data_len, char **out_data,
4015 u32 *plen /* returned data len */)
4016 {
4017 struct cifs_ses *ses = tcon->ses;
4018 struct TCP_Server_Info *server;
4019 struct smb_rqst rqst;
4020 struct smb2_change_notify_rsp *smb_rsp;
4021 struct kvec iov[1];
4022 struct kvec rsp_iov = {NULL, 0};
4023 int resp_buftype = CIFS_NO_BUFFER;
4024 int flags = 0;
4025 int rc = 0;
4026 int retries = 0, cur_sleep = 1;
4027
4028 replay_again:
4029 /* reinitialize for possible replay */
4030 flags = 0;
4031 server = cifs_pick_channel(ses);
4032
4033 cifs_dbg(FYI, "change notify\n");
4034 if (!ses || !server)
4035 return -EIO;
4036
4037 if (smb3_encryption_required(tcon))
4038 flags |= CIFS_TRANSFORM_REQ;
4039
4040 memset(&rqst, 0, sizeof(struct smb_rqst));
4041 memset(&iov, 0, sizeof(iov));
4042 if (plen)
4043 *plen = 0;
4044
4045 rqst.rq_iov = iov;
4046 rqst.rq_nvec = 1;
4047
4048 rc = SMB2_notify_init(xid, &rqst, tcon, server,
4049 persistent_fid, volatile_fid,
4050 completion_filter, watch_tree);
4051 if (rc)
4052 goto cnotify_exit;
4053
4054 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
4055 (u8)watch_tree, completion_filter);
4056
4057 if (retries)
4058 smb2_set_replay(server, &rqst);
4059
4060 rc = cifs_send_recv(xid, ses, server,
4061 &rqst, &resp_buftype, flags, &rsp_iov);
4062
4063 if (rc != 0) {
4064 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
4065 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
4066 (u8)watch_tree, completion_filter, rc);
4067 } else {
4068 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
4069 ses->Suid, (u8)watch_tree, completion_filter);
4070 /* validate that notify information is plausible */
4071 if ((rsp_iov.iov_base == NULL) ||
4072 (rsp_iov.iov_len < sizeof(struct smb2_change_notify_rsp) + 1))
4073 goto cnotify_exit;
4074
4075 smb_rsp = (struct smb2_change_notify_rsp *)rsp_iov.iov_base;
4076
4077 smb2_validate_iov(le16_to_cpu(smb_rsp->OutputBufferOffset),
4078 le32_to_cpu(smb_rsp->OutputBufferLength), &rsp_iov,
4079 sizeof(struct file_notify_information));
4080
4081 *out_data = kmemdup((char *)smb_rsp + le16_to_cpu(smb_rsp->OutputBufferOffset),
4082 le32_to_cpu(smb_rsp->OutputBufferLength), GFP_KERNEL);
4083 if (*out_data == NULL) {
4084 rc = -ENOMEM;
4085 goto cnotify_exit;
4086 } else if (plen)
4087 *plen = le32_to_cpu(smb_rsp->OutputBufferLength);
4088 }
4089
4090 cnotify_exit:
4091 if (rqst.rq_iov)
4092 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
4093 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4094
4095 if (is_replayable_error(rc) &&
4096 smb2_should_replay(tcon, &retries, &cur_sleep))
4097 goto replay_again;
4098
4099 return rc;
4100 }
4101
4102
4103
4104 /*
4105 * This is a no-op for now. We're not really interested in the reply, but
4106 * rather in the fact that the server sent one and that server->lstrp
4107 * gets updated.
4108 *
4109 * FIXME: maybe we should consider checking that the reply matches request?
4110 */
4111 static void
4112 smb2_echo_callback(struct mid_q_entry *mid)
4113 {
4114 struct TCP_Server_Info *server = mid->callback_data;
4115 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
4116 struct cifs_credits credits = { .value = 0, .instance = 0 };
4117
4118 if (mid->mid_state == MID_RESPONSE_RECEIVED
4119 || mid->mid_state == MID_RESPONSE_MALFORMED) {
4120 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4121 credits.instance = server->reconnect_instance;
4122 }
4123
4124 release_mid(mid);
4125 add_credits(server, &credits, CIFS_ECHO_OP);
4126 }
4127
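/*
 * Delayed work handler that reconnects tcons and channels after the
 * transport comes back: it walks the sessions on the (primary) server,
 * reconnects any tcon flagged need_reconnect/need_reopen_files, reopens
 * persistent handles, handles channels whose binding session needs
 * reconnecting, and reschedules itself (2*HZ) if anything failed.
 */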
4128 void smb2_reconnect_server(struct work_struct *work)
4129 {
4130 struct TCP_Server_Info *server = container_of(work,
4131 struct TCP_Server_Info, reconnect.work);
4132 struct TCP_Server_Info *pserver;
4133 struct cifs_ses *ses, *ses2;
4134 struct cifs_tcon *tcon, *tcon2;
4135 struct list_head tmp_list, tmp_ses_list;
4136 bool ses_exist = false;
4137 bool tcon_selected = false;
4138 int rc;
4139 bool resched = false;
4140
4141 /* first check if ref count has reached 0, if not inc ref count */
4142 spin_lock(&cifs_tcp_ses_lock);
4143 if (!server->srv_count) {
4144 spin_unlock(&cifs_tcp_ses_lock);
4145 return;
4146 }
4147 server->srv_count++;
4148 spin_unlock(&cifs_tcp_ses_lock);
4149
4150 /* If server is a channel, select the primary channel */
4151 pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
4152
4153 /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
4154 mutex_lock(&pserver->reconnect_mutex);
4155
4156 /* if the server is marked for termination, drop the ref count here */
4157 if (server->terminate) {
4158 cifs_put_tcp_session(server, true);
4159 mutex_unlock(&pserver->reconnect_mutex);
4160 return;
4161 }
4162
4163 INIT_LIST_HEAD(&tmp_list);
4164 INIT_LIST_HEAD(&tmp_ses_list);
4165 cifs_dbg(FYI, "Reconnecting tcons and channels\n");
4166
4167 spin_lock(&cifs_tcp_ses_lock);
4168 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
4169 spin_lock(&ses->ses_lock);
4170 if (ses->ses_status == SES_EXITING) {
4171 spin_unlock(&ses->ses_lock);
4172 continue;
4173 }
4174 spin_unlock(&ses->ses_lock);
4175
4176 tcon_selected = false;
4177
4178 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
4179 if (tcon->need_reconnect || tcon->need_reopen_files) {
4180 tcon->tc_count++;
4181 trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
4182 netfs_trace_tcon_ref_get_reconnect_server);
4183 list_add_tail(&tcon->rlist, &tmp_list);
4184 tcon_selected = true;
4185 }
4186 }
4187 /*
4188 * IPC has the same lifetime as its session and uses its
4189 * refcount.
4190 */
4191 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
4192 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
4193 tcon_selected = true;
4194 cifs_smb_ses_inc_refcount(ses);
4195 }
4196 /*
4197 * handle the case where channel needs to reconnect
4198 * binding session, but tcon is healthy (some other channel
4199 * is active)
4200 */
4201 spin_lock(&ses->chan_lock);
4202 if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
4203 list_add_tail(&ses->rlist, &tmp_ses_list);
4204 ses_exist = true;
4205 cifs_smb_ses_inc_refcount(ses);
4206 }
4207 spin_unlock(&ses->chan_lock);
4208 }
4209 spin_unlock(&cifs_tcp_ses_lock);
4210
4211 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
4212 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4213 if (!rc)
4214 cifs_reopen_persistent_handles(tcon);
4215 else
4216 resched = true;
4217 list_del_init(&tcon->rlist);
4218 if (tcon->ipc)
4219 cifs_put_smb_ses(tcon->ses);
4220 else
4221 cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
4222 }
4223
4224 if (!ses_exist)
4225 goto done;
4226
4227 /* allocate a dummy tcon struct used for reconnect */
4228 tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
4229 if (!tcon) {
4230 resched = true;
4231 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4232 list_del_init(&ses->rlist);
4233 cifs_put_smb_ses(ses);
4234 }
4235 goto done;
4236 }
4237 tcon->status = TID_GOOD;
4238 tcon->dummy = true;
4239
4240 /* now reconnect sessions for necessary channels */
4241 list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
4242 tcon->ses = ses;
4243 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
4244 if (rc)
4245 resched = true;
4246 list_del_init(&ses->rlist);
4247 cifs_put_smb_ses(ses);
4248 }
4249 tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
4250
4251 done:
4252 cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
4253 if (resched)
4254 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
4255 mutex_unlock(&pserver->reconnect_mutex);
4256
4257 /* now we can safely release srv struct */
4258 cifs_put_tcp_session(server, true);
4259 }
4260
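/*
 * Send an asynchronous SMB2 ECHO to keep the connection alive.  If the
 * connection still needs to negotiate, skip the echo and kick the
 * reconnect work instead.
 */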
4261 int
4262 SMB2_echo(struct TCP_Server_Info *server)
4263 {
4264 struct smb2_echo_req *req;
4265 int rc = 0;
4266 struct kvec iov[1];
4267 struct smb_rqst rqst = { .rq_iov = iov,
4268 .rq_nvec = 1 };
4269 unsigned int total_len;
4270
4271 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
4272
4273 spin_lock(&server->srv_lock);
4274 if (server->ops->need_neg &&
4275 server->ops->need_neg(server)) {
4276 spin_unlock(&server->srv_lock);
4277 /* No need to send echo on newly established connections */
4278 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
4279 return rc;
4280 }
4281 spin_unlock(&server->srv_lock);
4282
4283 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
4284 (void **)&req, &total_len);
4285 if (rc)
4286 return rc;
4287
4288 req->hdr.CreditRequest = cpu_to_le16(1);
4289
4290 iov[0].iov_len = total_len;
4291 iov[0].iov_base = (char *)req;
4292
4293 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
4294 server, CIFS_ECHO_OP, NULL);
4295 if (rc)
4296 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
4297
4298 cifs_small_buf_release(req);
4299 return rc;
4300 }
4301
4302 void
4303 SMB2_flush_free(struct smb_rqst *rqst)
4304 {
4305 if (rqst && rqst->rq_iov)
4306 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4307 }
4308
4309 int
4310 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
4311 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4312 u64 persistent_fid, u64 volatile_fid)
4313 {
4314 struct smb2_flush_req *req;
4315 struct kvec *iov = rqst->rq_iov;
4316 unsigned int total_len;
4317 int rc;
4318
4319 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
4320 (void **) &req, &total_len);
4321 if (rc)
4322 return rc;
4323
4324 req->PersistentFileId = persistent_fid;
4325 req->VolatileFileId = volatile_fid;
4326
4327 iov[0].iov_base = (char *)req;
4328 iov[0].iov_len = total_len;
4329
4330 return 0;
4331 }
4332
4333 int
4334 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
4335 u64 volatile_fid)
4336 {
4337 struct cifs_ses *ses = tcon->ses;
4338 struct smb_rqst rqst;
4339 struct kvec iov[1];
4340 struct kvec rsp_iov = {NULL, 0};
4341 struct TCP_Server_Info *server;
4342 int resp_buftype = CIFS_NO_BUFFER;
4343 int flags = 0;
4344 int rc = 0;
4345 int retries = 0, cur_sleep = 1;
4346
4347 replay_again:
4348 /* reinitialize for possible replay */
4349 flags = 0;
4350 server = cifs_pick_channel(ses);
4351
4352 cifs_dbg(FYI, "flush\n");
4353 if (!ses || !(ses->server))
4354 return -EIO;
4355
4356 if (smb3_encryption_required(tcon))
4357 flags |= CIFS_TRANSFORM_REQ;
4358
4359 memset(&rqst, 0, sizeof(struct smb_rqst));
4360 memset(&iov, 0, sizeof(iov));
4361 rqst.rq_iov = iov;
4362 rqst.rq_nvec = 1;
4363
4364 rc = SMB2_flush_init(xid, &rqst, tcon, server,
4365 persistent_fid, volatile_fid);
4366 if (rc)
4367 goto flush_exit;
4368
4369 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
4370
4371 if (retries)
4372 smb2_set_replay(server, &rqst);
4373
4374 rc = cifs_send_recv(xid, ses, server,
4375 &rqst, &resp_buftype, flags, &rsp_iov);
4376
4377 if (rc != 0) {
4378 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
4379 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
4380 rc);
4381 } else
4382 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
4383 ses->Suid);
4384
4385 flush_exit:
4386 SMB2_flush_free(&rqst);
4387 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4388
4389 if (is_replayable_error(rc) &&
4390 smb2_should_replay(tcon, &retries, &cur_sleep))
4391 goto replay_again;
4392
4393 return rc;
4394 }
4395
4396 #ifdef CONFIG_CIFS_SMB_DIRECT
4397 static inline bool smb3_use_rdma_offload(struct cifs_io_parms *io_parms)
4398 {
4399 struct TCP_Server_Info *server = io_parms->server;
4400 struct cifs_tcon *tcon = io_parms->tcon;
4401
4402 /* we can only offload if we're connected */
4403 if (!server || !tcon)
4404 return false;
4405
4406 /* we can only offload on an rdma connection */
4407 if (!server->rdma || !server->smbd_conn)
4408 return false;
4409
4410 /* we don't support signed offload yet */
4411 if (server->sign)
4412 return false;
4413
4414 /* we don't support encrypted offload yet */
4415 if (smb3_encryption_required(tcon))
4416 return false;
4417
4418 /* offload also has its overhead, so only do it if desired */
4419 if (io_parms->length < server->smbd_conn->rdma_readwrite_threshold)
4420 return false;
4421
4422 return true;
4423 }
4424 #endif /* CONFIG_CIFS_SMB_DIRECT */
4425
4426 /*
4427 * To form a chain of read requests, requests in the chain are marked
4428 * CHAINED_REQUEST in request_type; the last one also has END_OF_CHAIN set.
4429 */
4430 static int
4431 smb2_new_read_req(void **buf, unsigned int *total_len,
4432 struct cifs_io_parms *io_parms, struct cifs_io_subrequest *rdata,
4433 unsigned int remaining_bytes, int request_type)
4434 {
4435 int rc = -EACCES;
4436 struct smb2_read_req *req = NULL;
4437 struct smb2_hdr *shdr;
4438 struct TCP_Server_Info *server = io_parms->server;
4439
4440 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
4441 (void **) &req, total_len);
4442 if (rc)
4443 return rc;
4444
4445 if (server == NULL)
4446 return -ECONNABORTED;
4447
4448 shdr = &req->hdr;
4449 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4450
4451 req->PersistentFileId = io_parms->persistent_fid;
4452 req->VolatileFileId = io_parms->volatile_fid;
4453 req->ReadChannelInfoOffset = 0; /* reserved */
4454 req->ReadChannelInfoLength = 0; /* reserved */
4455 req->Channel = 0; /* reserved */
4456 req->MinimumCount = 0;
4457 req->Length = cpu_to_le32(io_parms->length);
4458 req->Offset = cpu_to_le64(io_parms->offset);
4459
4460 trace_smb3_read_enter(rdata ? rdata->rreq->debug_id : 0,
4461 rdata ? rdata->subreq.debug_index : 0,
4462 rdata ? rdata->xid : 0,
4463 io_parms->persistent_fid,
4464 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4465 io_parms->offset, io_parms->length);
4466 #ifdef CONFIG_CIFS_SMB_DIRECT
4467 /*
4468 * If we want to do an RDMA write, fill in and append a
4469 * smbd_buffer_descriptor_v1 to the end of the read request
4470 */
4471 if (rdata && smb3_use_rdma_offload(io_parms)) {
4472 struct smbd_buffer_descriptor_v1 *v1;
4473 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4474
4475 rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
4476 true, need_invalidate);
4477 if (!rdata->mr)
4478 return -EAGAIN;
4479
4480 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4481 if (need_invalidate)
4482 req->Channel = SMB2_CHANNEL_RDMA_V1;
4483 req->ReadChannelInfoOffset =
4484 cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
4485 req->ReadChannelInfoLength =
4486 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
4487 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
4488 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
4489 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
4490 v1->length = cpu_to_le32(rdata->mr->mr->length);
4491
4492 *total_len += sizeof(*v1) - 1;
4493 }
4494 #endif
4495 if (request_type & CHAINED_REQUEST) {
4496 if (!(request_type & END_OF_CHAIN)) {
4497 /* next 8-byte aligned request */
4498 *total_len = ALIGN(*total_len, 8);
4499 shdr->NextCommand = cpu_to_le32(*total_len);
4500 } else /* END_OF_CHAIN */
4501 shdr->NextCommand = 0;
4502 if (request_type & RELATED_REQUEST) {
4503 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
4504 /*
4505 * Related requests use info from previous read request
4506 * in chain.
4507 */
4508 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
4509 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
4510 req->PersistentFileId = (u64)-1;
4511 req->VolatileFileId = (u64)-1;
4512 }
4513 }
4514 if (remaining_bytes > io_parms->length)
4515 req->RemainingBytes = cpu_to_le32(remaining_bytes);
4516 else
4517 req->RemainingBytes = 0;
4518
4519 *buf = req;
4520 return rc;
4521 }
4522
4523 static void smb2_readv_worker(struct work_struct *work)
4524 {
4525 struct cifs_io_subrequest *rdata =
4526 container_of(work, struct cifs_io_subrequest, subreq.work);
4527
4528 netfs_read_subreq_terminated(&rdata->subreq, rdata->result, false);
4529 }
4530
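/*
 * Completion callback for an async read, run from the demultiplex thread:
 * translate the mid state into rdata->result, verify the signature if the
 * response was signed and not decrypted, release any RDMA MR, return the
 * read credits, and defer final subrequest completion to cifsiod_wq via
 * smb2_readv_worker().
 */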
4531 static void
4532 smb2_readv_callback(struct mid_q_entry *mid)
4533 {
4534 struct cifs_io_subrequest *rdata = mid->callback_data;
4535 struct netfs_inode *ictx = netfs_inode(rdata->rreq->inode);
4536 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4537 struct TCP_Server_Info *server = rdata->server;
4538 struct smb2_hdr *shdr =
4539 (struct smb2_hdr *)rdata->iov[0].iov_base;
4540 struct cifs_credits credits = {
4541 .value = 0,
4542 .instance = 0,
4543 .rreq_debug_id = rdata->rreq->debug_id,
4544 .rreq_debug_index = rdata->subreq.debug_index,
4545 };
4546 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1], .rq_nvec = 1 };
4547 unsigned int rreq_debug_id = rdata->rreq->debug_id;
4548 unsigned int subreq_debug_index = rdata->subreq.debug_index;
4549
4550 if (rdata->got_bytes) {
4551 rqst.rq_iter = rdata->subreq.io_iter;
4552 }
4553
4554 WARN_ONCE(rdata->server != mid->server,
4555 "rdata server %p != mid server %p",
4556 rdata->server, mid->server);
4557
4558 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu/%zu\n",
4559 __func__, mid->mid, mid->mid_state, rdata->result,
4560 rdata->got_bytes, rdata->subreq.len - rdata->subreq.transferred);
4561
4562 switch (mid->mid_state) {
4563 case MID_RESPONSE_RECEIVED:
4564 credits.value = le16_to_cpu(shdr->CreditRequest);
4565 credits.instance = server->reconnect_instance;
4566 /* result already set, check signature */
4567 if (server->sign && !mid->decrypted) {
4568 int rc;
4569
4570 iov_iter_truncate(&rqst.rq_iter, rdata->got_bytes);
4571 rc = smb2_verify_signature(&rqst, server);
4572 if (rc)
4573 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
4574 rc);
4575 }
4576 /* FIXME: should this be counted toward the initiating task? */
4577 task_io_account_read(rdata->got_bytes);
4578 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4579 break;
4580 case MID_REQUEST_SUBMITTED:
4581 case MID_RETRY_NEEDED:
4582 __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
4583 rdata->result = -EAGAIN;
4584 if (server->sign && rdata->got_bytes)
4585 /* reset the byte count since we cannot verify the signature */
4586 rdata->got_bytes = 0;
4587 /* FIXME: should this be counted toward the initiating task? */
4588 task_io_account_read(rdata->got_bytes);
4589 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4590 break;
4591 case MID_RESPONSE_MALFORMED:
4592 credits.value = le16_to_cpu(shdr->CreditRequest);
4593 credits.instance = server->reconnect_instance;
4594 fallthrough;
4595 default:
4596 rdata->result = -EIO;
4597 }
4598 #ifdef CONFIG_CIFS_SMB_DIRECT
4599 /*
4600 * If this rdata has a registered memory region, the MR can be freed now.
4601 * MRs need to be freed as soon as I/O finishes to prevent deadlock,
4602 * because they are limited in number and are reused for future I/Os.
4603 */
4604 if (rdata->mr) {
4605 smbd_deregister_mr(rdata->mr);
4606 rdata->mr = NULL;
4607 }
4608 #endif
4609 if (rdata->result && rdata->result != -ENODATA) {
4610 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
4611 trace_smb3_read_err(rdata->rreq->debug_id,
4612 rdata->subreq.debug_index,
4613 rdata->xid,
4614 rdata->req->cfile->fid.persistent_fid,
4615 tcon->tid, tcon->ses->Suid,
4616 rdata->subreq.start + rdata->subreq.transferred,
4617 rdata->subreq.len - rdata->subreq.transferred,
4618 rdata->result);
4619 } else
4620 trace_smb3_read_done(rdata->rreq->debug_id,
4621 rdata->subreq.debug_index,
4622 rdata->xid,
4623 rdata->req->cfile->fid.persistent_fid,
4624 tcon->tid, tcon->ses->Suid,
4625 rdata->subreq.start + rdata->subreq.transferred,
4626 rdata->got_bytes);
4627
4628 if (rdata->result == -ENODATA) {
4629 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4630 rdata->result = 0;
4631 } else {
4632 size_t trans = rdata->subreq.transferred + rdata->got_bytes;
4633 if (trans < rdata->subreq.len &&
4634 rdata->subreq.start + trans == ictx->remote_i_size) {
4635 __set_bit(NETFS_SREQ_HIT_EOF, &rdata->subreq.flags);
4636 rdata->result = 0;
4637 }
4638 }
4639 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value,
4640 server->credits, server->in_flight,
4641 0, cifs_trace_rw_credits_read_response_clear);
4642 rdata->credits.value = 0;
4643 rdata->subreq.transferred += rdata->got_bytes;
4644 trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_progress);
4645 INIT_WORK(&rdata->subreq.work, smb2_readv_worker);
4646 queue_work(cifsiod_wq, &rdata->subreq.work);
4647 release_mid(mid);
4648 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
4649 server->credits, server->in_flight,
4650 credits.value, cifs_trace_rw_credits_read_response_add);
4651 add_credits(server, &credits, 0);
4652 }
4653
4654 /* smb2_async_readv - send an async read, and set up mid to handle result */
4655 int
4656 smb2_async_readv(struct cifs_io_subrequest *rdata)
4657 {
4658 int rc, flags = 0;
4659 char *buf;
4660 struct netfs_io_subrequest *subreq = &rdata->subreq;
4661 struct smb2_hdr *shdr;
4662 struct cifs_io_parms io_parms;
4663 struct smb_rqst rqst = { .rq_iov = rdata->iov,
4664 .rq_nvec = 1 };
4665 struct TCP_Server_Info *server;
4666 struct cifs_tcon *tcon = tlink_tcon(rdata->req->cfile->tlink);
4667 unsigned int total_len;
4668 int credit_request;
4669
4670 cifs_dbg(FYI, "%s: offset=%llu bytes=%zu\n",
4671 __func__, subreq->start, subreq->len);
4672
4673 if (!rdata->server)
4674 rdata->server = cifs_pick_channel(tcon->ses);
4675
4676 io_parms.tcon = tlink_tcon(rdata->req->cfile->tlink);
4677 io_parms.server = server = rdata->server;
4678 io_parms.offset = subreq->start + subreq->transferred;
4679 io_parms.length = subreq->len - subreq->transferred;
4680 io_parms.persistent_fid = rdata->req->cfile->fid.persistent_fid;
4681 io_parms.volatile_fid = rdata->req->cfile->fid.volatile_fid;
4682 io_parms.pid = rdata->req->pid;
4683
4684 rc = smb2_new_read_req(
4685 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
4686 if (rc)
4687 return rc;
4688
4689 if (smb3_encryption_required(io_parms.tcon))
4690 flags |= CIFS_TRANSFORM_REQ;
4691
4692 rdata->iov[0].iov_base = buf;
4693 rdata->iov[0].iov_len = total_len;
4694 rdata->got_bytes = 0;
4695 rdata->result = 0;
4696
4697 shdr = (struct smb2_hdr *)buf;
4698
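/*
 * Charge one credit per SMB2_MAX_BUFFER_SIZE (64K) chunk of the requested
 * length, and ask the server for a few extra credits (charge + 8) to keep
 * the pipeline full, capped so we never exceed server->max_credits.
 */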
4699 if (rdata->credits.value > 0) {
4700 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(io_parms.length,
4701 SMB2_MAX_BUFFER_SIZE));
4702 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
4703 if (server->credits >= server->max_credits)
4704 shdr->CreditRequest = cpu_to_le16(0);
4705 else
4706 shdr->CreditRequest = cpu_to_le16(
4707 min_t(int, server->max_credits -
4708 server->credits, credit_request));
4709
4710 rc = adjust_credits(server, rdata, cifs_trace_rw_credits_call_readv_adjust);
4711 if (rc)
4712 goto async_readv_out;
4713
4714 flags |= CIFS_HAS_CREDITS;
4715 }
4716
4717 rc = cifs_call_async(server, &rqst,
4718 cifs_readv_receive, smb2_readv_callback,
4719 smb3_handle_read_data, rdata, flags,
4720 &rdata->credits);
4721 if (rc) {
4722 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
4723 trace_smb3_read_err(rdata->rreq->debug_id,
4724 subreq->debug_index,
4725 rdata->xid, io_parms.persistent_fid,
4726 io_parms.tcon->tid,
4727 io_parms.tcon->ses->Suid,
4728 io_parms.offset,
4729 subreq->len - subreq->transferred, rc);
4730 }
4731
4732 async_readv_out:
4733 cifs_small_buf_release(buf);
4734 return rc;
4735 }
4736
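/*
 * Synchronous read.  On success *nbytes is the DataLength returned by the
 * server; if *buf is non-NULL the data is copied into it and the response
 * buffer freed, otherwise the response buffer itself is returned via *buf
 * and *buf_type for the caller to free.
 */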
4737 int
4738 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4739 unsigned int *nbytes, char **buf, int *buf_type)
4740 {
4741 struct smb_rqst rqst;
4742 int resp_buftype, rc;
4743 struct smb2_read_req *req = NULL;
4744 struct smb2_read_rsp *rsp = NULL;
4745 struct kvec iov[1];
4746 struct kvec rsp_iov;
4747 unsigned int total_len;
4748 int flags = CIFS_LOG_ERROR;
4749 struct cifs_ses *ses = io_parms->tcon->ses;
4750
4751 if (!io_parms->server)
4752 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4753
4754 *nbytes = 0;
4755 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
4756 if (rc)
4757 return rc;
4758
4759 if (smb3_encryption_required(io_parms->tcon))
4760 flags |= CIFS_TRANSFORM_REQ;
4761
4762 iov[0].iov_base = (char *)req;
4763 iov[0].iov_len = total_len;
4764
4765 memset(&rqst, 0, sizeof(struct smb_rqst));
4766 rqst.rq_iov = iov;
4767 rqst.rq_nvec = 1;
4768
4769 rc = cifs_send_recv(xid, ses, io_parms->server,
4770 &rqst, &resp_buftype, flags, &rsp_iov);
4771 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
4772
4773 if (rc) {
4774 if (rc != -ENODATA) {
4775 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4776 cifs_dbg(VFS, "Send error in read = %d\n", rc);
4777 trace_smb3_read_err(0, 0, xid,
4778 req->PersistentFileId,
4779 io_parms->tcon->tid, ses->Suid,
4780 io_parms->offset, io_parms->length,
4781 rc);
4782 } else
4783 trace_smb3_read_done(0, 0, xid,
4784 req->PersistentFileId, io_parms->tcon->tid,
4785 ses->Suid, io_parms->offset, 0);
4786 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4787 cifs_small_buf_release(req);
4788 return rc == -ENODATA ? 0 : rc;
4789 } else
4790 trace_smb3_read_done(0, 0, xid,
4791 req->PersistentFileId,
4792 io_parms->tcon->tid, ses->Suid,
4793 io_parms->offset, io_parms->length);
4794
4795 cifs_small_buf_release(req);
4796
4797 *nbytes = le32_to_cpu(rsp->DataLength);
4798 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4799 (*nbytes > io_parms->length)) {
4800 cifs_dbg(FYI, "bad length %d for count %d\n",
4801 *nbytes, io_parms->length);
4802 rc = -EIO;
4803 *nbytes = 0;
4804 }
4805
4806 if (*buf) {
4807 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
4808 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4809 } else if (resp_buftype != CIFS_NO_BUFFER) {
4810 *buf = rsp_iov.iov_base;
4811 if (resp_buftype == CIFS_SMALL_BUFFER)
4812 *buf_type = CIFS_SMALL_BUFFER;
4813 else if (resp_buftype == CIFS_LARGE_BUFFER)
4814 *buf_type = CIFS_LARGE_BUFFER;
4815 }
4816 return rc;
4817 }
4818
4819 /*
4820 * Check the mid_state and signature on received buffer (if any), and queue the
4821 * workqueue completion task.
4822 */
4823 static void
4824 smb2_writev_callback(struct mid_q_entry *mid)
4825 {
4826 struct cifs_io_subrequest *wdata = mid->callback_data;
4827 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
4828 struct TCP_Server_Info *server = wdata->server;
4829 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
4830 struct cifs_credits credits = {
4831 .value = 0,
4832 .instance = 0,
4833 .rreq_debug_id = wdata->rreq->debug_id,
4834 .rreq_debug_index = wdata->subreq.debug_index,
4835 };
4836 unsigned int rreq_debug_id = wdata->rreq->debug_id;
4837 unsigned int subreq_debug_index = wdata->subreq.debug_index;
4838 ssize_t result = 0;
4839 size_t written;
4840
4841 WARN_ONCE(wdata->server != mid->server,
4842 "wdata server %p != mid server %p",
4843 wdata->server, mid->server);
4844
4845 switch (mid->mid_state) {
4846 case MID_RESPONSE_RECEIVED:
4847 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4848 credits.instance = server->reconnect_instance;
4849 result = smb2_check_receive(mid, server, 0);
4850 if (result != 0)
4851 break;
4852
4853 written = le32_to_cpu(rsp->DataLength);
4854 /*
4855 * Mask off the high 16 bits when the number of bytes written,
4856 * as returned by the server, is greater than the number of bytes
4857 * requested by the client. OS/2 servers are known to set incorrect
4858 * CountHigh values.
4859 */
4860 if (written > wdata->subreq.len)
4861 written &= 0xFFFF;
4862
4863 cifs_stats_bytes_written(tcon, written);
4864
4865 if (written < wdata->subreq.len)
4866 wdata->result = -ENOSPC;
4867 else
4868 wdata->subreq.len = written;
4869 break;
4870 case MID_REQUEST_SUBMITTED:
4871 case MID_RETRY_NEEDED:
4872 __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
4873 result = -EAGAIN;
4874 break;
4875 case MID_RESPONSE_MALFORMED:
4876 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4877 credits.instance = server->reconnect_instance;
4878 fallthrough;
4879 default:
4880 result = -EIO;
4881 break;
4882 }
4883 #ifdef CONFIG_CIFS_SMB_DIRECT
4884 /*
4885 * If this wdata has a registered memory region, the MR can be freed now.
4886 * The number of MRs available is limited, so it is important to
4887 * recover a used MR as soon as the I/O is finished.  Holding the MR
4888 * longer into the I/O process could result in an I/O deadlock,
4889 * because no MR would be available to send the request on I/O retry.
4890 */
4891 if (wdata->mr) {
4892 smbd_deregister_mr(wdata->mr);
4893 wdata->mr = NULL;
4894 }
4895 #endif
4896 if (result) {
4897 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
4898 trace_smb3_write_err(wdata->rreq->debug_id,
4899 wdata->subreq.debug_index,
4900 wdata->xid,
4901 wdata->req->cfile->fid.persistent_fid,
4902 tcon->tid, tcon->ses->Suid, wdata->subreq.start,
4903 wdata->subreq.len, wdata->result);
4904 if (wdata->result == -ENOSPC)
4905 pr_warn_once("Out of space writing to %s\n",
4906 tcon->tree_name);
4907 } else
4908 trace_smb3_write_done(wdata->rreq->debug_id,
4909 wdata->subreq.debug_index,
4910 wdata->xid,
4911 wdata->req->cfile->fid.persistent_fid,
4912 tcon->tid, tcon->ses->Suid,
4913 wdata->subreq.start, wdata->subreq.len);
4914
4915 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, wdata->credits.value,
4916 server->credits, server->in_flight,
4917 0, cifs_trace_rw_credits_write_response_clear);
4918 wdata->credits.value = 0;
4919 trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
4920 cifs_write_subrequest_terminated(wdata, result ?: written, true);
4921 release_mid(mid);
4922 trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
4923 server->credits, server->in_flight,
4924 credits.value, cifs_trace_rw_credits_write_response_add);
4925 add_credits(server, &credits, 0);
4926 }
4927
4928 /* smb2_async_writev - send an async write, and set up mid to handle result */
4929 void
4930 smb2_async_writev(struct cifs_io_subrequest *wdata)
4931 {
4932 int rc = -EACCES, flags = 0;
4933 struct smb2_write_req *req = NULL;
4934 struct smb2_hdr *shdr;
4935 struct cifs_tcon *tcon = tlink_tcon(wdata->req->cfile->tlink);
4936 struct TCP_Server_Info *server = wdata->server;
4937 struct kvec iov[1];
4938 struct smb_rqst rqst = { };
4939 unsigned int total_len, xid = wdata->xid;
4940 struct cifs_io_parms _io_parms;
4941 struct cifs_io_parms *io_parms = NULL;
4942 int credit_request;
4943
4944 /*
4945 * in future we may get cifs_io_parms passed in from the caller,
4946 * but for now we construct it here...
4947 */
4948 _io_parms = (struct cifs_io_parms) {
4949 .tcon = tcon,
4950 .server = server,
4951 .offset = wdata->subreq.start,
4952 .length = wdata->subreq.len,
4953 .persistent_fid = wdata->req->cfile->fid.persistent_fid,
4954 .volatile_fid = wdata->req->cfile->fid.volatile_fid,
4955 .pid = wdata->req->pid,
4956 };
4957 io_parms = &_io_parms;
4958
4959 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
4960 (void **) &req, &total_len);
4961 if (rc)
4962 goto out;
4963
4964 rqst.rq_iov = iov;
4965 rqst.rq_iter = wdata->subreq.io_iter;
4966
4967 rqst.rq_iov[0].iov_len = total_len - 1;
4968 rqst.rq_iov[0].iov_base = (char *)req;
4969 rqst.rq_nvec += 1;
4970
4971 if (smb3_encryption_required(tcon))
4972 flags |= CIFS_TRANSFORM_REQ;
4973
4974 shdr = (struct smb2_hdr *)req;
4975 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4976
4977 req->PersistentFileId = io_parms->persistent_fid;
4978 req->VolatileFileId = io_parms->volatile_fid;
4979 req->WriteChannelInfoOffset = 0;
4980 req->WriteChannelInfoLength = 0;
4981 req->Channel = SMB2_CHANNEL_NONE;
4982 req->Length = cpu_to_le32(io_parms->length);
4983 req->Offset = cpu_to_le64(io_parms->offset);
4984 req->DataOffset = cpu_to_le16(
4985 offsetof(struct smb2_write_req, Buffer));
4986 req->RemainingBytes = 0;
4987
4988 trace_smb3_write_enter(wdata->rreq->debug_id,
4989 wdata->subreq.debug_index,
4990 wdata->xid,
4991 io_parms->persistent_fid,
4992 io_parms->tcon->tid,
4993 io_parms->tcon->ses->Suid,
4994 io_parms->offset,
4995 io_parms->length);
4996
4997 #ifdef CONFIG_CIFS_SMB_DIRECT
4998 /*
4999 * If we want to do a server RDMA read, fill in and append a
5000 * smbd_buffer_descriptor_v1 to the end of the write request
5001 */
5002 if (smb3_use_rdma_offload(io_parms)) {
5003 struct smbd_buffer_descriptor_v1 *v1;
5004 bool need_invalidate = server->dialect == SMB30_PROT_ID;
5005
5006 wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
5007 false, need_invalidate);
5008 if (!wdata->mr) {
5009 rc = -EAGAIN;
5010 goto async_writev_out;
5011 }
5012 /* For RDMA read, I/O size is in RemainingBytes not in Length */
5013 req->RemainingBytes = req->Length;
5014 req->Length = 0;
5015 req->DataOffset = 0;
5016 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
5017 if (need_invalidate)
5018 req->Channel = SMB2_CHANNEL_RDMA_V1;
5019 req->WriteChannelInfoOffset =
5020 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
5021 req->WriteChannelInfoLength =
5022 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
5023 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
5024 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
5025 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
5026 v1->length = cpu_to_le32(wdata->mr->mr->length);
5027
5028 rqst.rq_iov[0].iov_len += sizeof(*v1);
5029
5030 /*
5031 * We keep wdata->subreq.io_iter,
5032 * but we have to truncate rqst.rq_iter
5033 */
5034 iov_iter_truncate(&rqst.rq_iter, 0);
5035 }
5036 #endif
5037
5038 if (test_bit(NETFS_SREQ_RETRYING, &wdata->subreq.flags))
5039 smb2_set_replay(server, &rqst);
5040
5041 cifs_dbg(FYI, "async write at %llu %u bytes iter=%zx\n",
5042 io_parms->offset, io_parms->length, iov_iter_count(&wdata->subreq.io_iter));
5043
5044 if (wdata->credits.value > 0) {
5045 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->subreq.len,
5046 SMB2_MAX_BUFFER_SIZE));
5047 credit_request = le16_to_cpu(shdr->CreditCharge) + 8;
5048 if (server->credits >= server->max_credits)
5049 shdr->CreditRequest = cpu_to_le16(0);
5050 else
5051 shdr->CreditRequest = cpu_to_le16(
5052 min_t(int, server->max_credits -
5053 server->credits, credit_request));
5054
5055 rc = adjust_credits(server, wdata, cifs_trace_rw_credits_call_writev_adjust);
5056 if (rc)
5057 goto async_writev_out;
5058
5059 flags |= CIFS_HAS_CREDITS;
5060 }
5061
5062 /* XXX: compression + encryption is unsupported for now */
5063 if (((flags & CIFS_TRANSFORM_REQ) != CIFS_TRANSFORM_REQ) && should_compress(tcon, &rqst))
5064 flags |= CIFS_COMPRESS_REQ;
5065
5066 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
5067 wdata, flags, &wdata->credits);
5068 /* Can't touch wdata if rc == 0 */
5069 if (rc) {
5070 trace_smb3_write_err(wdata->rreq->debug_id,
5071 wdata->subreq.debug_index,
5072 xid,
5073 io_parms->persistent_fid,
5074 io_parms->tcon->tid,
5075 io_parms->tcon->ses->Suid,
5076 io_parms->offset,
5077 io_parms->length,
5078 rc);
5079 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
5080 }
5081
5082 async_writev_out:
5083 cifs_small_buf_release(req);
5084 out:
5085 if (rc) {
5086 trace_smb3_rw_credits(wdata->rreq->debug_id,
5087 wdata->subreq.debug_index,
5088 wdata->credits.value,
5089 server->credits, server->in_flight,
5090 -(int)wdata->credits.value,
5091 cifs_trace_rw_credits_write_response_clear);
5092 add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
5093 cifs_write_subrequest_terminated(wdata, rc, true);
5094 }
5095 }
5096
5097 /*
5098 * SMB2_write() is passed a kvec array via @iov; @n_vec must be at least 1
5099 * and counts the elements carrying data to write, which start at position 1
5100 * in the iov array (iov[0] is reserved for the request header built here).
5101 * The total number of data bytes is given by io_parms->length.
5102 */
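/*
 * Illustrative caller sketch (hypothetical, not a caller in this file): the
 * caller leaves iov[0] free for the header that SMB2_write() fills in and
 * places the payload in iov[1..n_vec]:
 *
 *	struct kvec iov[2] = {};
 *	unsigned int written = 0;
 *
 *	iov[1].iov_base = data;
 *	iov[1].iov_len  = io_parms.length;
 *	rc = SMB2_write(xid, &io_parms, &written, iov, 1);
 */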
5103 int
5104 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
5105 unsigned int *nbytes, struct kvec *iov, int n_vec)
5106 {
5107 struct smb_rqst rqst;
5108 int rc = 0;
5109 struct smb2_write_req *req = NULL;
5110 struct smb2_write_rsp *rsp = NULL;
5111 int resp_buftype;
5112 struct kvec rsp_iov;
5113 int flags = 0;
5114 unsigned int total_len;
5115 struct TCP_Server_Info *server;
5116 int retries = 0, cur_sleep = 1;
5117
5118 replay_again:
5119 /* reinitialize for possible replay */
5120 flags = 0;
5121 *nbytes = 0;
5122 if (!io_parms->server)
5123 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
5124 server = io_parms->server;
5125 if (server == NULL)
5126 return -ECONNABORTED;
5127
5128 if (n_vec < 1)
5129 return rc;
5130
5131 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
5132 (void **) &req, &total_len);
5133 if (rc)
5134 return rc;
5135
5136 if (smb3_encryption_required(io_parms->tcon))
5137 flags |= CIFS_TRANSFORM_REQ;
5138
5139 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
5140
5141 req->PersistentFileId = io_parms->persistent_fid;
5142 req->VolatileFileId = io_parms->volatile_fid;
5143 req->WriteChannelInfoOffset = 0;
5144 req->WriteChannelInfoLength = 0;
5145 req->Channel = 0;
5146 req->Length = cpu_to_le32(io_parms->length);
5147 req->Offset = cpu_to_le64(io_parms->offset);
5148 req->DataOffset = cpu_to_le16(
5149 offsetof(struct smb2_write_req, Buffer));
5150 req->RemainingBytes = 0;
5151
5152 trace_smb3_write_enter(0, 0, xid, io_parms->persistent_fid,
5153 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
5154 io_parms->offset, io_parms->length);
5155
5156 iov[0].iov_base = (char *)req;
5157 /* 1 for Buffer */
5158 iov[0].iov_len = total_len - 1;
5159
5160 memset(&rqst, 0, sizeof(struct smb_rqst));
5161 rqst.rq_iov = iov;
5162 rqst.rq_nvec = n_vec + 1;
5163
5164 if (retries)
5165 smb2_set_replay(server, &rqst);
5166
5167 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
5168 &rqst,
5169 &resp_buftype, flags, &rsp_iov);
5170 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
5171
5172 if (rc) {
5173 trace_smb3_write_err(0, 0, xid,
5174 req->PersistentFileId,
5175 io_parms->tcon->tid,
5176 io_parms->tcon->ses->Suid,
5177 io_parms->offset, io_parms->length, rc);
5178 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
5179 cifs_dbg(VFS, "Send error in write = %d\n", rc);
5180 } else {
5181 *nbytes = le32_to_cpu(rsp->DataLength);
5182 cifs_stats_bytes_written(io_parms->tcon, *nbytes);
5183 trace_smb3_write_done(0, 0, xid,
5184 req->PersistentFileId,
5185 io_parms->tcon->tid,
5186 io_parms->tcon->ses->Suid,
5187 io_parms->offset, *nbytes);
5188 }
5189
5190 cifs_small_buf_release(req);
5191 free_rsp_buf(resp_buftype, rsp);
5192
5193 if (is_replayable_error(rc) &&
5194 smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
5195 goto replay_again;
5196
5197 return rc;
5198 }
5199
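/*
 * Size of an NT SID as laid out in the POSIX info payload: 1 byte revision,
 * 1 byte subauthority count, 6 bytes of identifier authority, then 4 bytes
 * per subauthority (1-15 of them).  Returns -1 if the SID would run past @end.
 */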
5200 int posix_info_sid_size(const void *beg, const void *end)
5201 {
5202 size_t subauth;
5203 int total;
5204
5205 if (beg + 2 > end)
5206 return -1;
5207
5208 subauth = *(u8 *)(beg+1);
5209 if (subauth < 1 || subauth > 15)
5210 return -1;
5211
5212 total = 1 + 1 + 6 + 4*subauth;
5213 if (beg + total > end)
5214 return -1;
5215
5216 return total;
5217 }
5218
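/*
 * Parse one SMB3.1.1 POSIX directory entry: a fixed-size struct
 * smb2_posix_info followed by a variable-length owner SID, group SID, a
 * 4-byte name length and the name itself.  Returns the entry's total length,
 * or -1 if any part would overflow @end.
 */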
5219 int posix_info_parse(const void *beg, const void *end,
5220 struct smb2_posix_info_parsed *out)
5221
5222 {
5223 int total_len = 0;
5224 int owner_len, group_len;
5225 int name_len;
5226 const void *owner_sid;
5227 const void *group_sid;
5228 const void *name;
5229
5230 /* if no end bound given, assume payload to be correct */
5231 if (!end) {
5232 const struct smb2_posix_info *p = beg;
5233
5234 end = beg + le32_to_cpu(p->NextEntryOffset);
5235 /* last element will have a 0 offset, pick a sensible bound */
5236 if (end == beg)
5237 end += 0xFFFF;
5238 }
5239
5240 /* check base buf */
5241 if (beg + sizeof(struct smb2_posix_info) > end)
5242 return -1;
5243 total_len = sizeof(struct smb2_posix_info);
5244
5245 /* check owner sid */
5246 owner_sid = beg + total_len;
5247 owner_len = posix_info_sid_size(owner_sid, end);
5248 if (owner_len < 0)
5249 return -1;
5250 total_len += owner_len;
5251
5252 /* check group sid */
5253 group_sid = beg + total_len;
5254 group_len = posix_info_sid_size(group_sid, end);
5255 if (group_len < 0)
5256 return -1;
5257 total_len += group_len;
5258
5259 /* check name len */
5260 if (beg + total_len + 4 > end)
5261 return -1;
5262 name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
5263 if (name_len < 1 || name_len > 0xFFFF)
5264 return -1;
5265 total_len += 4;
5266
5267 /* check name */
5268 name = beg + total_len;
5269 if (name + name_len > end)
5270 return -1;
5271 total_len += name_len;
5272
5273 if (out) {
5274 out->base = beg;
5275 out->size = total_len;
5276 out->name_len = name_len;
5277 out->name = name;
5278 memcpy(&out->owner, owner_sid, owner_len);
5279 memcpy(&out->group, group_sid, group_len);
5280 }
5281 return total_len;
5282 }
5283
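/* length of the variable part (SIDs + name) that follows struct smb2_posix_info */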
5284 static int posix_info_extra_size(const void *beg, const void *end)
5285 {
5286 int len = posix_info_parse(beg, end, NULL);
5287
5288 if (len < 0)
5289 return -1;
5290 return len - sizeof(struct smb2_posix_info);
5291 }
5292
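/*
 * Count the directory entries that fit entirely inside the response buffer,
 * following NextEntryOffset links, and set *lastentry to the start of the
 * last complete entry.
 */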
5293 static unsigned int
5294 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
5295 size_t size)
5296 {
5297 int len;
5298 unsigned int entrycount = 0;
5299 unsigned int next_offset = 0;
5300 char *entryptr;
5301 FILE_DIRECTORY_INFO *dir_info;
5302
5303 if (bufstart == NULL)
5304 return 0;
5305
5306 entryptr = bufstart;
5307
5308 while (1) {
5309 if (entryptr + next_offset < entryptr ||
5310 entryptr + next_offset > end_of_buf ||
5311 entryptr + next_offset + size > end_of_buf) {
5312 cifs_dbg(VFS, "malformed search entry would overflow\n");
5313 break;
5314 }
5315
5316 entryptr = entryptr + next_offset;
5317 dir_info = (FILE_DIRECTORY_INFO *)entryptr;
5318
5319 if (infotype == SMB_FIND_FILE_POSIX_INFO)
5320 len = posix_info_extra_size(entryptr, end_of_buf);
5321 else
5322 len = le32_to_cpu(dir_info->FileNameLength);
5323
5324 if (len < 0 ||
5325 entryptr + len < entryptr ||
5326 entryptr + len > end_of_buf ||
5327 entryptr + len + size > end_of_buf) {
5328 cifs_dbg(VFS, "directory entry name would overflow end of buffer %p\n",
5329 end_of_buf);
5330 break;
5331 }
5332
5333 *lastentry = entryptr;
5334 entrycount++;
5335
5336 next_offset = le32_to_cpu(dir_info->NextEntryOffset);
5337 if (!next_offset)
5338 break;
5339 }
5340
5341 return entrycount;
5342 }
5343
5344 /*
5345 * Readdir/FindFirst
5346 */
5347 int SMB2_query_directory_init(const unsigned int xid,
5348 struct cifs_tcon *tcon,
5349 struct TCP_Server_Info *server,
5350 struct smb_rqst *rqst,
5351 u64 persistent_fid, u64 volatile_fid,
5352 int index, int info_level)
5353 {
5354 struct smb2_query_directory_req *req;
5355 unsigned char *bufptr;
5356 __le16 asterisk = cpu_to_le16('*');
5357 unsigned int output_size = CIFSMaxBufSize -
5358 MAX_SMB2_CREATE_RESPONSE_SIZE -
5359 MAX_SMB2_CLOSE_RESPONSE_SIZE;
5360 unsigned int total_len;
5361 struct kvec *iov = rqst->rq_iov;
5362 int len, rc;
5363
5364 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
5365 (void **) &req, &total_len);
5366 if (rc)
5367 return rc;
5368
5369 switch (info_level) {
5370 case SMB_FIND_FILE_DIRECTORY_INFO:
5371 req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
5372 break;
5373 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5374 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
5375 break;
5376 case SMB_FIND_FILE_POSIX_INFO:
5377 req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
5378 break;
5379 case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5380 req->FileInformationClass = FILE_FULL_DIRECTORY_INFORMATION;
5381 break;
5382 default:
5383 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5384 info_level);
5385 return -EINVAL;
5386 }
5387
5388 req->FileIndex = cpu_to_le32(index);
5389 req->PersistentFileId = persistent_fid;
5390 req->VolatileFileId = volatile_fid;
5391
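/* search pattern is a single '*' wildcard: one UTF-16 code unit, 2 bytes */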
5392 len = 0x2;
5393 bufptr = req->Buffer;
5394 memcpy(bufptr, &asterisk, len);
5395
5396 req->FileNameOffset =
5397 cpu_to_le16(sizeof(struct smb2_query_directory_req));
5398 req->FileNameLength = cpu_to_le16(len);
5399 /*
5400 * BB could be 30 bytes or so longer if we used SMB2 specific
5401 * buffer lengths, but this is safe and close enough.
5402 */
5403 output_size = min_t(unsigned int, output_size, server->maxBuf);
5404 output_size = min_t(unsigned int, output_size, 2 << 15);
5405 req->OutputBufferLength = cpu_to_le32(output_size);
5406
5407 iov[0].iov_base = (char *)req;
5408 /* 1 for Buffer */
5409 iov[0].iov_len = total_len - 1;
5410
5411 iov[1].iov_base = (char *)(req->Buffer);
5412 iov[1].iov_len = len;
5413
5414 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
5415 tcon->ses->Suid, index, output_size);
5416
5417 return 0;
5418 }
5419
5420 void SMB2_query_directory_free(struct smb_rqst *rqst)
5421 {
5422 if (rqst && rqst->rq_iov) {
5423 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
5424 }
5425 }
5426
5427 int
5428 smb2_parse_query_directory(struct cifs_tcon *tcon,
5429 struct kvec *rsp_iov,
5430 int resp_buftype,
5431 struct cifs_search_info *srch_inf)
5432 {
5433 struct smb2_query_directory_rsp *rsp;
5434 size_t info_buf_size;
5435 char *end_of_smb;
5436 int rc;
5437
5438 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
5439
5440 switch (srch_inf->info_level) {
5441 case SMB_FIND_FILE_DIRECTORY_INFO:
5442 info_buf_size = sizeof(FILE_DIRECTORY_INFO);
5443 break;
5444 case SMB_FIND_FILE_ID_FULL_DIR_INFO:
5445 info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO);
5446 break;
5447 case SMB_FIND_FILE_POSIX_INFO:
5448 /* note that posix payloads are variable size */
5449 info_buf_size = sizeof(struct smb2_posix_info);
5450 break;
5451 case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
5452 info_buf_size = sizeof(FILE_FULL_DIRECTORY_INFO);
5453 break;
5454 default:
5455 cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
5456 srch_inf->info_level);
5457 return -EINVAL;
5458 }
5459
5460 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5461 le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
5462 info_buf_size);
5463 if (rc) {
5464 cifs_tcon_dbg(VFS, "bad info payload\n");
5465 return rc;
5466 }
5467
5468 srch_inf->unicode = true;
5469
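/*
 * Ownership of the response buffer passes to srch_inf->ntwrk_buf_start, so
 * release any buffer still held from a previous search page first.  On
 * success the caller must not free the response (SMB2_query_directory resets
 * resp_buftype to CIFS_NO_BUFFER for exactly this reason).
 */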
5470 if (srch_inf->ntwrk_buf_start) {
5471 if (srch_inf->smallBuf)
5472 cifs_small_buf_release(srch_inf->ntwrk_buf_start);
5473 else
5474 cifs_buf_release(srch_inf->ntwrk_buf_start);
5475 }
5476 srch_inf->ntwrk_buf_start = (char *)rsp;
5477 srch_inf->srch_entries_start = srch_inf->last_entry =
5478 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
5479 end_of_smb = rsp_iov->iov_len + (char *)rsp;
5480
5481 srch_inf->entries_in_buffer = num_entries(
5482 srch_inf->info_level,
5483 srch_inf->srch_entries_start,
5484 end_of_smb,
5485 &srch_inf->last_entry,
5486 info_buf_size);
5487
5488 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
5489 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
5490 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
5491 srch_inf->srch_entries_start, srch_inf->last_entry);
5492 if (resp_buftype == CIFS_LARGE_BUFFER)
5493 srch_inf->smallBuf = false;
5494 else if (resp_buftype == CIFS_SMALL_BUFFER)
5495 srch_inf->smallBuf = true;
5496 else
5497 cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
5498
5499 return 0;
5500 }
5501
5502 int
5503 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
5504 u64 persistent_fid, u64 volatile_fid, int index,
5505 struct cifs_search_info *srch_inf)
5506 {
5507 struct smb_rqst rqst;
5508 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
5509 struct smb2_query_directory_rsp *rsp = NULL;
5510 int resp_buftype = CIFS_NO_BUFFER;
5511 struct kvec rsp_iov;
5512 int rc = 0;
5513 struct cifs_ses *ses = tcon->ses;
5514 struct TCP_Server_Info *server;
5515 int flags = 0;
5516 int retries = 0, cur_sleep = 1;
5517
5518 replay_again:
5519 /* reinitialize for possible replay */
5520 flags = 0;
5521 server = cifs_pick_channel(ses);
5522
5523 if (!ses || !(ses->server))
5524 return -EIO;
5525
5526 if (smb3_encryption_required(tcon))
5527 flags |= CIFS_TRANSFORM_REQ;
5528
5529 memset(&rqst, 0, sizeof(struct smb_rqst));
5530 memset(&iov, 0, sizeof(iov));
5531 rqst.rq_iov = iov;
5532 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
5533
5534 rc = SMB2_query_directory_init(xid, tcon, server,
5535 &rqst, persistent_fid,
5536 volatile_fid, index,
5537 srch_inf->info_level);
5538 if (rc)
5539 goto qdir_exit;
5540
5541 if (retries)
5542 smb2_set_replay(server, &rqst);
5543
5544 rc = cifs_send_recv(xid, ses, server,
5545 &rqst, &resp_buftype, flags, &rsp_iov);
5546 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
5547
5548 if (rc) {
5549 if (rc == -ENODATA &&
5550 rsp->hdr.Status == STATUS_NO_MORE_FILES) {
5551 trace_smb3_query_dir_done(xid, persistent_fid,
5552 tcon->tid, tcon->ses->Suid, index, 0);
5553 srch_inf->endOfSearch = true;
5554 rc = 0;
5555 } else {
5556 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5557 tcon->ses->Suid, index, 0, rc);
5558 cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
5559 }
5560 goto qdir_exit;
5561 }
5562
5563 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
5564 srch_inf);
5565 if (rc) {
5566 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
5567 tcon->ses->Suid, index, 0, rc);
5568 goto qdir_exit;
5569 }
5570 resp_buftype = CIFS_NO_BUFFER;
5571
5572 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
5573 tcon->ses->Suid, index, srch_inf->entries_in_buffer);
5574
5575 qdir_exit:
5576 SMB2_query_directory_free(&rqst);
5577 free_rsp_buf(resp_buftype, rsp);
5578
5579 if (is_replayable_error(rc) &&
5580 smb2_should_replay(tcon, &retries, &cur_sleep))
5581 goto replay_again;
5582
5583 return rc;
5584 }
5585
5586 int
5587 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
5588 struct smb_rqst *rqst,
5589 u64 persistent_fid, u64 volatile_fid, u32 pid,
5590 u8 info_class, u8 info_type, u32 additional_info,
5591 void **data, unsigned int *size)
5592 {
5593 struct smb2_set_info_req *req;
5594 struct kvec *iov = rqst->rq_iov;
5595 unsigned int i, total_len;
5596 int rc;
5597
5598 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
5599 (void **) &req, &total_len);
5600 if (rc)
5601 return rc;
5602
5603 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
5604 req->InfoType = info_type;
5605 req->FileInfoClass = info_class;
5606 req->PersistentFileId = persistent_fid;
5607 req->VolatileFileId = volatile_fid;
5608 req->AdditionalInformation = cpu_to_le32(additional_info);
5609
5610 req->BufferOffset = cpu_to_le16(sizeof(struct smb2_set_info_req));
5611 req->BufferLength = cpu_to_le32(*size);
5612
5613 memcpy(req->Buffer, *data, *size);
5614 total_len += *size;
5615
5616 iov[0].iov_base = (char *)req;
5617 /* 1 for Buffer */
5618 iov[0].iov_len = total_len - 1;
5619
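/*
 * data[0]/size[0] were copied into the request buffer above; any further
 * buffers are sent as separate iovecs, with their sizes added to BufferLength.
 */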
5620 for (i = 1; i < rqst->rq_nvec; i++) {
5621 le32_add_cpu(&req->BufferLength, size[i]);
5622 iov[i].iov_base = (char *)data[i];
5623 iov[i].iov_len = size[i];
5624 }
5625
5626 return 0;
5627 }
5628
5629 void
5630 SMB2_set_info_free(struct smb_rqst *rqst)
5631 {
5632 if (rqst && rqst->rq_iov)
5633 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
5634 }
5635
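/*
 * Build, send and free a single SMB2 SET_INFO request.  Replayable errors
 * (e.g. after a reconnect) retry the whole exchange via replay_again.
 */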
5636 static int
5637 send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
5638 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
5639 u8 info_type, u32 additional_info, unsigned int num,
5640 void **data, unsigned int *size)
5641 {
5642 struct smb_rqst rqst;
5643 struct smb2_set_info_rsp *rsp = NULL;
5644 struct kvec *iov;
5645 struct kvec rsp_iov;
5646 int rc = 0;
5647 int resp_buftype;
5648 struct cifs_ses *ses = tcon->ses;
5649 struct TCP_Server_Info *server;
5650 int flags = 0;
5651 int retries = 0, cur_sleep = 1;
5652
5653 replay_again:
5654 /* reinitialize for possible replay */
5655 flags = 0;
5656 server = cifs_pick_channel(ses);
5657
5658 if (!ses || !server)
5659 return -EIO;
5660
5661 if (!num)
5662 return -EINVAL;
5663
5664 if (smb3_encryption_required(tcon))
5665 flags |= CIFS_TRANSFORM_REQ;
5666
5667 iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
5668 if (!iov)
5669 return -ENOMEM;
5670
5671 memset(&rqst, 0, sizeof(struct smb_rqst));
5672 rqst.rq_iov = iov;
5673 rqst.rq_nvec = num;
5674
5675 rc = SMB2_set_info_init(tcon, server,
5676 &rqst, persistent_fid, volatile_fid, pid,
5677 info_class, info_type, additional_info,
5678 data, size);
5679 if (rc) {
5680 kfree(iov);
5681 return rc;
5682 }
5683
5684 if (retries)
5685 smb2_set_replay(server, &rqst);
5686
5687 rc = cifs_send_recv(xid, ses, server,
5688 &rqst, &resp_buftype, flags,
5689 &rsp_iov);
5690 SMB2_set_info_free(&rqst);
5691 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
5692
5693 if (rc != 0) {
5694 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
5695 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
5696 ses->Suid, info_class, (__u32)info_type, rc);
5697 }
5698
5699 free_rsp_buf(resp_buftype, rsp);
5700 kfree(iov);
5701
5702 if (is_replayable_error(rc) &&
5703 smb2_should_replay(tcon, &retries, &cur_sleep))
5704 goto replay_again;
5705
5706 return rc;
5707 }
5708
5709 int
5710 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
5711 u64 volatile_fid, u32 pid, loff_t new_eof)
5712 {
5713 struct smb2_file_eof_info info;
5714 void *data;
5715 unsigned int size;
5716
5717 info.EndOfFile = cpu_to_le64(new_eof);
5718
5719 data = &info;
5720 size = sizeof(struct smb2_file_eof_info);
5721
5722 trace_smb3_set_eof(xid, persistent_fid, tcon->tid, tcon->ses->Suid, new_eof);
5723
5724 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5725 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
5726 0, 1, &data, &size);
5727 }
5728
5729 int
5730 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
5731 u64 persistent_fid, u64 volatile_fid,
5732 struct smb_ntsd *pnntsd, int pacllen, int aclflag)
5733 {
5734 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5735 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
5736 1, (void **)&pnntsd, &pacllen);
5737 }
5738
5739 int
5740 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
5741 u64 persistent_fid, u64 volatile_fid,
5742 struct smb2_file_full_ea_info *buf, int len)
5743 {
5744 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5745 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
5746 0, 1, (void **)&buf, &len);
5747 }
5748
5749 int
5750 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
5751 const u64 persistent_fid, const u64 volatile_fid,
5752 __u8 oplock_level)
5753 {
5754 struct smb_rqst rqst;
5755 int rc;
5756 struct smb2_oplock_break *req = NULL;
5757 struct cifs_ses *ses = tcon->ses;
5758 struct TCP_Server_Info *server;
5759 int flags = CIFS_OBREAK_OP;
5760 unsigned int total_len;
5761 struct kvec iov[1];
5762 struct kvec rsp_iov;
5763 int resp_buf_type;
5764 int retries = 0, cur_sleep = 1;
5765
5766 replay_again:
5767 /* reinitialize for possible replay */
5768 flags = CIFS_OBREAK_OP;
5769 server = cifs_pick_channel(ses);
5770
5771 cifs_dbg(FYI, "SMB2_oplock_break\n");
5772 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5773 (void **) &req, &total_len);
5774 if (rc)
5775 return rc;
5776
5777 if (smb3_encryption_required(tcon))
5778 flags |= CIFS_TRANSFORM_REQ;
5779
5780 req->VolatileFid = volatile_fid;
5781 req->PersistentFid = persistent_fid;
5782 req->OplockLevel = oplock_level;
5783 req->hdr.CreditRequest = cpu_to_le16(1);
5784
5785 flags |= CIFS_NO_RSP_BUF;
5786
5787 iov[0].iov_base = (char *)req;
5788 iov[0].iov_len = total_len;
5789
5790 memset(&rqst, 0, sizeof(struct smb_rqst));
5791 rqst.rq_iov = iov;
5792 rqst.rq_nvec = 1;
5793
5794 if (retries)
5795 smb2_set_replay(server, &rqst);
5796
5797 rc = cifs_send_recv(xid, ses, server,
5798 &rqst, &resp_buf_type, flags, &rsp_iov);
5799 cifs_small_buf_release(req);
5800 if (rc) {
5801 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
5802 cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
5803 }
5804
5805 if (is_replayable_error(rc) &&
5806 smb2_should_replay(tcon, &retries, &cur_sleep))
5807 goto replay_again;
5808
5809 return rc;
5810 }
5811
5812 void
5813 smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
5814 struct kstatfs *kst)
5815 {
5816 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
5817 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
5818 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
5819 kst->f_bfree = kst->f_bavail =
5820 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
5821 return;
5822 }
5823
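/*
 * Convert a POSIX filesystem info response to kstatfs.  Fields the server
 * reports as all-ones (-1) are treated as "not provided": f_bavail falls
 * back to f_bfree, and f_files/f_ffree are left untouched.
 */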
5824 static void
5825 copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
5826 struct kstatfs *kst)
5827 {
5828 kst->f_bsize = le32_to_cpu(response_data->BlockSize);
5829 kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
5830 kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
5831 if (response_data->UserBlocksAvail == cpu_to_le64(-1))
5832 kst->f_bavail = kst->f_bfree;
5833 else
5834 kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
5835 if (response_data->TotalFileNodes != cpu_to_le64(-1))
5836 kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
5837 if (response_data->FreeFileNodes != cpu_to_le64(-1))
5838 kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
5839
5840 return;
5841 }
5842
5843 static int
5844 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
5845 struct TCP_Server_Info *server,
5846 int level, int outbuf_len, u64 persistent_fid,
5847 u64 volatile_fid)
5848 {
5849 int rc;
5850 struct smb2_query_info_req *req;
5851 unsigned int total_len;
5852
5853 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
5854
5855 if ((tcon->ses == NULL) || server == NULL)
5856 return -EIO;
5857
5858 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
5859 (void **) &req, &total_len);
5860 if (rc)
5861 return rc;
5862
5863 req->InfoType = SMB2_O_INFO_FILESYSTEM;
5864 req->FileInfoClass = level;
5865 req->PersistentFileId = persistent_fid;
5866 req->VolatileFileId = volatile_fid;
5867 /* 1 for pad */
5868 req->InputBufferOffset =
5869 cpu_to_le16(sizeof(struct smb2_query_info_req));
5870 req->OutputBufferLength = cpu_to_le32(
5871 outbuf_len + sizeof(struct smb2_query_info_rsp));
5872
5873 iov->iov_base = (char *)req;
5874 iov->iov_len = total_len;
5875 return 0;
5876 }
5877
5878 static inline void free_qfs_info_req(struct kvec *iov)
5879 {
5880 cifs_buf_release(iov->iov_base);
5881 }
5882
5883 int
5884 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
5885 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5886 {
5887 struct smb_rqst rqst;
5888 struct smb2_query_info_rsp *rsp = NULL;
5889 struct kvec iov;
5890 struct kvec rsp_iov;
5891 int rc = 0;
5892 int resp_buftype;
5893 struct cifs_ses *ses = tcon->ses;
5894 struct TCP_Server_Info *server;
5895 FILE_SYSTEM_POSIX_INFO *info = NULL;
5896 int flags = 0;
5897 int retries = 0, cur_sleep = 1;
5898
5899 replay_again:
5900 /* reinitialize for possible replay */
5901 flags = 0;
5902 server = cifs_pick_channel(ses);
5903
5904 rc = build_qfs_info_req(&iov, tcon, server,
5905 FS_POSIX_INFORMATION,
5906 sizeof(FILE_SYSTEM_POSIX_INFO),
5907 persistent_fid, volatile_fid);
5908 if (rc)
5909 return rc;
5910
5911 if (smb3_encryption_required(tcon))
5912 flags |= CIFS_TRANSFORM_REQ;
5913
5914 memset(&rqst, 0, sizeof(struct smb_rqst));
5915 rqst.rq_iov = &iov;
5916 rqst.rq_nvec = 1;
5917
5918 if (retries)
5919 smb2_set_replay(server, &rqst);
5920
5921 rc = cifs_send_recv(xid, ses, server,
5922 &rqst, &resp_buftype, flags, &rsp_iov);
5923 free_qfs_info_req(&iov);
5924 if (rc) {
5925 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
5926 goto posix_qfsinf_exit;
5927 }
5928 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5929
5930 info = (FILE_SYSTEM_POSIX_INFO *)(
5931 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
5932 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5933 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5934 sizeof(FILE_SYSTEM_POSIX_INFO));
5935 if (!rc)
5936 copy_posix_fs_info_to_kstatfs(info, fsdata);
5937
5938 posix_qfsinf_exit:
5939 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
5940
5941 if (is_replayable_error(rc) &&
5942 smb2_should_replay(tcon, &retries, &cur_sleep))
5943 goto replay_again;
5944
5945 return rc;
5946 }
5947
5948 int
5949 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
5950 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5951 {
5952 struct smb_rqst rqst;
5953 struct smb2_query_info_rsp *rsp = NULL;
5954 struct kvec iov;
5955 struct kvec rsp_iov;
5956 int rc = 0;
5957 int resp_buftype;
5958 struct cifs_ses *ses = tcon->ses;
5959 struct TCP_Server_Info *server;
5960 struct smb2_fs_full_size_info *info = NULL;
5961 int flags = 0;
5962 int retries = 0, cur_sleep = 1;
5963
5964 replay_again:
5965 /* reinitialize for possible replay */
5966 flags = 0;
5967 server = cifs_pick_channel(ses);
5968
5969 rc = build_qfs_info_req(&iov, tcon, server,
5970 FS_FULL_SIZE_INFORMATION,
5971 sizeof(struct smb2_fs_full_size_info),
5972 persistent_fid, volatile_fid);
5973 if (rc)
5974 return rc;
5975
5976 if (smb3_encryption_required(tcon))
5977 flags |= CIFS_TRANSFORM_REQ;
5978
5979 memset(&rqst, 0, sizeof(struct smb_rqst));
5980 rqst.rq_iov = &iov;
5981 rqst.rq_nvec = 1;
5982
5983 if (retries)
5984 smb2_set_replay(server, &rqst);
5985
5986 rc = cifs_send_recv(xid, ses, server,
5987 &rqst, &resp_buftype, flags, &rsp_iov);
5988 free_qfs_info_req(&iov);
5989 if (rc) {
5990 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
5991 goto qfsinf_exit;
5992 }
5993 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5994
5995 info = (struct smb2_fs_full_size_info *)(
5996 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
5997 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5998 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5999 sizeof(struct smb2_fs_full_size_info));
6000 if (!rc)
6001 smb2_copy_fs_info_to_kstatfs(info, fsdata);
6002
6003 qfsinf_exit:
6004 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6005
6006 if (is_replayable_error(rc) &&
6007 smb2_should_replay(tcon, &retries, &cur_sleep))
6008 goto replay_again;
6009
6010 return rc;
6011 }
6012
6013 int
6014 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
6015 u64 persistent_fid, u64 volatile_fid, int level)
6016 {
6017 struct smb_rqst rqst;
6018 struct smb2_query_info_rsp *rsp = NULL;
6019 struct kvec iov;
6020 struct kvec rsp_iov;
6021 int rc = 0;
6022 int resp_buftype, max_len, min_len;
6023 struct cifs_ses *ses = tcon->ses;
6024 struct TCP_Server_Info *server;
6025 unsigned int rsp_len, offset;
6026 int flags = 0;
6027 int retries = 0, cur_sleep = 1;
6028
6029 replay_again:
6030 /* reinitialize for possible replay */
6031 flags = 0;
6032 server = cifs_pick_channel(ses);
6033
6034 if (level == FS_DEVICE_INFORMATION) {
6035 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6036 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
6037 } else if (level == FS_ATTRIBUTE_INFORMATION) {
6038 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
6039 min_len = MIN_FS_ATTR_INFO_SIZE;
6040 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
6041 max_len = sizeof(struct smb3_fs_ss_info);
6042 min_len = sizeof(struct smb3_fs_ss_info);
6043 } else if (level == FS_VOLUME_INFORMATION) {
6044 max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
6045 min_len = sizeof(struct smb3_fs_vol_info);
6046 } else {
6047 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
6048 return -EINVAL;
6049 }
6050
6051 rc = build_qfs_info_req(&iov, tcon, server,
6052 level, max_len,
6053 persistent_fid, volatile_fid);
6054 if (rc)
6055 return rc;
6056
6057 if (smb3_encryption_required(tcon))
6058 flags |= CIFS_TRANSFORM_REQ;
6059
6060 memset(&rqst, 0, sizeof(struct smb_rqst));
6061 rqst.rq_iov = &iov;
6062 rqst.rq_nvec = 1;
6063
6064 if (retries)
6065 smb2_set_replay(server, &rqst);
6066
6067 rc = cifs_send_recv(xid, ses, server,
6068 &rqst, &resp_buftype, flags, &rsp_iov);
6069 free_qfs_info_req(&iov);
6070 if (rc) {
6071 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
6072 goto qfsattr_exit;
6073 }
6074 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
6075
6076 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
6077 offset = le16_to_cpu(rsp->OutputBufferOffset);
6078 rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
6079 if (rc)
6080 goto qfsattr_exit;
6081
6082 if (level == FS_ATTRIBUTE_INFORMATION)
6083 memcpy(&tcon->fsAttrInfo, offset
6084 + (char *)rsp, min_t(unsigned int,
6085 rsp_len, max_len));
6086 else if (level == FS_DEVICE_INFORMATION)
6087 memcpy(&tcon->fsDevInfo, offset
6088 + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
6089 else if (level == FS_SECTOR_SIZE_INFORMATION) {
6090 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
6091 (offset + (char *)rsp);
6092 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
6093 tcon->perf_sector_size =
6094 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
6095 } else if (level == FS_VOLUME_INFORMATION) {
6096 struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
6097 (offset + (char *)rsp);
6098 tcon->vol_serial_number = vol_info->VolumeSerialNumber;
6099 tcon->vol_create_time = vol_info->VolumeCreationTime;
6100 }
6101
6102 qfsattr_exit:
6103 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
6104
6105 if (is_replayable_error(rc) &&
6106 smb2_should_replay(tcon, &retries, &cur_sleep))
6107 goto replay_again;
6108
6109 return rc;
6110 }
6111
6112 int
6113 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
6114 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6115 const __u32 num_lock, struct smb2_lock_element *buf)
6116 {
6117 struct smb_rqst rqst;
6118 int rc = 0;
6119 struct smb2_lock_req *req = NULL;
6120 struct kvec iov[2];
6121 struct kvec rsp_iov;
6122 int resp_buf_type;
6123 unsigned int count;
6124 int flags = CIFS_NO_RSP_BUF;
6125 unsigned int total_len;
6126 struct TCP_Server_Info *server;
6127 int retries = 0, cur_sleep = 1;
6128
6129 replay_again:
6130 /* reinitialize for possible replay */
6131 flags = CIFS_NO_RSP_BUF;
6132 server = cifs_pick_channel(tcon->ses);
6133
6134 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
6135
6136 rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
6137 (void **) &req, &total_len);
6138 if (rc)
6139 return rc;
6140
6141 if (smb3_encryption_required(tcon))
6142 flags |= CIFS_TRANSFORM_REQ;
6143
6144 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
6145 req->LockCount = cpu_to_le16(num_lock);
6146
6147 req->PersistentFileId = persist_fid;
6148 req->VolatileFileId = volatile_fid;
6149
6150 count = num_lock * sizeof(struct smb2_lock_element);
6151
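/*
 * The request structure embeds one lock element; exclude it from iov[0] and
 * send the caller's array of @num_lock elements as a second iovec instead.
 */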
6152 iov[0].iov_base = (char *)req;
6153 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
6154 iov[1].iov_base = (char *)buf;
6155 iov[1].iov_len = count;
6156
6157 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
6158
6159 memset(&rqst, 0, sizeof(struct smb_rqst));
6160 rqst.rq_iov = iov;
6161 rqst.rq_nvec = 2;
6162
6163 if (retries)
6164 smb2_set_replay(server, &rqst);
6165
6166 rc = cifs_send_recv(xid, tcon->ses, server,
6167 &rqst, &resp_buf_type, flags,
6168 &rsp_iov);
6169 cifs_small_buf_release(req);
6170 if (rc) {
6171 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
6172 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
6173 trace_smb3_lock_err(xid, persist_fid, tcon->tid,
6174 tcon->ses->Suid, rc);
6175 }
6176
6177 if (is_replayable_error(rc) &&
6178 smb2_should_replay(tcon, &retries, &cur_sleep))
6179 goto replay_again;
6180
6181 return rc;
6182 }
6183
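/*
 * Convenience wrapper: build a single smb2_lock_element and pass it to
 * smb2_lockv().  Non-blocking requests (other than unlocks) also set
 * SMB2_LOCKFLAG_FAIL_IMMEDIATELY so the server fails rather than waits.
 */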
6184 int
6185 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
6186 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
6187 const __u64 length, const __u64 offset, const __u32 lock_flags,
6188 const bool wait)
6189 {
6190 struct smb2_lock_element lock;
6191
6192 lock.Offset = cpu_to_le64(offset);
6193 lock.Length = cpu_to_le64(length);
6194 lock.Flags = cpu_to_le32(lock_flags);
6195 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
6196 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
6197
6198 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
6199 }
6200
6201 int
6202 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
6203 __u8 *lease_key, const __le32 lease_state)
6204 {
6205 struct smb_rqst rqst;
6206 int rc;
6207 struct smb2_lease_ack *req = NULL;
6208 struct cifs_ses *ses = tcon->ses;
6209 int flags = CIFS_OBREAK_OP;
6210 unsigned int total_len;
6211 struct kvec iov[1];
6212 struct kvec rsp_iov;
6213 int resp_buf_type;
6214 __u64 *please_key_high;
6215 __u64 *please_key_low;
6216 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
6217
6218 cifs_dbg(FYI, "SMB2_lease_break\n");
6219 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
6220 (void **) &req, &total_len);
6221 if (rc)
6222 return rc;
6223
6224 if (smb3_encryption_required(tcon))
6225 flags |= CIFS_TRANSFORM_REQ;
6226
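/*
 * The buffer was allocated for a plain 24-byte oplock break request; a lease
 * break acknowledgment is 36 bytes, hence the larger StructureSize and the
 * extra 12 bytes added to total_len below.
 */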
6227 req->hdr.CreditRequest = cpu_to_le16(1);
6228 req->StructureSize = cpu_to_le16(36);
6229 total_len += 12;
6230
6231 memcpy(req->LeaseKey, lease_key, 16);
6232 req->LeaseState = lease_state;
6233
6234 flags |= CIFS_NO_RSP_BUF;
6235
6236 iov[0].iov_base = (char *)req;
6237 iov[0].iov_len = total_len;
6238
6239 memset(&rqst, 0, sizeof(struct smb_rqst));
6240 rqst.rq_iov = iov;
6241 rqst.rq_nvec = 1;
6242
6243 rc = cifs_send_recv(xid, ses, server,
6244 &rqst, &resp_buf_type, flags, &rsp_iov);
6245 cifs_small_buf_release(req);
6246
6247 please_key_low = (__u64 *)lease_key;
6248 please_key_high = (__u64 *)(lease_key+8);
6249 if (rc) {
6250 cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
6251 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
6252 ses->Suid, *please_key_low, *please_key_high, rc);
6253 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
6254 } else
6255 trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
6256 ses->Suid, *please_key_low, *please_key_high);
6257
6258 return rc;
6259 }
6260