1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/mount.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/list.h>
19 #include <linux/seq_file.h>
20 #include <linux/vfs.h>
21 #include <linux/mempool.h>
22 #include <linux/delay.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/random.h>
27 #include <linux/uuid.h>
28 #include <linux/xattr.h>
29 #include <uapi/linux/magic.h>
30 #include <net/ipv6.h>
31 #include "cifsfs.h"
32 #include "cifspdu.h"
33 #define DECLARE_GLOBALS_HERE
34 #include "cifsglob.h"
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37 #include "cifs_fs_sb.h"
38 #include <linux/mm.h>
39 #include <linux/key-type.h>
40 #include "cifs_spnego.h"
41 #include "fscache.h"
42 #ifdef CONFIG_CIFS_DFS_UPCALL
43 #include "dfs_cache.h"
44 #endif
45 #ifdef CONFIG_CIFS_SWN_UPCALL
46 #include "netlink.h"
47 #endif
48 #include "fs_context.h"
49 #include "cached_dir.h"
50 
51 /*
52  * DOS dates from 1980/1/1 through 2107/12/31
53  * Protocol specifications indicate the range should be to 119, which
54  * limits maximum year to 2099. But this range has not been checked.
55  */
56 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
57 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
58 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
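/*
 * Illustrative example (added note, not in the original source): DOS dates
 * pack (year - 1980) into bits 9-15, the month into bits 5-8 and the day
 * into bits 0-4; DOS times pack hours into bits 11-15, minutes into bits
 * 5-10 and two-second units into bits 0-4.  For instance:
 *
 *   SMB_DATE_MAX == ((2107 - 1980) << 9) | (12 << 5) | 31   =>  2107-12-31
 *   SMB_TIME_MAX == (23 << 11) | (59 << 5) | 29             =>  23:59:58
 */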
59 
60 int cifsFYI = 0;
61 bool traceSMB;
62 bool enable_oplocks = true;
63 bool linuxExtEnabled = true;
64 bool lookupCacheEnabled = true;
65 bool disable_legacy_dialects; /* false by default */
66 bool enable_gcm_256 = true;
67 bool require_gcm_256; /* false by default */
68 bool enable_negotiate_signing; /* false by default */
69 unsigned int global_secflags = CIFSSEC_DEF;
70 /* unsigned int ntlmv2_support = 0; */
71 unsigned int sign_CIFS_PDUs = 1;
72 
73 /*
74  * Global transaction id (XID) information
75  */
76 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
77 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
78 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
79 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
80 
81 /*
82  *  Global counters, updated atomically
83  */
84 atomic_t sesInfoAllocCount;
85 atomic_t tconInfoAllocCount;
86 atomic_t tcpSesNextId;
87 atomic_t tcpSesAllocCount;
88 atomic_t tcpSesReconnectCount;
89 atomic_t tconInfoReconnectCount;
90 
91 atomic_t mid_count;
92 atomic_t buf_alloc_count;
93 atomic_t small_buf_alloc_count;
94 #ifdef CONFIG_CIFS_STATS2
95 atomic_t total_buf_alloc_count;
96 atomic_t total_small_buf_alloc_count;
97 #endif /* STATS2 */
98 struct list_head	cifs_tcp_ses_list;
99 spinlock_t		cifs_tcp_ses_lock;
100 static const struct super_operations cifs_super_ops;
101 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
102 module_param(CIFSMaxBufSize, uint, 0444);
103 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
104 				 "for CIFS requests. "
105 				 "Default: 16384 Range: 8192 to 130048");
106 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
107 module_param(cifs_min_rcv, uint, 0444);
108 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
109 				"1 to 64");
110 unsigned int cifs_min_small = 30;
111 module_param(cifs_min_small, uint, 0444);
112 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
113 				 "Range: 2 to 256");
114 unsigned int cifs_max_pending = CIFS_MAX_REQ;
115 module_param(cifs_max_pending, uint, 0444);
116 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
117 				   "CIFS/SMB1 dialect (N/A for SMB3) "
118 				   "Default: 32767 Range: 2 to 32767.");
119 #ifdef CONFIG_CIFS_STATS2
120 unsigned int slow_rsp_threshold = 1;
121 module_param(slow_rsp_threshold, uint, 0644);
122 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
123 				   "before logging that a response is delayed. "
124 				   "Default: 1 (if set to 0 disables msg).");
125 #endif /* STATS2 */
126 
127 module_param(enable_oplocks, bool, 0644);
128 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
129 
130 module_param(enable_gcm_256, bool, 0644);
131 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
132 
133 module_param(require_gcm_256, bool, 0644);
134 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
135 
136 module_param(enable_negotiate_signing, bool, 0644);
137 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
138 
139 module_param(disable_legacy_dialects, bool, 0644);
140 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
141 				  "helpful to restrict the ability to "
142 				  "override the default dialects (SMB2.1, "
143 				  "SMB3 and SMB3.02) on mount with old "
144 				  "dialects (CIFS/SMB1 and SMB2) since "
145 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
146 				  " and less secure. Default: n/N/0");
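/*
 * Usage sketch (added note, not in the original source): the module
 * parameters above can be set when the module is loaded, for example
 *
 *   modprobe cifs CIFSMaxBufSize=65536 enable_oplocks=0
 *
 * and the parameters registered with mode 0644 can also be changed at
 * runtime through /sys/module/cifs/parameters/<name>.
 */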
147 
148 extern mempool_t *cifs_sm_req_poolp;
149 extern mempool_t *cifs_req_poolp;
150 extern mempool_t *cifs_mid_poolp;
151 
152 struct workqueue_struct	*cifsiod_wq;
153 struct workqueue_struct	*decrypt_wq;
154 struct workqueue_struct	*fileinfo_put_wq;
155 struct workqueue_struct	*cifsoplockd_wq;
156 struct workqueue_struct	*deferredclose_wq;
157 __u32 cifs_lock_secret;
158 
159 /*
160  * Bumps refcount for cifs super block.
161  * Note that it should only be called if a reference to the VFS super block is
162  * already held, e.g. in open-type syscalls context. Otherwise it can race with
163  * atomic_dec_and_test in deactivate_locked_super.
164  */
165 void
166 cifs_sb_active(struct super_block *sb)
167 {
168 	struct cifs_sb_info *server = CIFS_SB(sb);
169 
170 	if (atomic_inc_return(&server->active) == 1)
171 		atomic_inc(&sb->s_active);
172 }
173 
174 void
175 cifs_sb_deactive(struct super_block *sb)
176 {
177 	struct cifs_sb_info *server = CIFS_SB(sb);
178 
179 	if (atomic_dec_and_test(&server->active))
180 		deactivate_super(sb);
181 }
182 
183 static int
184 cifs_read_super(struct super_block *sb)
185 {
186 	struct inode *inode;
187 	struct cifs_sb_info *cifs_sb;
188 	struct cifs_tcon *tcon;
189 	struct timespec64 ts;
190 	int rc = 0;
191 
192 	cifs_sb = CIFS_SB(sb);
193 	tcon = cifs_sb_master_tcon(cifs_sb);
194 
195 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
196 		sb->s_flags |= SB_POSIXACL;
197 
198 	if (tcon->snapshot_time)
199 		sb->s_flags |= SB_RDONLY;
200 
201 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
202 		sb->s_maxbytes = MAX_LFS_FILESIZE;
203 	else
204 		sb->s_maxbytes = MAX_NON_LFS;
205 
206 	/*
207 	 * Some very old servers like DOS and OS/2 used 2 second granularity
208 	 * (while all current servers use 100ns granularity - see MS-DTYP)
209 	 * but 1 second is the maximum allowed granularity for the VFS
210 	 * so for old servers set time granularity to 1 second while for
211 	 * everything else (current servers) set it to 100ns.
212 	 */
213 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
214 	    ((tcon->ses->capabilities &
215 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
216 	    !tcon->unix_ext) {
217 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
218 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
219 		sb->s_time_min = ts.tv_sec;
220 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
221 				    cpu_to_le16(SMB_TIME_MAX), 0);
222 		sb->s_time_max = ts.tv_sec;
223 	} else {
224 		/*
225 		 * Almost every server, including all SMB2+, uses DCE TIME
226 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
227 		 */
228 		sb->s_time_gran = 100;
229 		ts = cifs_NTtimeToUnix(0);
230 		sb->s_time_min = ts.tv_sec;
231 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
232 		sb->s_time_max = ts.tv_sec;
233 	}
234 
235 	sb->s_magic = CIFS_SUPER_MAGIC;
236 	sb->s_op = &cifs_super_ops;
237 	sb->s_xattr = cifs_xattr_handlers;
238 	rc = super_setup_bdi(sb);
239 	if (rc)
240 		goto out_no_root;
241 	/* tune readahead according to rsize if readahead size not set on mount */
242 	if (cifs_sb->ctx->rsize == 0)
243 		cifs_sb->ctx->rsize =
244 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
245 	if (cifs_sb->ctx->rasize)
246 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
247 	else
248 		sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
249 
250 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
251 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
252 	inode = cifs_root_iget(sb);
253 
254 	if (IS_ERR(inode)) {
255 		rc = PTR_ERR(inode);
256 		goto out_no_root;
257 	}
258 
259 	if (tcon->nocase)
260 		sb->s_d_op = &cifs_ci_dentry_ops;
261 	else
262 		sb->s_d_op = &cifs_dentry_ops;
263 
264 	sb->s_root = d_make_root(inode);
265 	if (!sb->s_root) {
266 		rc = -ENOMEM;
267 		goto out_no_root;
268 	}
269 
270 #ifdef CONFIG_CIFS_NFSD_EXPORT
271 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
272 		cifs_dbg(FYI, "export ops supported\n");
273 		sb->s_export_op = &cifs_export_ops;
274 	}
275 #endif /* CONFIG_CIFS_NFSD_EXPORT */
276 
277 	return 0;
278 
279 out_no_root:
280 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
281 	return rc;
282 }
283 
284 static void cifs_kill_sb(struct super_block *sb)
285 {
286 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
287 
288 	/*
289 	 * We need to release all dentries for the cached directories
290 	 * before we kill the sb.
291 	 */
292 	if (cifs_sb->root) {
293 		close_all_cached_dirs(cifs_sb);
294 
295 		/* finally release root dentry */
296 		dput(cifs_sb->root);
297 		cifs_sb->root = NULL;
298 	}
299 
300 	kill_anon_super(sb);
301 	cifs_umount(cifs_sb);
302 }
303 
304 static int
305 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
306 {
307 	struct super_block *sb = dentry->d_sb;
308 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
309 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
310 	struct TCP_Server_Info *server = tcon->ses->server;
311 	unsigned int xid;
312 	int rc = 0;
313 
314 	xid = get_xid();
315 
316 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
317 		buf->f_namelen =
318 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
319 	else
320 		buf->f_namelen = PATH_MAX;
321 
322 	buf->f_fsid.val[0] = tcon->vol_serial_number;
323 	/* we use part of the create time for more randomness, see man statfs */
324 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
325 
326 	buf->f_files = 0;	/* undefined */
327 	buf->f_ffree = 0;	/* unlimited */
328 
329 	if (server->ops->queryfs)
330 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
331 
332 	free_xid(xid);
333 	return rc;
334 }
335 
336 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
337 {
338 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
339 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
340 	struct TCP_Server_Info *server = tcon->ses->server;
341 
342 	if (server->ops->fallocate)
343 		return server->ops->fallocate(file, tcon, mode, off, len);
344 
345 	return -EOPNOTSUPP;
346 }
347 
348 static int cifs_permission(struct user_namespace *mnt_userns,
349 			   struct inode *inode, int mask)
350 {
351 	struct cifs_sb_info *cifs_sb;
352 
353 	cifs_sb = CIFS_SB(inode->i_sb);
354 
355 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
356 		if ((mask & MAY_EXEC) && !execute_ok(inode))
357 			return -EACCES;
358 		else
359 			return 0;
360 	} else /* file mode might have been restricted at mount time
361 		on the client (above and beyond ACL on servers) for
362 		servers which do not support setting and viewing mode bits,
363 		so allowing client to check permissions is useful */
364 		return generic_permission(&init_user_ns, inode, mask);
365 }
366 
367 static struct kmem_cache *cifs_inode_cachep;
368 static struct kmem_cache *cifs_req_cachep;
369 static struct kmem_cache *cifs_mid_cachep;
370 static struct kmem_cache *cifs_sm_req_cachep;
371 mempool_t *cifs_sm_req_poolp;
372 mempool_t *cifs_req_poolp;
373 mempool_t *cifs_mid_poolp;
374 
375 static struct inode *
376 cifs_alloc_inode(struct super_block *sb)
377 {
378 	struct cifsInodeInfo *cifs_inode;
379 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
380 	if (!cifs_inode)
381 		return NULL;
382 	cifs_inode->cifsAttrs = 0x20;	/* default */
383 	cifs_inode->time = 0;
384 	/*
385 	 * Until the file is open and we have gotten oplock info back from the
386 	 * server, can not assume caching of file data or metadata.
387 	 */
388 	cifs_set_oplock_level(cifs_inode, 0);
389 	cifs_inode->flags = 0;
390 	spin_lock_init(&cifs_inode->writers_lock);
391 	cifs_inode->writers = 0;
392 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
393 	cifs_inode->server_eof = 0;
394 	cifs_inode->uniqueid = 0;
395 	cifs_inode->createtime = 0;
396 	cifs_inode->epoch = 0;
397 	spin_lock_init(&cifs_inode->open_file_lock);
398 	generate_random_uuid(cifs_inode->lease_key);
399 	cifs_inode->symlink_target = NULL;
400 
401 	/*
402 	 * Can not set i_flags here - they get immediately overwritten to zero
403 	 * by the VFS.
404 	 */
405 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
406 	INIT_LIST_HEAD(&cifs_inode->openFileList);
407 	INIT_LIST_HEAD(&cifs_inode->llist);
408 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
409 	spin_lock_init(&cifs_inode->deferred_lock);
410 	return &cifs_inode->netfs.inode;
411 }
412 
413 static void
414 cifs_free_inode(struct inode *inode)
415 {
416 	struct cifsInodeInfo *cinode = CIFS_I(inode);
417 
418 	if (S_ISLNK(inode->i_mode))
419 		kfree(cinode->symlink_target);
420 	kmem_cache_free(cifs_inode_cachep, cinode);
421 }
422 
423 static void
424 cifs_evict_inode(struct inode *inode)
425 {
426 	truncate_inode_pages_final(&inode->i_data);
427 	if (inode->i_state & I_PINNING_FSCACHE_WB)
428 		cifs_fscache_unuse_inode_cookie(inode, true);
429 	cifs_fscache_release_inode_cookie(inode);
430 	clear_inode(inode);
431 }
432 
433 static void
434 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
435 {
436 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
437 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
438 
439 	seq_puts(s, ",addr=");
440 
441 	switch (server->dstaddr.ss_family) {
442 	case AF_INET:
443 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
444 		break;
445 	case AF_INET6:
446 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
447 		if (sa6->sin6_scope_id)
448 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
449 		break;
450 	default:
451 		seq_puts(s, "(unknown)");
452 	}
453 	if (server->rdma)
454 		seq_puts(s, ",rdma");
455 }
456 
457 static void
458 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
459 {
460 	if (ses->sectype == Unspecified) {
461 		if (ses->user_name == NULL)
462 			seq_puts(s, ",sec=none");
463 		return;
464 	}
465 
466 	seq_puts(s, ",sec=");
467 
468 	switch (ses->sectype) {
469 	case NTLMv2:
470 		seq_puts(s, "ntlmv2");
471 		break;
472 	case Kerberos:
473 		seq_puts(s, "krb5");
474 		break;
475 	case RawNTLMSSP:
476 		seq_puts(s, "ntlmssp");
477 		break;
478 	default:
479 		/* shouldn't ever happen */
480 		seq_puts(s, "unknown");
481 		break;
482 	}
483 
484 	if (ses->sign)
485 		seq_puts(s, "i");
486 
487 	if (ses->sectype == Kerberos)
488 		seq_printf(s, ",cruid=%u",
489 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
490 }
491 
492 static void
493 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
494 {
495 	seq_puts(s, ",cache=");
496 
497 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
498 		seq_puts(s, "strict");
499 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
500 		seq_puts(s, "none");
501 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
502 		seq_puts(s, "singleclient"); /* assume only one client access */
503 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
504 		seq_puts(s, "ro"); /* read only caching assumed */
505 	else
506 		seq_puts(s, "loose");
507 }
508 
509 /*
510  * cifs_show_devname() is used so we show the mount device name with correct
511  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
512  */
513 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
514 {
515 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
516 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
517 
518 	if (devname == NULL)
519 		seq_puts(m, "none");
520 	else {
521 		convert_delimiter(devname, '/');
522 		/* escape all spaces in share names */
523 		seq_escape(m, devname, " \t");
524 		kfree(devname);
525 	}
526 	return 0;
527 }
528 
529 /*
530  * cifs_show_options() is for displaying mount options in /proc/mounts.
531  * Not all settable options are displayed but most of the important
532  * ones are.
533  */
534 static int
535 cifs_show_options(struct seq_file *s, struct dentry *root)
536 {
537 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
538 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
539 	struct sockaddr *srcaddr;
540 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
541 
542 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
543 	cifs_show_security(s, tcon->ses);
544 	cifs_show_cache_flavor(s, cifs_sb);
545 
546 	if (tcon->no_lease)
547 		seq_puts(s, ",nolease");
548 	if (cifs_sb->ctx->multiuser)
549 		seq_puts(s, ",multiuser");
550 	else if (tcon->ses->user_name)
551 		seq_show_option(s, "username", tcon->ses->user_name);
552 
553 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
554 		seq_show_option(s, "domain", tcon->ses->domainName);
555 
556 	if (srcaddr->sa_family != AF_UNSPEC) {
557 		struct sockaddr_in *saddr4;
558 		struct sockaddr_in6 *saddr6;
559 		saddr4 = (struct sockaddr_in *)srcaddr;
560 		saddr6 = (struct sockaddr_in6 *)srcaddr;
561 		if (srcaddr->sa_family == AF_INET6)
562 			seq_printf(s, ",srcaddr=%pI6c",
563 				   &saddr6->sin6_addr);
564 		else if (srcaddr->sa_family == AF_INET)
565 			seq_printf(s, ",srcaddr=%pI4",
566 				   &saddr4->sin_addr.s_addr);
567 		else
568 			seq_printf(s, ",srcaddr=BAD-AF:%i",
569 				   (int)(srcaddr->sa_family));
570 	}
571 
572 	seq_printf(s, ",uid=%u",
573 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
574 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
575 		seq_puts(s, ",forceuid");
576 	else
577 		seq_puts(s, ",noforceuid");
578 
579 	seq_printf(s, ",gid=%u",
580 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
581 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
582 		seq_puts(s, ",forcegid");
583 	else
584 		seq_puts(s, ",noforcegid");
585 
586 	cifs_show_address(s, tcon->ses->server);
587 
588 	if (!tcon->unix_ext)
589 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
590 					   cifs_sb->ctx->file_mode,
591 					   cifs_sb->ctx->dir_mode);
592 	if (cifs_sb->ctx->iocharset)
593 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
594 	if (tcon->seal)
595 		seq_puts(s, ",seal");
596 	else if (tcon->ses->server->ignore_signature)
597 		seq_puts(s, ",signloosely");
598 	if (tcon->nocase)
599 		seq_puts(s, ",nocase");
600 	if (tcon->nodelete)
601 		seq_puts(s, ",nodelete");
602 	if (cifs_sb->ctx->no_sparse)
603 		seq_puts(s, ",nosparse");
604 	if (tcon->local_lease)
605 		seq_puts(s, ",locallease");
606 	if (tcon->retry)
607 		seq_puts(s, ",hard");
608 	else
609 		seq_puts(s, ",soft");
610 	if (tcon->use_persistent)
611 		seq_puts(s, ",persistenthandles");
612 	else if (tcon->use_resilient)
613 		seq_puts(s, ",resilienthandles");
614 	if (tcon->posix_extensions)
615 		seq_puts(s, ",posix");
616 	else if (tcon->unix_ext)
617 		seq_puts(s, ",unix");
618 	else
619 		seq_puts(s, ",nounix");
620 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
621 		seq_puts(s, ",nodfs");
622 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
623 		seq_puts(s, ",posixpaths");
624 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
625 		seq_puts(s, ",setuids");
626 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
627 		seq_puts(s, ",idsfromsid");
628 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
629 		seq_puts(s, ",serverino");
630 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
631 		seq_puts(s, ",rwpidforward");
632 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
633 		seq_puts(s, ",forcemand");
634 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
635 		seq_puts(s, ",nouser_xattr");
636 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
637 		seq_puts(s, ",mapchars");
638 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
639 		seq_puts(s, ",mapposix");
640 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
641 		seq_puts(s, ",sfu");
642 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
643 		seq_puts(s, ",nobrl");
644 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
645 		seq_puts(s, ",nohandlecache");
646 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
647 		seq_puts(s, ",modefromsid");
648 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
649 		seq_puts(s, ",cifsacl");
650 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
651 		seq_puts(s, ",dynperm");
652 	if (root->d_sb->s_flags & SB_POSIXACL)
653 		seq_puts(s, ",acl");
654 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
655 		seq_puts(s, ",mfsymlinks");
656 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
657 		seq_puts(s, ",fsc");
658 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
659 		seq_puts(s, ",nostrictsync");
660 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
661 		seq_puts(s, ",noperm");
662 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
663 		seq_printf(s, ",backupuid=%u",
664 			   from_kuid_munged(&init_user_ns,
665 					    cifs_sb->ctx->backupuid));
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
667 		seq_printf(s, ",backupgid=%u",
668 			   from_kgid_munged(&init_user_ns,
669 					    cifs_sb->ctx->backupgid));
670 
671 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
672 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
673 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
674 	if (cifs_sb->ctx->rasize)
675 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
676 	if (tcon->ses->server->min_offload)
677 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
678 	seq_printf(s, ",echo_interval=%lu",
679 			tcon->ses->server->echo_interval / HZ);
680 
681 	/* Only display the following if overridden on mount */
682 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
683 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
684 	if (tcon->ses->server->tcp_nodelay)
685 		seq_puts(s, ",tcpnodelay");
686 	if (tcon->ses->server->noautotune)
687 		seq_puts(s, ",noautotune");
688 	if (tcon->ses->server->noblocksnd)
689 		seq_puts(s, ",noblocksend");
690 
691 	if (tcon->snapshot_time)
692 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
693 	if (tcon->handle_timeout)
694 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
695 
696 	/*
697 	 * Display file and directory attribute timeout in seconds.
698 	 * If the file and directory attribute timeouts are the same then actimeo
699 	 * was likely specified on mount
700 	 */
701 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
702 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
703 	else {
704 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
705 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
706 	}
707 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
708 
709 	if (tcon->ses->chan_max > 1)
710 		seq_printf(s, ",multichannel,max_channels=%zu",
711 			   tcon->ses->chan_max);
712 
713 	if (tcon->use_witness)
714 		seq_puts(s, ",witness");
715 
716 	return 0;
717 }
718 
719 static void cifs_umount_begin(struct super_block *sb)
720 {
721 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
722 	struct cifs_tcon *tcon;
723 
724 	if (cifs_sb == NULL)
725 		return;
726 
727 	tcon = cifs_sb_master_tcon(cifs_sb);
728 
729 	spin_lock(&cifs_tcp_ses_lock);
730 	spin_lock(&tcon->tc_lock);
731 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
732 		/* we have other mounts to same share or we have
733 		   already tried to umount this and woken up
734 		   all waiting network requests, nothing to do */
735 		spin_unlock(&tcon->tc_lock);
736 		spin_unlock(&cifs_tcp_ses_lock);
737 		return;
738 	}
739 	/*
740 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
741 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
742 	 */
743 	spin_unlock(&tcon->tc_lock);
744 	spin_unlock(&cifs_tcp_ses_lock);
745 
746 	cifs_close_all_deferred_files(tcon);
747 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
748 	/* cancel_notify_requests(tcon); */
749 	if (tcon->ses && tcon->ses->server) {
750 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
751 		wake_up_all(&tcon->ses->server->request_q);
752 		wake_up_all(&tcon->ses->server->response_q);
753 		msleep(1); /* yield */
754 		/* we have to kick the requests once more */
755 		wake_up_all(&tcon->ses->server->response_q);
756 		msleep(1);
757 	}
758 
759 	return;
760 }
761 
762 static int cifs_freeze(struct super_block *sb)
763 {
764 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
765 	struct cifs_tcon *tcon;
766 
767 	if (cifs_sb == NULL)
768 		return 0;
769 
770 	tcon = cifs_sb_master_tcon(cifs_sb);
771 
772 	cifs_close_all_deferred_files(tcon);
773 	return 0;
774 }
775 
776 #ifdef CONFIG_CIFS_STATS2
777 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
778 {
779 	/* BB FIXME */
780 	return 0;
781 }
782 #endif
783 
784 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
785 {
786 	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
787 	return 0;
788 }
789 
790 static int cifs_drop_inode(struct inode *inode)
791 {
792 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
793 
794 	/* no serverino => unconditional eviction */
795 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
796 		generic_drop_inode(inode);
797 }
798 
799 static const struct super_operations cifs_super_ops = {
800 	.statfs = cifs_statfs,
801 	.alloc_inode = cifs_alloc_inode,
802 	.write_inode	= cifs_write_inode,
803 	.free_inode = cifs_free_inode,
804 	.drop_inode	= cifs_drop_inode,
805 	.evict_inode	= cifs_evict_inode,
806 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
807 	.show_devname   = cifs_show_devname,
808 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
809 	function unless later we add lazy close of inodes or unless the
810 	kernel forgets to call us with the same number of releases (closes)
811 	as opens */
812 	.show_options = cifs_show_options,
813 	.umount_begin   = cifs_umount_begin,
814 	.freeze_fs      = cifs_freeze,
815 #ifdef CONFIG_CIFS_STATS2
816 	.show_stats = cifs_show_stats,
817 #endif
818 };
819 
820 /*
821  * Get root dentry from superblock according to prefix path mount option.
822  * Return dentry with refcount + 1 on success and NULL otherwise.
823  */
824 static struct dentry *
825 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
826 {
827 	struct dentry *dentry;
828 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
829 	char *full_path = NULL;
830 	char *s, *p;
831 	char sep;
832 
833 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
834 		return dget(sb->s_root);
835 
836 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
837 				cifs_sb_master_tcon(cifs_sb), 0);
838 	if (full_path == NULL)
839 		return ERR_PTR(-ENOMEM);
840 
841 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
842 
843 	sep = CIFS_DIR_SEP(cifs_sb);
844 	dentry = dget(sb->s_root);
845 	s = full_path;
846 
847 	do {
848 		struct inode *dir = d_inode(dentry);
849 		struct dentry *child;
850 
851 		if (!S_ISDIR(dir->i_mode)) {
852 			dput(dentry);
853 			dentry = ERR_PTR(-ENOTDIR);
854 			break;
855 		}
856 
857 		/* skip separators */
858 		while (*s == sep)
859 			s++;
860 		if (!*s)
861 			break;
862 		p = s++;
863 		/* next separator */
864 		while (*s && *s != sep)
865 			s++;
866 
867 		child = lookup_positive_unlocked(p, dentry, s - p);
868 		dput(dentry);
869 		dentry = child;
870 	} while (!IS_ERR(dentry));
871 	kfree(full_path);
872 	return dentry;
873 }
874 
875 static int cifs_set_super(struct super_block *sb, void *data)
876 {
877 	struct cifs_mnt_data *mnt_data = data;
878 	sb->s_fs_info = mnt_data->cifs_sb;
879 	return set_anon_super(sb, NULL);
880 }
881 
882 struct dentry *
883 cifs_smb3_do_mount(struct file_system_type *fs_type,
884 	      int flags, struct smb3_fs_context *old_ctx)
885 {
886 	struct cifs_mnt_data mnt_data;
887 	struct cifs_sb_info *cifs_sb;
888 	struct super_block *sb;
889 	struct dentry *root;
890 	int rc;
891 
892 	/*
893 	 * Print the attempted mount operation to the kernel / CIFS log
894 	 *	if CIFS_DEBUG && cifsFYI
895 	 */
896 	if (cifsFYI)
897 		cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
898 	else
899 		cifs_info("Attempting to mount %s\n", old_ctx->UNC);
900 
901 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
902 	if (!cifs_sb)
903 		return ERR_PTR(-ENOMEM);
904 
905 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
906 	if (!cifs_sb->ctx) {
907 		root = ERR_PTR(-ENOMEM);
908 		goto out;
909 	}
910 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
911 	if (rc) {
912 		root = ERR_PTR(rc);
913 		goto out;
914 	}
915 
916 	rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
917 	if (rc) {
918 		root = ERR_PTR(rc);
919 		goto out;
920 	}
921 
922 	rc = cifs_setup_cifs_sb(cifs_sb);
923 	if (rc) {
924 		root = ERR_PTR(rc);
925 		goto out;
926 	}
927 
928 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
929 	if (rc) {
930 		if (!(flags & SB_SILENT))
931 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
932 				 rc);
933 		root = ERR_PTR(rc);
934 		goto out;
935 	}
936 
937 	mnt_data.ctx = cifs_sb->ctx;
938 	mnt_data.cifs_sb = cifs_sb;
939 	mnt_data.flags = flags;
940 
941 	/* BB should we make this contingent on mount parm? */
942 	flags |= SB_NODIRATIME | SB_NOATIME;
943 
944 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
945 	if (IS_ERR(sb)) {
946 		cifs_umount(cifs_sb);
947 		return ERR_CAST(sb);
948 	}
949 
950 	if (sb->s_root) {
951 		cifs_dbg(FYI, "Use existing superblock\n");
952 		cifs_umount(cifs_sb);
953 		cifs_sb = NULL;
954 	} else {
955 		rc = cifs_read_super(sb);
956 		if (rc) {
957 			root = ERR_PTR(rc);
958 			goto out_super;
959 		}
960 
961 		sb->s_flags |= SB_ACTIVE;
962 	}
963 
964 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
965 	if (IS_ERR(root))
966 		goto out_super;
967 
968 	if (cifs_sb)
969 		cifs_sb->root = dget(root);
970 
971 	cifs_dbg(FYI, "dentry root is: %p\n", root);
972 	return root;
973 
974 out_super:
975 	deactivate_locked_super(sb);
976 	return root;
977 out:
978 	kfree(cifs_sb->prepath);
979 	smb3_cleanup_fs_context(cifs_sb->ctx);
980 	kfree(cifs_sb);
981 	return root;
982 }
983 
984 
985 static ssize_t
986 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
987 {
988 	ssize_t rc;
989 	struct inode *inode = file_inode(iocb->ki_filp);
990 
991 	if (iocb->ki_flags & IOCB_DIRECT)
992 		return cifs_user_readv(iocb, iter);
993 
994 	rc = cifs_revalidate_mapping(inode);
995 	if (rc)
996 		return rc;
997 
998 	return generic_file_read_iter(iocb, iter);
999 }
1000 
1001 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1002 {
1003 	struct inode *inode = file_inode(iocb->ki_filp);
1004 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1005 	ssize_t written;
1006 	int rc;
1007 
1008 	if (iocb->ki_filp->f_flags & O_DIRECT) {
1009 		written = cifs_user_writev(iocb, from);
1010 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
1011 			cifs_zap_mapping(inode);
1012 			cifs_dbg(FYI,
1013 				 "Set no oplock for inode=%p after a write operation\n",
1014 				 inode);
1015 			cinode->oplock = 0;
1016 		}
1017 		return written;
1018 	}
1019 
1020 	written = cifs_get_writer(cinode);
1021 	if (written)
1022 		return written;
1023 
1024 	written = generic_file_write_iter(iocb, from);
1025 
1026 	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1027 		goto out;
1028 
1029 	rc = filemap_fdatawrite(inode->i_mapping);
1030 	if (rc)
1031 		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1032 			 rc, inode);
1033 
1034 out:
1035 	cifs_put_writer(cinode);
1036 	return written;
1037 }
1038 
1039 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1040 {
1041 	struct cifsFileInfo *cfile = file->private_data;
1042 	struct cifs_tcon *tcon;
1043 
1044 	/*
1045 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1046 	 * the cached file length
1047 	 */
1048 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1049 		int rc;
1050 		struct inode *inode = file_inode(file);
1051 
1052 		/*
1053 		 * We need to be sure that all dirty pages are written and the
1054 		 * server has the newest file length.
1055 		 */
1056 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1057 		    inode->i_mapping->nrpages != 0) {
1058 			rc = filemap_fdatawait(inode->i_mapping);
1059 			if (rc) {
1060 				mapping_set_error(inode->i_mapping, rc);
1061 				return rc;
1062 			}
1063 		}
1064 		/*
1065 		 * Some applications poll for the file length in this strange
1066 		 * way so we must seek to end on non-oplocked files by
1067 		 * setting the revalidate time to zero.
1068 		 */
1069 		CIFS_I(inode)->time = 0;
1070 
1071 		rc = cifs_revalidate_file_attr(file);
1072 		if (rc < 0)
1073 			return (loff_t)rc;
1074 	}
1075 	if (cfile && cfile->tlink) {
1076 		tcon = tlink_tcon(cfile->tlink);
1077 		if (tcon->ses->server->ops->llseek)
1078 			return tcon->ses->server->ops->llseek(file, tcon,
1079 							      offset, whence);
1080 	}
1081 	return generic_file_llseek(file, offset, whence);
1082 }
1083 
1084 static int
1085 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1086 {
1087 	/*
1088 	 * Note that this is called by vfs setlease with i_lock held to
1089 	 * protect *lease from going away.
1090 	 */
1091 	struct inode *inode = file_inode(file);
1092 	struct cifsFileInfo *cfile = file->private_data;
1093 
1094 	if (!(S_ISREG(inode->i_mode)))
1095 		return -EINVAL;
1096 
1097 	/* Check if file is oplocked if this is request for new lease */
1098 	if (arg == F_UNLCK ||
1099 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1100 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1101 		return generic_setlease(file, arg, lease, priv);
1102 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1103 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1104 		/*
1105 		 * If the server claims to support oplock on this file, then we
1106 		 * still need to check oplock even if the local_lease mount
1107 		 * option is set, but there are servers which do not support
1108 		 * oplock for which this mount option may be useful if the user
1109 		 * knows that the file won't be changed on the server by anyone
1110 		 * else.
1111 		 */
1112 		return generic_setlease(file, arg, lease, priv);
1113 	else
1114 		return -EAGAIN;
1115 }
1116 
1117 struct file_system_type cifs_fs_type = {
1118 	.owner = THIS_MODULE,
1119 	.name = "cifs",
1120 	.init_fs_context = smb3_init_fs_context,
1121 	.parameters = smb3_fs_parameters,
1122 	.kill_sb = cifs_kill_sb,
1123 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1124 };
1125 MODULE_ALIAS_FS("cifs");
1126 
1127 struct file_system_type smb3_fs_type = {
1128 	.owner = THIS_MODULE,
1129 	.name = "smb3",
1130 	.init_fs_context = smb3_init_fs_context,
1131 	.parameters = smb3_fs_parameters,
1132 	.kill_sb = cifs_kill_sb,
1133 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1134 };
1135 MODULE_ALIAS_FS("smb3");
1136 MODULE_ALIAS("smb3");
1137 
1138 const struct inode_operations cifs_dir_inode_ops = {
1139 	.create = cifs_create,
1140 	.atomic_open = cifs_atomic_open,
1141 	.lookup = cifs_lookup,
1142 	.getattr = cifs_getattr,
1143 	.unlink = cifs_unlink,
1144 	.link = cifs_hardlink,
1145 	.mkdir = cifs_mkdir,
1146 	.rmdir = cifs_rmdir,
1147 	.rename = cifs_rename2,
1148 	.permission = cifs_permission,
1149 	.setattr = cifs_setattr,
1150 	.symlink = cifs_symlink,
1151 	.mknod   = cifs_mknod,
1152 	.listxattr = cifs_listxattr,
1153 };
1154 
1155 const struct inode_operations cifs_file_inode_ops = {
1156 	.setattr = cifs_setattr,
1157 	.getattr = cifs_getattr,
1158 	.permission = cifs_permission,
1159 	.listxattr = cifs_listxattr,
1160 	.fiemap = cifs_fiemap,
1161 };
1162 
1163 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1164 			    struct delayed_call *done)
1165 {
1166 	char *target_path;
1167 
1168 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1169 	if (!target_path)
1170 		return ERR_PTR(-ENOMEM);
1171 
1172 	spin_lock(&inode->i_lock);
1173 	if (likely(CIFS_I(inode)->symlink_target)) {
1174 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1175 	} else {
1176 		kfree(target_path);
1177 		target_path = ERR_PTR(-EOPNOTSUPP);
1178 	}
1179 	spin_unlock(&inode->i_lock);
1180 
1181 	if (!IS_ERR(target_path))
1182 		set_delayed_call(done, kfree_link, target_path);
1183 
1184 	return target_path;
1185 }
1186 
1187 const struct inode_operations cifs_symlink_inode_ops = {
1188 	.get_link = cifs_get_link,
1189 	.setattr = cifs_setattr,
1190 	.permission = cifs_permission,
1191 	.listxattr = cifs_listxattr,
1192 };
1193 
1194 /*
1195  * Advance the EOF marker to after the source range.
1196  */
1197 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1198 				struct cifs_tcon *src_tcon,
1199 				unsigned int xid, loff_t src_end)
1200 {
1201 	struct cifsFileInfo *writeable_srcfile;
1202 	int rc = -EINVAL;
1203 
1204 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1205 	if (writeable_srcfile) {
1206 		if (src_tcon->ses->server->ops->set_file_size)
1207 			rc = src_tcon->ses->server->ops->set_file_size(
1208 				xid, src_tcon, writeable_srcfile,
1209 				src_inode->i_size, true /* no need to set sparse */);
1210 		else
1211 			rc = -ENOSYS;
1212 		cifsFileInfo_put(writeable_srcfile);
1213 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1214 	}
1215 
1216 	if (rc < 0)
1217 		goto set_failed;
1218 
1219 	netfs_resize_file(&src_cifsi->netfs, src_end);
1220 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1221 	return 0;
1222 
1223 set_failed:
1224 	return filemap_write_and_wait(src_inode->i_mapping);
1225 }
1226 
1227 /*
1228  * Flush out either the folio that overlaps the beginning of a range in which
1229  * pos resides or the folio that overlaps the end of a range unless that folio
1230  * is entirely within the range we're going to invalidate.  We extend the flush
1231  * bounds to encompass the folio.
1232  */
1233 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1234 			    bool first)
1235 {
1236 	struct folio *folio;
1237 	unsigned long long fpos, fend;
1238 	pgoff_t index = pos / PAGE_SIZE;
1239 	size_t size;
1240 	int rc = 0;
1241 
1242 	folio = filemap_get_folio(inode->i_mapping, index);
1243 	if (!folio)
1244 		return 0;
1245 
1246 	size = folio_size(folio);
1247 	fpos = folio_pos(folio);
1248 	fend = fpos + size - 1;
1249 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1250 	*_fend   = max_t(unsigned long long, *_fend, fend);
1251 	if ((first && pos == fpos) || (!first && pos == fend))
1252 		goto out;
1253 
1254 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1255 out:
1256 	folio_put(folio);
1257 	return rc;
1258 }
1259 
1260 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1261 		struct file *dst_file, loff_t destoff, loff_t len,
1262 		unsigned int remap_flags)
1263 {
1264 	struct inode *src_inode = file_inode(src_file);
1265 	struct inode *target_inode = file_inode(dst_file);
1266 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1267 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1268 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1269 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1270 	struct cifs_tcon *target_tcon, *src_tcon;
1271 	unsigned long long destend, fstart, fend, new_size;
1272 	unsigned int xid;
1273 	int rc;
1274 
1275 	if (remap_flags & REMAP_FILE_DEDUP)
1276 		return -EOPNOTSUPP;
1277 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1278 		return -EINVAL;
1279 
1280 	cifs_dbg(FYI, "clone range\n");
1281 
1282 	xid = get_xid();
1283 
1284 	if (!smb_file_src || !smb_file_target) {
1285 		rc = -EBADF;
1286 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1287 		goto out;
1288 	}
1289 
1290 	src_tcon = tlink_tcon(smb_file_src->tlink);
1291 	target_tcon = tlink_tcon(smb_file_target->tlink);
1292 
1293 	/*
1294 	 * Note: cifs case is easier than btrfs since server responsible for
1295 	 * checks for proper open modes and file type and if it wants
1296 	 * server could even support copy of range where source = target
1297 	 */
1298 	lock_two_nondirectories(target_inode, src_inode);
1299 
1300 	if (len == 0)
1301 		len = src_inode->i_size - off;
1302 
1303 	cifs_dbg(FYI, "clone range\n");
1304 
1305 	/* Flush the source buffer */
1306 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1307 					  off + len - 1);
1308 	if (rc)
1309 		goto unlock;
1310 
1311 	/* The server-side copy will fail if the source crosses the EOF marker.
1312 	 * Advance the EOF marker after the flush above to the end of the range
1313 	 * if it's short of that.
1314 	 */
1315 	if (src_cifsi->netfs.remote_i_size < off + len) {
1316 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1317 		if (rc < 0)
1318 			goto unlock;
1319 	}
1320 
1321 	new_size = destoff + len;
1322 	destend = destoff + len - 1;
1323 
1324 	/* Flush the folios at either end of the destination range to prevent
1325 	 * accidental loss of dirty data outside of the range.
1326 	 */
1327 	fstart = destoff;
1328 	fend = destend;
1329 
1330 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1331 	if (rc)
1332 		goto unlock;
1333 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1334 	if (rc)
1335 		goto unlock;
1336 
1337 	/* Discard all the folios that overlap the destination region. */
1338 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1339 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1340 
1341 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1342 			   i_size_read(target_inode), 0);
1343 
1344 	rc = -EOPNOTSUPP;
1345 	if (target_tcon->ses->server->ops->duplicate_extents) {
1346 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1347 			smb_file_src, smb_file_target, off, len, destoff);
1348 		if (rc == 0 && new_size > i_size_read(target_inode)) {
1349 			truncate_setsize(target_inode, new_size);
1350 			netfs_resize_file(&target_cifsi->netfs, new_size);
1351 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1352 					      new_size);
1353 		}
1354 	}
1355 
1356 	/* force revalidate of size and timestamps of target file now
1357 	   that target is updated on the server */
1358 	CIFS_I(target_inode)->time = 0;
1359 unlock:
1360 	/* although unlocking in the reverse order from locking is not
1361 	   strictly necessary here it is a little cleaner to be consistent */
1362 	unlock_two_nondirectories(src_inode, target_inode);
1363 out:
1364 	free_xid(xid);
1365 	return rc < 0 ? rc : len;
1366 }
1367 
1368 ssize_t cifs_file_copychunk_range(unsigned int xid,
1369 				struct file *src_file, loff_t off,
1370 				struct file *dst_file, loff_t destoff,
1371 				size_t len, unsigned int flags)
1372 {
1373 	struct inode *src_inode = file_inode(src_file);
1374 	struct inode *target_inode = file_inode(dst_file);
1375 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1376 	struct cifsFileInfo *smb_file_src;
1377 	struct cifsFileInfo *smb_file_target;
1378 	struct cifs_tcon *src_tcon;
1379 	struct cifs_tcon *target_tcon;
1380 	unsigned long long destend, fstart, fend;
1381 	ssize_t rc;
1382 
1383 	cifs_dbg(FYI, "copychunk range\n");
1384 
1385 	if (!src_file->private_data || !dst_file->private_data) {
1386 		rc = -EBADF;
1387 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1388 		goto out;
1389 	}
1390 
1391 	rc = -EXDEV;
1392 	smb_file_target = dst_file->private_data;
1393 	smb_file_src = src_file->private_data;
1394 	src_tcon = tlink_tcon(smb_file_src->tlink);
1395 	target_tcon = tlink_tcon(smb_file_target->tlink);
1396 
1397 	if (src_tcon->ses != target_tcon->ses) {
1398 		cifs_dbg(VFS, "source and target of copy not on same server\n");
1399 		goto out;
1400 	}
1401 
1402 	rc = -EOPNOTSUPP;
1403 	if (!target_tcon->ses->server->ops->copychunk_range)
1404 		goto out;
1405 
1406 	/*
1407 	 * Note: cifs case is easier than btrfs since server responsible for
1408 	 * checks for proper open modes and file type and if it wants
1409 	 * server could even support copy of range where source = target
1410 	 */
1411 	lock_two_nondirectories(target_inode, src_inode);
1412 
1413 	cifs_dbg(FYI, "about to flush pages\n");
1414 
1415 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1416 					  off + len - 1);
1417 	if (rc)
1418 		goto unlock;
1419 
1420 	/* The server-side copy will fail if the source crosses the EOF marker.
1421 	 * Advance the EOF marker after the flush above to the end of the range
1422 	 * if it's short of that.
1423 	 */
1424 	if (src_cifsi->server_eof < off + len) {
1425 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1426 		if (rc < 0)
1427 			goto unlock;
1428 	}
1429 
1430 	destend = destoff + len - 1;
1431 
1432 	/* Flush the folios at either end of the destination range to prevent
1433 	 * accidental loss of dirty data outside of the range.
1434 	 */
1435 	fstart = destoff;
1436 	fend = destend;
1437 
1438 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1439 	if (rc)
1440 		goto unlock;
1441 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1442 	if (rc)
1443 		goto unlock;
1444 
1445 	/* Discard all the folios that overlap the destination region. */
1446 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1447 
1448 	rc = file_modified(dst_file);
1449 	if (!rc) {
1450 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1451 			smb_file_src, smb_file_target, off, len, destoff);
1452 		if (rc > 0 && destoff + rc > i_size_read(target_inode))
1453 			truncate_setsize(target_inode, destoff + rc);
1454 	}
1455 
1456 	file_accessed(src_file);
1457 
1458 	/* force revalidate of size and timestamps of target file now
1459 	 * that target is updated on the server
1460 	 */
1461 	CIFS_I(target_inode)->time = 0;
1462 
1463 unlock:
1464 	/* although unlocking in the reverse order from locking is not
1465 	 * strictly necessary here it is a little cleaner to be consistent
1466 	 */
1467 	unlock_two_nondirectories(src_inode, target_inode);
1468 
1469 out:
1470 	return rc;
1471 }
1472 
1473 /*
1474  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1475  * is a dummy operation.
1476  */
1477 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1478 {
1479 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1480 		 file, datasync);
1481 
1482 	return 0;
1483 }
1484 
1485 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1486 				struct file *dst_file, loff_t destoff,
1487 				size_t len, unsigned int flags)
1488 {
1489 	unsigned int xid = get_xid();
1490 	ssize_t rc;
1491 	struct cifsFileInfo *cfile = dst_file->private_data;
1492 
1493 	if (cfile->swapfile) {
1494 		rc = -EOPNOTSUPP;
1495 		free_xid(xid);
1496 		return rc;
1497 	}
1498 
1499 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1500 					len, flags);
1501 	free_xid(xid);
1502 
1503 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1504 		rc = generic_copy_file_range(src_file, off, dst_file,
1505 					     destoff, len, flags);
1506 	return rc;
1507 }
1508 
1509 const struct file_operations cifs_file_ops = {
1510 	.read_iter = cifs_loose_read_iter,
1511 	.write_iter = cifs_file_write_iter,
1512 	.open = cifs_open,
1513 	.release = cifs_close,
1514 	.lock = cifs_lock,
1515 	.flock = cifs_flock,
1516 	.fsync = cifs_fsync,
1517 	.flush = cifs_flush,
1518 	.mmap  = cifs_file_mmap,
1519 	.splice_read = generic_file_splice_read,
1520 	.splice_write = iter_file_splice_write,
1521 	.llseek = cifs_llseek,
1522 	.unlocked_ioctl	= cifs_ioctl,
1523 	.copy_file_range = cifs_copy_file_range,
1524 	.remap_file_range = cifs_remap_file_range,
1525 	.setlease = cifs_setlease,
1526 	.fallocate = cifs_fallocate,
1527 };
1528 
1529 const struct file_operations cifs_file_strict_ops = {
1530 	.read_iter = cifs_strict_readv,
1531 	.write_iter = cifs_strict_writev,
1532 	.open = cifs_open,
1533 	.release = cifs_close,
1534 	.lock = cifs_lock,
1535 	.flock = cifs_flock,
1536 	.fsync = cifs_strict_fsync,
1537 	.flush = cifs_flush,
1538 	.mmap = cifs_file_strict_mmap,
1539 	.splice_read = generic_file_splice_read,
1540 	.splice_write = iter_file_splice_write,
1541 	.llseek = cifs_llseek,
1542 	.unlocked_ioctl	= cifs_ioctl,
1543 	.copy_file_range = cifs_copy_file_range,
1544 	.remap_file_range = cifs_remap_file_range,
1545 	.setlease = cifs_setlease,
1546 	.fallocate = cifs_fallocate,
1547 };
1548 
1549 const struct file_operations cifs_file_direct_ops = {
1550 	.read_iter = cifs_direct_readv,
1551 	.write_iter = cifs_direct_writev,
1552 	.open = cifs_open,
1553 	.release = cifs_close,
1554 	.lock = cifs_lock,
1555 	.flock = cifs_flock,
1556 	.fsync = cifs_fsync,
1557 	.flush = cifs_flush,
1558 	.mmap = cifs_file_mmap,
1559 	.splice_read = generic_file_splice_read,
1560 	.splice_write = iter_file_splice_write,
1561 	.unlocked_ioctl  = cifs_ioctl,
1562 	.copy_file_range = cifs_copy_file_range,
1563 	.remap_file_range = cifs_remap_file_range,
1564 	.llseek = cifs_llseek,
1565 	.setlease = cifs_setlease,
1566 	.fallocate = cifs_fallocate,
1567 };
1568 
1569 const struct file_operations cifs_file_nobrl_ops = {
1570 	.read_iter = cifs_loose_read_iter,
1571 	.write_iter = cifs_file_write_iter,
1572 	.open = cifs_open,
1573 	.release = cifs_close,
1574 	.fsync = cifs_fsync,
1575 	.flush = cifs_flush,
1576 	.mmap  = cifs_file_mmap,
1577 	.splice_read = generic_file_splice_read,
1578 	.splice_write = iter_file_splice_write,
1579 	.llseek = cifs_llseek,
1580 	.unlocked_ioctl	= cifs_ioctl,
1581 	.copy_file_range = cifs_copy_file_range,
1582 	.remap_file_range = cifs_remap_file_range,
1583 	.setlease = cifs_setlease,
1584 	.fallocate = cifs_fallocate,
1585 };
1586 
1587 const struct file_operations cifs_file_strict_nobrl_ops = {
1588 	.read_iter = cifs_strict_readv,
1589 	.write_iter = cifs_strict_writev,
1590 	.open = cifs_open,
1591 	.release = cifs_close,
1592 	.fsync = cifs_strict_fsync,
1593 	.flush = cifs_flush,
1594 	.mmap = cifs_file_strict_mmap,
1595 	.splice_read = generic_file_splice_read,
1596 	.splice_write = iter_file_splice_write,
1597 	.llseek = cifs_llseek,
1598 	.unlocked_ioctl	= cifs_ioctl,
1599 	.copy_file_range = cifs_copy_file_range,
1600 	.remap_file_range = cifs_remap_file_range,
1601 	.setlease = cifs_setlease,
1602 	.fallocate = cifs_fallocate,
1603 };
1604 
1605 const struct file_operations cifs_file_direct_nobrl_ops = {
1606 	.read_iter = cifs_direct_readv,
1607 	.write_iter = cifs_direct_writev,
1608 	.open = cifs_open,
1609 	.release = cifs_close,
1610 	.fsync = cifs_fsync,
1611 	.flush = cifs_flush,
1612 	.mmap = cifs_file_mmap,
1613 	.splice_read = generic_file_splice_read,
1614 	.splice_write = iter_file_splice_write,
1615 	.unlocked_ioctl  = cifs_ioctl,
1616 	.copy_file_range = cifs_copy_file_range,
1617 	.remap_file_range = cifs_remap_file_range,
1618 	.llseek = cifs_llseek,
1619 	.setlease = cifs_setlease,
1620 	.fallocate = cifs_fallocate,
1621 };
1622 
1623 const struct file_operations cifs_dir_ops = {
1624 	.iterate_shared = cifs_readdir,
1625 	.release = cifs_closedir,
1626 	.read    = generic_read_dir,
1627 	.unlocked_ioctl  = cifs_ioctl,
1628 	.copy_file_range = cifs_copy_file_range,
1629 	.remap_file_range = cifs_remap_file_range,
1630 	.llseek = generic_file_llseek,
1631 	.fsync = cifs_dir_fsync,
1632 };
1633 
1634 static void
1635 cifs_init_once(void *inode)
1636 {
1637 	struct cifsInodeInfo *cifsi = inode;
1638 
1639 	inode_init_once(&cifsi->netfs.inode);
1640 	init_rwsem(&cifsi->lock_sem);
1641 }
1642 
1643 static int __init
1644 cifs_init_inodecache(void)
1645 {
1646 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1647 					      sizeof(struct cifsInodeInfo),
1648 					      0, (SLAB_RECLAIM_ACCOUNT|
1649 						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1650 					      cifs_init_once);
1651 	if (cifs_inode_cachep == NULL)
1652 		return -ENOMEM;
1653 
1654 	return 0;
1655 }
1656 
1657 static void
1658 cifs_destroy_inodecache(void)
1659 {
1660 	/*
1661 	 * Make sure all delayed rcu free inodes are flushed before we
1662 	 * destroy cache.
1663 	 */
1664 	rcu_barrier();
1665 	kmem_cache_destroy(cifs_inode_cachep);
1666 }
1667 
1668 static int
1669 cifs_init_request_bufs(void)
1670 {
1671 	/*
1672 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1673 	 * allocate some more bytes for CIFS.
1674 	 */
1675 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1676 
1677 	if (CIFSMaxBufSize < 8192) {
1678 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1679 	Unicode path name has to fit in any SMB/CIFS path based frames */
1680 		CIFSMaxBufSize = 8192;
1681 	} else if (CIFSMaxBufSize > 1024*127) {
1682 		CIFSMaxBufSize = 1024 * 127;
1683 	} else {
1684 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1685 	}
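	/*
	 * The 0x1FE00 mask clears the low nine bits, rounding the size down to
	 * a 512-byte multiple within the 8192..130048 range enforced above
	 * (e.g. a requested 16000 becomes 15872).
	 */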
1686 /*
1687 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1688 		 CIFSMaxBufSize, CIFSMaxBufSize);
1689 */
1690 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1691 					    CIFSMaxBufSize + max_hdr_size, 0,
1692 					    SLAB_HWCACHE_ALIGN, 0,
1693 					    CIFSMaxBufSize + max_hdr_size,
1694 					    NULL);
1695 	if (cifs_req_cachep == NULL)
1696 		return -ENOMEM;
1697 
1698 	if (cifs_min_rcv < 1)
1699 		cifs_min_rcv = 1;
1700 	else if (cifs_min_rcv > 64) {
1701 		cifs_min_rcv = 64;
1702 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1703 	}
1704 
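	/*
	 * The mempool keeps at least cifs_min_rcv request buffers preallocated
	 * so that receive processing can still make forward progress when the
	 * slab allocator is under memory pressure.
	 */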
1705 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1706 						  cifs_req_cachep);
1707 
1708 	if (cifs_req_poolp == NULL) {
1709 		kmem_cache_destroy(cifs_req_cachep);
1710 		return -ENOMEM;
1711 	}
1712 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1713 	   almost all handle-based requests (but not for write responses, nor for
1714 	   path-based requests).  A smaller size would have been more efficient
1715 	   (packing multiple slab items onto one 4k page) when page debugging is
1716 	   on, but this larger size lets more SMBs use the small buffer
1717 	   allocation, and allocating one buffer per page off the slab is still
1718 	   far cheaper than the 17K (5 page) allocation needed for large cifs
1719 	   buffers even when page debugging is on */
1720 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1721 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1722 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1723 	if (cifs_sm_req_cachep == NULL) {
1724 		mempool_destroy(cifs_req_poolp);
1725 		kmem_cache_destroy(cifs_req_cachep);
1726 		return -ENOMEM;
1727 	}
1728 
1729 	if (cifs_min_small < 2)
1730 		cifs_min_small = 2;
1731 	else if (cifs_min_small > 256) {
1732 		cifs_min_small = 256;
1733 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1734 	}
1735 
1736 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1737 						     cifs_sm_req_cachep);
1738 
1739 	if (cifs_sm_req_poolp == NULL) {
1740 		mempool_destroy(cifs_req_poolp);
1741 		kmem_cache_destroy(cifs_req_cachep);
1742 		kmem_cache_destroy(cifs_sm_req_cachep);
1743 		return -ENOMEM;
1744 	}
1745 
1746 	return 0;
1747 }
1748 
1749 static void
1750 cifs_destroy_request_bufs(void)
1751 {
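	/* destroy each mempool before its backing kmem_cache, since tearing the
	   pool down returns its preallocated elements to that cache */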
1752 	mempool_destroy(cifs_req_poolp);
1753 	kmem_cache_destroy(cifs_req_cachep);
1754 	mempool_destroy(cifs_sm_req_poolp);
1755 	kmem_cache_destroy(cifs_sm_req_cachep);
1756 }
1757 
1758 static int init_mids(void)
1759 {
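	/*
	 * mid_q_entry objects track in-flight requests by multiplex id (mid),
	 * which is how responses from the server are matched back to the
	 * requests that produced them.
	 */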
1760 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1761 					    sizeof(struct mid_q_entry), 0,
1762 					    SLAB_HWCACHE_ALIGN, NULL);
1763 	if (cifs_mid_cachep == NULL)
1764 		return -ENOMEM;
1765 
1766 	/* 3 is a reasonable minimum number of simultaneous operations */
1767 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1768 	if (cifs_mid_poolp == NULL) {
1769 		kmem_cache_destroy(cifs_mid_cachep);
1770 		return -ENOMEM;
1771 	}
1772 
1773 	return 0;
1774 }
1775 
1776 static void destroy_mids(void)
1777 {
1778 	mempool_destroy(cifs_mid_poolp);
1779 	kmem_cache_destroy(cifs_mid_cachep);
1780 }
1781 
1782 static int __init
1783 init_cifs(void)
1784 {
1785 	int rc = 0;
1786 	cifs_proc_init();
1787 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1788 /*
1789  *  Initialize Global counters
1790  */
1791 	atomic_set(&sesInfoAllocCount, 0);
1792 	atomic_set(&tconInfoAllocCount, 0);
1793 	atomic_set(&tcpSesNextId, 0);
1794 	atomic_set(&tcpSesAllocCount, 0);
1795 	atomic_set(&tcpSesReconnectCount, 0);
1796 	atomic_set(&tconInfoReconnectCount, 0);
1797 
1798 	atomic_set(&buf_alloc_count, 0);
1799 	atomic_set(&small_buf_alloc_count, 0);
1800 #ifdef CONFIG_CIFS_STATS2
1801 	atomic_set(&total_buf_alloc_count, 0);
1802 	atomic_set(&total_small_buf_alloc_count, 0);
1803 	if (slow_rsp_threshold < 1)
1804 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1805 	else if (slow_rsp_threshold > 32767)
1806 		cifs_dbg(VFS,
1807 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1808 #endif /* CONFIG_CIFS_STATS2 */
1809 
1810 	atomic_set(&mid_count, 0);
1811 	GlobalCurrentXid = 0;
1812 	GlobalTotalActiveXid = 0;
1813 	GlobalMaxActiveXid = 0;
1814 	spin_lock_init(&cifs_tcp_ses_lock);
1815 	spin_lock_init(&GlobalMid_Lock);
1816 
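	/* random per-boot value mixed into lock-owner hashes elsewhere in the
	   client so that raw kernel pointer values are not used directly as
	   byte-range lock owner keys */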
1817 	cifs_lock_secret = get_random_u32();
1818 
1819 	if (cifs_max_pending < 2) {
1820 		cifs_max_pending = 2;
1821 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1822 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1823 		cifs_max_pending = CIFS_MAX_REQ;
1824 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1825 			 CIFS_MAX_REQ);
1826 	}
1827 
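	/*
	 * WQ_FREEZABLE lets these workqueues be frozen during suspend so no
	 * SMB traffic is generated mid-freeze; WQ_MEM_RECLAIM guarantees a
	 * rescuer thread so queued work can make progress under memory
	 * pressure.
	 */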
1828 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1829 	if (!cifsiod_wq) {
1830 		rc = -ENOMEM;
1831 		goto out_clean_proc;
1832 	}
1833 
1834 	/*
1835 	 * Consider setting limit != 0 in the future, perhaps to
1836 	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
1837 	 * threads; Documentation/core-api/workqueue.rst recommends 0, however.
1838 	 */
1839 
1840 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1841 	decrypt_wq = alloc_workqueue("smb3decryptd",
1842 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1843 	if (!decrypt_wq) {
1844 		rc = -ENOMEM;
1845 		goto out_destroy_cifsiod_wq;
1846 	}
1847 
1848 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1849 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1850 	if (!fileinfo_put_wq) {
1851 		rc = -ENOMEM;
1852 		goto out_destroy_decrypt_wq;
1853 	}
1854 
1855 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1856 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1857 	if (!cifsoplockd_wq) {
1858 		rc = -ENOMEM;
1859 		goto out_destroy_fileinfo_put_wq;
1860 	}
1861 
1862 	deferredclose_wq = alloc_workqueue("deferredclose",
1863 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1864 	if (!deferredclose_wq) {
1865 		rc = -ENOMEM;
1866 		goto out_destroy_cifsoplockd_wq;
1867 	}
1868 
1869 	rc = cifs_init_inodecache();
1870 	if (rc)
1871 		goto out_destroy_deferredclose_wq;
1872 
1873 	rc = init_mids();
1874 	if (rc)
1875 		goto out_destroy_inodecache;
1876 
1877 	rc = cifs_init_request_bufs();
1878 	if (rc)
1879 		goto out_destroy_mids;
1880 
1881 #ifdef CONFIG_CIFS_DFS_UPCALL
1882 	rc = dfs_cache_init();
1883 	if (rc)
1884 		goto out_destroy_request_bufs;
1885 #endif /* CONFIG_CIFS_DFS_UPCALL */
1886 #ifdef CONFIG_CIFS_UPCALL
1887 	rc = init_cifs_spnego();
1888 	if (rc)
1889 		goto out_destroy_dfs_cache;
1890 #endif /* CONFIG_CIFS_UPCALL */
1891 #ifdef CONFIG_CIFS_SWN_UPCALL
1892 	rc = cifs_genl_init();
1893 	if (rc)
1894 		goto out_register_key_type;
1895 #endif /* CONFIG_CIFS_SWN_UPCALL */
1896 
1897 	rc = init_cifs_idmap();
1898 	if (rc)
1899 		goto out_cifs_swn_init;
1900 
1901 	rc = register_filesystem(&cifs_fs_type);
1902 	if (rc)
1903 		goto out_init_cifs_idmap;
1904 
1905 	rc = register_filesystem(&smb3_fs_type);
1906 	if (rc) {
1907 		unregister_filesystem(&cifs_fs_type);
1908 		goto out_init_cifs_idmap;
1909 	}
1910 
1911 	return 0;
1912 
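	/*
	 * Error unwinding: each label below undoes the setup performed before
	 * the corresponding failure point, in reverse order of initialization;
	 * labels wrapped in #ifdef only exist when the matching upcall feature
	 * is compiled in, mirroring the conditional setup calls above.
	 */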
1913 out_init_cifs_idmap:
1914 	exit_cifs_idmap();
1915 out_cifs_swn_init:
1916 #ifdef CONFIG_CIFS_SWN_UPCALL
1917 	cifs_genl_exit();
1918 out_register_key_type:
1919 #endif
1920 #ifdef CONFIG_CIFS_UPCALL
1921 	exit_cifs_spnego();
1922 out_destroy_dfs_cache:
1923 #endif
1924 #ifdef CONFIG_CIFS_DFS_UPCALL
1925 	dfs_cache_destroy();
1926 out_destroy_request_bufs:
1927 #endif
1928 	cifs_destroy_request_bufs();
1929 out_destroy_mids:
1930 	destroy_mids();
1931 out_destroy_inodecache:
1932 	cifs_destroy_inodecache();
1933 out_destroy_deferredclose_wq:
1934 	destroy_workqueue(deferredclose_wq);
1935 out_destroy_cifsoplockd_wq:
1936 	destroy_workqueue(cifsoplockd_wq);
1937 out_destroy_fileinfo_put_wq:
1938 	destroy_workqueue(fileinfo_put_wq);
1939 out_destroy_decrypt_wq:
1940 	destroy_workqueue(decrypt_wq);
1941 out_destroy_cifsiod_wq:
1942 	destroy_workqueue(cifsiod_wq);
1943 out_clean_proc:
1944 	cifs_proc_clean();
1945 	return rc;
1946 }
1947 
1948 static void __exit
exit_cifs(void)1949 exit_cifs(void)
1950 {
1951 	cifs_dbg(NOISY, "exit_smb3\n");
1952 	unregister_filesystem(&cifs_fs_type);
1953 	unregister_filesystem(&smb3_fs_type);
1954 	cifs_dfs_release_automount_timer();
1955 	exit_cifs_idmap();
1956 #ifdef CONFIG_CIFS_SWN_UPCALL
1957 	cifs_genl_exit();
1958 #endif
1959 #ifdef CONFIG_CIFS_UPCALL
1960 	exit_cifs_spnego();
1961 #endif
1962 #ifdef CONFIG_CIFS_DFS_UPCALL
1963 	dfs_cache_destroy();
1964 #endif
1965 	cifs_destroy_request_bufs();
1966 	destroy_mids();
1967 	cifs_destroy_inodecache();
1968 	destroy_workqueue(deferredclose_wq);
1969 	destroy_workqueue(cifsoplockd_wq);
1970 	destroy_workqueue(decrypt_wq);
1971 	destroy_workqueue(fileinfo_put_wq);
1972 	destroy_workqueue(cifsiod_wq);
1973 	cifs_proc_clean();
1974 }
1975 
1976 MODULE_AUTHOR("Steve French");
1977 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
1978 MODULE_DESCRIPTION
1979 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1980 	"also older servers complying with the SNIA CIFS Specification)");
1981 MODULE_VERSION(CIFS_VERSION);
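/*
 * Soft dependencies: initramfs generators (e.g. dracut, mkinitramfs) read
 * these so that the crypto modules needed for SMB signing and encryption are
 * packaged alongside cifs.ko even though nothing links against them directly.
 */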
1982 MODULE_SOFTDEP("ecb");
1983 MODULE_SOFTDEP("hmac");
1984 MODULE_SOFTDEP("md5");
1985 MODULE_SOFTDEP("nls");
1986 MODULE_SOFTDEP("aes");
1987 MODULE_SOFTDEP("cmac");
1988 MODULE_SOFTDEP("sha256");
1989 MODULE_SOFTDEP("sha512");
1990 MODULE_SOFTDEP("aead2");
1991 MODULE_SOFTDEP("ccm");
1992 MODULE_SOFTDEP("gcm");
1993 module_init(init_cifs)
1994 module_exit(exit_cifs)
1995