1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
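/*
 * The shift values above follow the DOS packed format: dates hold
 * (year - 1980) in bits 9-15, month in bits 5-8 and day in bits 0-4;
 * times hold hours in bits 11-15, minutes in bits 5-10 and two-second
 * units in bits 0-4 (hence 29, i.e. 58 seconds, as the maximum).
 */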
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif /* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
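/*
 * The tunables above can be set at module load time, e.g. (illustrative
 * values within the documented ranges, not recommendations):
 *	modprobe cifs CIFSMaxBufSize=65536 cifs_max_pending=256 enable_oplocks=1
 */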
153 
154 struct workqueue_struct	*cifsiod_wq;
155 struct workqueue_struct	*decrypt_wq;
156 struct workqueue_struct	*fileinfo_put_wq;
157 struct workqueue_struct	*cifsoplockd_wq;
158 struct workqueue_struct	*deferredclose_wq;
159 struct workqueue_struct	*serverclose_wq;
160 struct workqueue_struct	*cfid_put_wq;
161 __u32 cifs_lock_secret;
162 
163 /*
164  * Bumps refcount for cifs super block.
165  * Note that it should be only called if a reference to VFS super block is
166  * already held, e.g. in open-type syscalls context. Otherwise it can race with
167  * atomic_dec_and_test in deactivate_locked_super.
168  */
169 void
170 cifs_sb_active(struct super_block *sb)
171 {
172 	struct cifs_sb_info *server = CIFS_SB(sb);
173 
174 	if (atomic_inc_return(&server->active) == 1)
175 		atomic_inc(&sb->s_active);
176 }
177 
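/*
 * Drops the reference taken by cifs_sb_active(); the final put also
 * releases the VFS superblock reference via deactivate_super().
 */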
178 void
179 cifs_sb_deactive(struct super_block *sb)
180 {
181 	struct cifs_sb_info *server = CIFS_SB(sb);
182 
183 	if (atomic_dec_and_test(&server->active))
184 		deactivate_super(sb);
185 }
186 
187 static int
188 cifs_read_super(struct super_block *sb)
189 {
190 	struct inode *inode;
191 	struct cifs_sb_info *cifs_sb;
192 	struct cifs_tcon *tcon;
193 	struct timespec64 ts;
194 	int rc = 0;
195 
196 	cifs_sb = CIFS_SB(sb);
197 	tcon = cifs_sb_master_tcon(cifs_sb);
198 
199 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
200 		sb->s_flags |= SB_POSIXACL;
201 
202 	if (tcon->snapshot_time)
203 		sb->s_flags |= SB_RDONLY;
204 
205 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
206 		sb->s_maxbytes = MAX_LFS_FILESIZE;
207 	else
208 		sb->s_maxbytes = MAX_NON_LFS;
209 
210 	/*
211 	 * Some very old servers like DOS and OS/2 used 2 second granularity
212 	 * (while all current servers use 100ns granularity - see MS-DTYP)
213 	 * but 1 second is the maximum allowed granularity for the VFS
214 	 * so for old servers set time granularity to 1 second while for
215 	 * everything else (current servers) set it to 100ns.
216 	 */
217 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
218 	    ((tcon->ses->capabilities &
219 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
220 	    !tcon->unix_ext) {
221 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
222 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
223 		sb->s_time_min = ts.tv_sec;
224 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
225 				    cpu_to_le16(SMB_TIME_MAX), 0);
226 		sb->s_time_max = ts.tv_sec;
227 	} else {
228 		/*
229 		 * Almost every server, including all SMB2+, uses DCE TIME
230 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
231 		 */
232 		sb->s_time_gran = 100;
233 		ts = cifs_NTtimeToUnix(0);
234 		sb->s_time_min = ts.tv_sec;
235 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
236 		sb->s_time_max = ts.tv_sec;
237 	}
238 
239 	sb->s_magic = CIFS_SUPER_MAGIC;
240 	sb->s_op = &cifs_super_ops;
241 	sb->s_xattr = cifs_xattr_handlers;
242 	rc = super_setup_bdi(sb);
243 	if (rc)
244 		goto out_no_root;
245 	/* tune readahead according to rsize if readahead size not set on mount */
246 	if (cifs_sb->ctx->rsize == 0)
247 		cifs_sb->ctx->rsize =
248 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
249 	if (cifs_sb->ctx->rasize)
250 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
251 	else
252 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
253 
254 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
255 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
256 	inode = cifs_root_iget(sb);
257 
258 	if (IS_ERR(inode)) {
259 		rc = PTR_ERR(inode);
260 		goto out_no_root;
261 	}
262 
263 	if (tcon->nocase)
264 		sb->s_d_op = &cifs_ci_dentry_ops;
265 	else
266 		sb->s_d_op = &cifs_dentry_ops;
267 
268 	sb->s_root = d_make_root(inode);
269 	if (!sb->s_root) {
270 		rc = -ENOMEM;
271 		goto out_no_root;
272 	}
273 
274 #ifdef CONFIG_CIFS_NFSD_EXPORT
275 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
276 		cifs_dbg(FYI, "export ops supported\n");
277 		sb->s_export_op = &cifs_export_ops;
278 	}
279 #endif /* CONFIG_CIFS_NFSD_EXPORT */
280 
281 	return 0;
282 
283 out_no_root:
284 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
285 	return rc;
286 }
287 
288 static void cifs_kill_sb(struct super_block *sb)
289 {
290 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
291 
292 	/*
293 	 * We need to release all dentries for the cached directories
294 	 * before we kill the sb.
295 	 */
296 	if (cifs_sb->root) {
297 		close_all_cached_dirs(cifs_sb);
298 
299 		/* finally release root dentry */
300 		dput(cifs_sb->root);
301 		cifs_sb->root = NULL;
302 	}
303 
304 	kill_anon_super(sb);
305 	cifs_umount(cifs_sb);
306 }
307 
308 static int
309 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
310 {
311 	struct super_block *sb = dentry->d_sb;
312 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
313 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
314 	struct TCP_Server_Info *server = tcon->ses->server;
315 	unsigned int xid;
316 	int rc = 0;
317 	const char *full_path;
318 	void *page;
319 
320 	xid = get_xid();
321 	page = alloc_dentry_path();
322 
323 	full_path = build_path_from_dentry(dentry, page);
324 	if (IS_ERR(full_path)) {
325 		rc = PTR_ERR(full_path);
326 		goto statfs_out;
327 	}
328 
329 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
330 		buf->f_namelen =
331 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
332 	else
333 		buf->f_namelen = PATH_MAX;
334 
335 	buf->f_fsid.val[0] = tcon->vol_serial_number;
336 	/* we use part of the volume create time for extra randomness, see man statfs */
337 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
338 
339 	buf->f_files = 0;	/* undefined */
340 	buf->f_ffree = 0;	/* unlimited */
341 
342 	if (server->ops->queryfs)
343 		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
344 
345 statfs_out:
346 	free_dentry_path(page);
347 	free_xid(xid);
348 	return rc;
349 }
350 
351 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
352 {
353 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
354 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
355 	struct TCP_Server_Info *server = tcon->ses->server;
356 
357 	if (server->ops->fallocate)
358 		return server->ops->fallocate(file, tcon, mode, off, len);
359 
360 	return -EOPNOTSUPP;
361 }
362 
363 static int cifs_permission(struct mnt_idmap *idmap,
364 			   struct inode *inode, int mask)
365 {
366 	struct cifs_sb_info *cifs_sb;
367 
368 	cifs_sb = CIFS_SB(inode->i_sb);
369 
370 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
371 		if ((mask & MAY_EXEC) && !execute_ok(inode))
372 			return -EACCES;
373 		else
374 			return 0;
375 	} else /* file mode might have been restricted at mount time
376 		on the client (above and beyond ACL on servers) for
377 		servers which do not support setting and viewing mode bits,
378 		so allowing client to check permissions is useful */
379 		return generic_permission(&nop_mnt_idmap, inode, mask);
380 }
381 
382 static struct kmem_cache *cifs_inode_cachep;
383 static struct kmem_cache *cifs_req_cachep;
384 static struct kmem_cache *cifs_mid_cachep;
385 static struct kmem_cache *cifs_sm_req_cachep;
386 static struct kmem_cache *cifs_io_request_cachep;
387 static struct kmem_cache *cifs_io_subrequest_cachep;
388 mempool_t *cifs_sm_req_poolp;
389 mempool_t *cifs_req_poolp;
390 mempool_t *cifs_mid_poolp;
391 mempool_t cifs_io_request_pool;
392 mempool_t cifs_io_subrequest_pool;
393 
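/* Allocate a cifsInodeInfo from the inode slab and set safe defaults */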
394 static struct inode *
395 cifs_alloc_inode(struct super_block *sb)
396 {
397 	struct cifsInodeInfo *cifs_inode;
398 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
399 	if (!cifs_inode)
400 		return NULL;
401 	cifs_inode->cifsAttrs = 0x20;	/* default */
402 	cifs_inode->time = 0;
403 	/*
404 	 * Until the file is open and we have gotten oplock info back from the
405 	 * server, can not assume caching of file data or metadata.
406 	 */
407 	cifs_set_oplock_level(cifs_inode, 0);
408 	cifs_inode->lease_granted = false;
409 	cifs_inode->flags = 0;
410 	spin_lock_init(&cifs_inode->writers_lock);
411 	cifs_inode->writers = 0;
412 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
413 	cifs_inode->netfs.remote_i_size = 0;
414 	cifs_inode->uniqueid = 0;
415 	cifs_inode->createtime = 0;
416 	cifs_inode->epoch = 0;
417 	spin_lock_init(&cifs_inode->open_file_lock);
418 	generate_random_uuid(cifs_inode->lease_key);
419 	cifs_inode->symlink_target = NULL;
420 
421 	/*
422 	 * Can not set i_flags here - they get immediately overwritten to zero
423 	 * by the VFS.
424 	 */
425 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
426 	INIT_LIST_HEAD(&cifs_inode->openFileList);
427 	INIT_LIST_HEAD(&cifs_inode->llist);
428 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
429 	spin_lock_init(&cifs_inode->deferred_lock);
430 	return &cifs_inode->netfs.inode;
431 }
432 
433 static void
434 cifs_free_inode(struct inode *inode)
435 {
436 	struct cifsInodeInfo *cinode = CIFS_I(inode);
437 
438 	if (S_ISLNK(inode->i_mode))
439 		kfree(cinode->symlink_target);
440 	kmem_cache_free(cifs_inode_cachep, cinode);
441 }
442 
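/*
 * Called when the VFS evicts the inode: wait for outstanding netfs I/O,
 * drop the page cache and release any fscache cookie for this inode.
 */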
443 static void
444 cifs_evict_inode(struct inode *inode)
445 {
446 	netfs_wait_for_outstanding_io(inode);
447 	truncate_inode_pages_final(&inode->i_data);
448 	if (inode->i_state & I_PINNING_NETFS_WB)
449 		cifs_fscache_unuse_inode_cookie(inode, true);
450 	cifs_fscache_release_inode_cookie(inode);
451 	clear_inode(inode);
452 }
453 
454 static void
455 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
456 {
457 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
458 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
459 
460 	seq_puts(s, ",addr=");
461 
462 	switch (server->dstaddr.ss_family) {
463 	case AF_INET:
464 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
465 		break;
466 	case AF_INET6:
467 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
468 		if (sa6->sin6_scope_id)
469 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
470 		break;
471 	default:
472 		seq_puts(s, "(unknown)");
473 	}
474 	if (server->rdma)
475 		seq_puts(s, ",rdma");
476 }
477 
478 static void
479 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
480 {
481 	if (ses->sectype == Unspecified) {
482 		if (ses->user_name == NULL)
483 			seq_puts(s, ",sec=none");
484 		return;
485 	}
486 
487 	seq_puts(s, ",sec=");
488 
489 	switch (ses->sectype) {
490 	case NTLMv2:
491 		seq_puts(s, "ntlmv2");
492 		break;
493 	case Kerberos:
494 		seq_puts(s, "krb5");
495 		break;
496 	case RawNTLMSSP:
497 		seq_puts(s, "ntlmssp");
498 		break;
499 	default:
500 		/* shouldn't ever happen */
501 		seq_puts(s, "unknown");
502 		break;
503 	}
504 
505 	if (ses->sign)
506 		seq_puts(s, "i");
507 
508 	if (ses->sectype == Kerberos)
509 		seq_printf(s, ",cruid=%u",
510 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
511 }
512 
513 static void
514 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
515 {
516 	seq_puts(s, ",cache=");
517 
518 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
519 		seq_puts(s, "strict");
520 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
521 		seq_puts(s, "none");
522 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
523 		seq_puts(s, "singleclient"); /* assume only one client access */
524 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
525 		seq_puts(s, "ro"); /* read only caching assumed */
526 	else
527 		seq_puts(s, "loose");
528 }
529 
530 /*
531  * cifs_show_devname() is used so we show the mount device name with correct
532  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
533  */
534 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
535 {
536 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
537 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
538 
539 	if (devname == NULL)
540 		seq_puts(m, "none");
541 	else {
542 		convert_delimiter(devname, '/');
543 		/* escape all spaces in share names */
544 		seq_escape(m, devname, " \t");
545 		kfree(devname);
546 	}
547 	return 0;
548 }
549 
550 static void
551 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
552 {
553 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
554 		seq_puts(s, ",upcall_target=app");
555 		return;
556 	}
557 
558 	seq_puts(s, ",upcall_target=");
559 
560 	switch (cifs_sb->ctx->upcall_target) {
561 	case UPTARGET_APP:
562 		seq_puts(s, "app");
563 		break;
564 	case UPTARGET_MOUNT:
565 		seq_puts(s, "mount");
566 		break;
567 	default:
568 		/* shouldn't ever happen */
569 		seq_puts(s, "unknown");
570 		break;
571 	}
572 }
573 
574 /*
575  * cifs_show_options() is for displaying mount options in /proc/mounts.
576  * Not all settable options are displayed but most of the important
577  * ones are.
578  */
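/*
 * Illustrative fragment of what this emits in /proc/mounts (exact values
 * depend on the mount): ",vers=3.1.1,sec=ntlmssp,cache=strict,uid=0,noforceuid,..."
 */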
579 static int
580 cifs_show_options(struct seq_file *s, struct dentry *root)
581 {
582 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
583 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
584 	struct sockaddr *srcaddr;
585 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
586 
587 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
588 	cifs_show_security(s, tcon->ses);
589 	cifs_show_cache_flavor(s, cifs_sb);
590 	cifs_show_upcall_target(s, cifs_sb);
591 
592 	if (tcon->no_lease)
593 		seq_puts(s, ",nolease");
594 	if (cifs_sb->ctx->multiuser)
595 		seq_puts(s, ",multiuser");
596 	else if (tcon->ses->user_name)
597 		seq_show_option(s, "username", tcon->ses->user_name);
598 
599 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
600 		seq_show_option(s, "domain", tcon->ses->domainName);
601 
602 	if (srcaddr->sa_family != AF_UNSPEC) {
603 		struct sockaddr_in *saddr4;
604 		struct sockaddr_in6 *saddr6;
605 		saddr4 = (struct sockaddr_in *)srcaddr;
606 		saddr6 = (struct sockaddr_in6 *)srcaddr;
607 		if (srcaddr->sa_family == AF_INET6)
608 			seq_printf(s, ",srcaddr=%pI6c",
609 				   &saddr6->sin6_addr);
610 		else if (srcaddr->sa_family == AF_INET)
611 			seq_printf(s, ",srcaddr=%pI4",
612 				   &saddr4->sin_addr.s_addr);
613 		else
614 			seq_printf(s, ",srcaddr=BAD-AF:%i",
615 				   (int)(srcaddr->sa_family));
616 	}
617 
618 	seq_printf(s, ",uid=%u",
619 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
620 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
621 		seq_puts(s, ",forceuid");
622 	else
623 		seq_puts(s, ",noforceuid");
624 
625 	seq_printf(s, ",gid=%u",
626 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
627 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
628 		seq_puts(s, ",forcegid");
629 	else
630 		seq_puts(s, ",noforcegid");
631 
632 	cifs_show_address(s, tcon->ses->server);
633 
634 	if (!tcon->unix_ext)
635 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
636 					   cifs_sb->ctx->file_mode,
637 					   cifs_sb->ctx->dir_mode);
638 	if (cifs_sb->ctx->iocharset)
639 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
640 	if (tcon->seal)
641 		seq_puts(s, ",seal");
642 	else if (tcon->ses->server->ignore_signature)
643 		seq_puts(s, ",signloosely");
644 	if (tcon->nocase)
645 		seq_puts(s, ",nocase");
646 	if (tcon->nodelete)
647 		seq_puts(s, ",nodelete");
648 	if (cifs_sb->ctx->no_sparse)
649 		seq_puts(s, ",nosparse");
650 	if (tcon->local_lease)
651 		seq_puts(s, ",locallease");
652 	if (tcon->retry)
653 		seq_puts(s, ",hard");
654 	else
655 		seq_puts(s, ",soft");
656 	if (tcon->use_persistent)
657 		seq_puts(s, ",persistenthandles");
658 	else if (tcon->use_resilient)
659 		seq_puts(s, ",resilienthandles");
660 	if (tcon->posix_extensions)
661 		seq_puts(s, ",posix");
662 	else if (tcon->unix_ext)
663 		seq_puts(s, ",unix");
664 	else
665 		seq_puts(s, ",nounix");
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
667 		seq_puts(s, ",nodfs");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
669 		seq_puts(s, ",posixpaths");
670 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
671 		seq_puts(s, ",setuids");
672 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
673 		seq_puts(s, ",idsfromsid");
674 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
675 		seq_puts(s, ",serverino");
676 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
677 		seq_puts(s, ",rwpidforward");
678 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
679 		seq_puts(s, ",forcemand");
680 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
681 		seq_puts(s, ",nouser_xattr");
682 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
683 		seq_puts(s, ",mapchars");
684 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
685 		seq_puts(s, ",mapposix");
686 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
687 		seq_puts(s, ",sfu");
688 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
689 		seq_puts(s, ",nobrl");
690 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
691 		seq_puts(s, ",nohandlecache");
692 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
693 		seq_puts(s, ",modefromsid");
694 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
695 		seq_puts(s, ",cifsacl");
696 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
697 		seq_puts(s, ",dynperm");
698 	if (root->d_sb->s_flags & SB_POSIXACL)
699 		seq_puts(s, ",acl");
700 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
701 		seq_puts(s, ",mfsymlinks");
702 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
703 		seq_puts(s, ",fsc");
704 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
705 		seq_puts(s, ",nostrictsync");
706 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
707 		seq_puts(s, ",noperm");
708 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
709 		seq_printf(s, ",backupuid=%u",
710 			   from_kuid_munged(&init_user_ns,
711 					    cifs_sb->ctx->backupuid));
712 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
713 		seq_printf(s, ",backupgid=%u",
714 			   from_kgid_munged(&init_user_ns,
715 					    cifs_sb->ctx->backupgid));
716 	seq_show_option(s, "reparse",
717 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
718 
719 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
720 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
721 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
722 	if (cifs_sb->ctx->rasize)
723 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
724 	if (tcon->ses->server->min_offload)
725 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
726 	if (tcon->ses->server->retrans)
727 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
728 	seq_printf(s, ",echo_interval=%lu",
729 			tcon->ses->server->echo_interval / HZ);
730 
731 	/* Only display the following if overridden on mount */
732 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
733 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
734 	if (tcon->ses->server->tcp_nodelay)
735 		seq_puts(s, ",tcpnodelay");
736 	if (tcon->ses->server->noautotune)
737 		seq_puts(s, ",noautotune");
738 	if (tcon->ses->server->noblocksnd)
739 		seq_puts(s, ",noblocksend");
740 	if (tcon->ses->server->nosharesock)
741 		seq_puts(s, ",nosharesock");
742 
743 	if (tcon->snapshot_time)
744 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
745 	if (tcon->handle_timeout)
746 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
747 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
748 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
749 
750 	/*
751 	 * Display file and directory attribute timeout in seconds.
752 	 * If the file and directory attribute timeouts are the same, then
753 	 * actimeo was likely specified on mount.
754 	 */
755 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
756 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
757 	else {
758 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
759 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
760 	}
761 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
762 
763 	if (tcon->ses->chan_max > 1)
764 		seq_printf(s, ",multichannel,max_channels=%zu",
765 			   tcon->ses->chan_max);
766 
767 	if (tcon->use_witness)
768 		seq_puts(s, ",witness");
769 
770 	return 0;
771 }
772 
773 static void cifs_umount_begin(struct super_block *sb)
774 {
775 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
776 	struct cifs_tcon *tcon;
777 
778 	if (cifs_sb == NULL)
779 		return;
780 
781 	tcon = cifs_sb_master_tcon(cifs_sb);
782 
783 	spin_lock(&cifs_tcp_ses_lock);
784 	spin_lock(&tcon->tc_lock);
785 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
786 			    netfs_trace_tcon_ref_see_umount);
787 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
788 		/* we have other mounts to same share or we have
789 		   already tried to umount this and woken up
790 		   all waiting network requests, nothing to do */
791 		spin_unlock(&tcon->tc_lock);
792 		spin_unlock(&cifs_tcp_ses_lock);
793 		return;
794 	}
795 	/*
796 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
797 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
798 	 */
799 	spin_unlock(&tcon->tc_lock);
800 	spin_unlock(&cifs_tcp_ses_lock);
801 
802 	cifs_close_all_deferred_files(tcon);
803 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
804 	/* cancel_notify_requests(tcon); */
805 	if (tcon->ses && tcon->ses->server) {
806 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
807 		wake_up_all(&tcon->ses->server->request_q);
808 		wake_up_all(&tcon->ses->server->response_q);
809 		msleep(1); /* yield */
810 		/* we have to kick the requests once more */
811 		wake_up_all(&tcon->ses->server->response_q);
812 		msleep(1);
813 	}
814 
815 	return;
816 }
817 
818 static int cifs_freeze(struct super_block *sb)
819 {
820 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
821 	struct cifs_tcon *tcon;
822 
823 	if (cifs_sb == NULL)
824 		return 0;
825 
826 	tcon = cifs_sb_master_tcon(cifs_sb);
827 
828 	cifs_close_all_deferred_files(tcon);
829 	return 0;
830 }
831 
832 #ifdef CONFIG_CIFS_STATS2
833 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
834 {
835 	/* BB FIXME */
836 	return 0;
837 }
838 #endif
839 
840 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
841 {
842 	return netfs_unpin_writeback(inode, wbc);
843 }
844 
845 static int cifs_drop_inode(struct inode *inode)
846 {
847 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
848 
849 	/* no serverino => unconditional eviction */
850 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
851 		generic_drop_inode(inode);
852 }
853 
854 static const struct super_operations cifs_super_ops = {
855 	.statfs = cifs_statfs,
856 	.alloc_inode = cifs_alloc_inode,
857 	.write_inode	= cifs_write_inode,
858 	.free_inode = cifs_free_inode,
859 	.drop_inode	= cifs_drop_inode,
860 	.evict_inode	= cifs_evict_inode,
861 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
862 	.show_devname   = cifs_show_devname,
863 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
864 	function unless later we add lazy close of inodes or unless the
865 	kernel forgets to call us with the same number of releases (closes)
866 	as opens */
867 	.show_options = cifs_show_options,
868 	.umount_begin   = cifs_umount_begin,
869 	.freeze_fs      = cifs_freeze,
870 #ifdef CONFIG_CIFS_STATS2
871 	.show_stats = cifs_show_stats,
872 #endif
873 };
874 
875 /*
876  * Get root dentry from superblock according to prefix path mount option.
877  * Return dentry with refcount + 1 on success and an ERR_PTR on failure.
878  */
879 static struct dentry *
880 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
881 {
882 	struct dentry *dentry;
883 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
884 	char *full_path = NULL;
885 	char *s, *p;
886 	char sep;
887 
888 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
889 		return dget(sb->s_root);
890 
891 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
892 				cifs_sb_master_tcon(cifs_sb), 0);
893 	if (full_path == NULL)
894 		return ERR_PTR(-ENOMEM);
895 
896 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
897 
898 	sep = CIFS_DIR_SEP(cifs_sb);
899 	dentry = dget(sb->s_root);
900 	s = full_path;
901 
902 	do {
903 		struct inode *dir = d_inode(dentry);
904 		struct dentry *child;
905 
906 		if (!S_ISDIR(dir->i_mode)) {
907 			dput(dentry);
908 			dentry = ERR_PTR(-ENOTDIR);
909 			break;
910 		}
911 
912 		/* skip separators */
913 		while (*s == sep)
914 			s++;
915 		if (!*s)
916 			break;
917 		p = s++;
918 		/* next separator */
919 		while (*s && *s != sep)
920 			s++;
921 
922 		child = lookup_positive_unlocked(p, dentry, s - p);
923 		dput(dentry);
924 		dentry = child;
925 	} while (!IS_ERR(dentry));
926 	kfree(full_path);
927 	return dentry;
928 }
929 
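/* sget() "set" callback: stash the prepared cifs_sb before the sb goes live */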
930 static int cifs_set_super(struct super_block *sb, void *data)
931 {
932 	struct cifs_mnt_data *mnt_data = data;
933 	sb->s_fs_info = mnt_data->cifs_sb;
934 	return set_anon_super(sb, NULL);
935 }
936 
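/*
 * Common mount entry point for the cifs and smb3 filesystem types:
 * duplicate the fs_context, set up the cifs_sb, connect to the server
 * (cifs_mount), then find or create a superblock via sget() and return
 * the root dentry (honouring any prefix path).
 */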
937 struct dentry *
938 cifs_smb3_do_mount(struct file_system_type *fs_type,
939 	      int flags, struct smb3_fs_context *old_ctx)
940 {
941 	struct cifs_mnt_data mnt_data;
942 	struct cifs_sb_info *cifs_sb;
943 	struct super_block *sb;
944 	struct dentry *root;
945 	int rc;
946 
947 	if (cifsFYI) {
948 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
949 			 old_ctx->source, flags);
950 	} else {
951 		cifs_info("Attempting to mount %s\n", old_ctx->source);
952 	}
953 
954 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
955 	if (!cifs_sb)
956 		return ERR_PTR(-ENOMEM);
957 
958 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
959 	if (!cifs_sb->ctx) {
960 		root = ERR_PTR(-ENOMEM);
961 		goto out;
962 	}
963 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
964 	if (rc) {
965 		root = ERR_PTR(rc);
966 		goto out;
967 	}
968 
969 	rc = cifs_setup_cifs_sb(cifs_sb);
970 	if (rc) {
971 		root = ERR_PTR(rc);
972 		goto out;
973 	}
974 
975 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
976 	if (rc) {
977 		if (!(flags & SB_SILENT))
978 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
979 				 rc);
980 		root = ERR_PTR(rc);
981 		goto out;
982 	}
983 
984 	mnt_data.ctx = cifs_sb->ctx;
985 	mnt_data.cifs_sb = cifs_sb;
986 	mnt_data.flags = flags;
987 
988 	/* BB should we make this contingent on mount parm? */
989 	flags |= SB_NODIRATIME | SB_NOATIME;
990 
991 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
992 	if (IS_ERR(sb)) {
993 		cifs_umount(cifs_sb);
994 		return ERR_CAST(sb);
995 	}
996 
997 	if (sb->s_root) {
998 		cifs_dbg(FYI, "Use existing superblock\n");
999 		cifs_umount(cifs_sb);
1000 		cifs_sb = NULL;
1001 	} else {
1002 		rc = cifs_read_super(sb);
1003 		if (rc) {
1004 			root = ERR_PTR(rc);
1005 			goto out_super;
1006 		}
1007 
1008 		sb->s_flags |= SB_ACTIVE;
1009 	}
1010 
1011 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
1012 	if (IS_ERR(root))
1013 		goto out_super;
1014 
1015 	if (cifs_sb)
1016 		cifs_sb->root = dget(root);
1017 
1018 	cifs_dbg(FYI, "dentry root is: %p\n", root);
1019 	return root;
1020 
1021 out_super:
1022 	deactivate_locked_super(sb);
1023 	return root;
1024 out:
1025 	kfree(cifs_sb->prepath);
1026 	smb3_cleanup_fs_context(cifs_sb->ctx);
1027 	kfree(cifs_sb);
1028 	return root;
1029 }
1030 
1031 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1032 {
1033 	struct cifsFileInfo *cfile = file->private_data;
1034 	struct cifs_tcon *tcon;
1035 
1036 	/*
1037 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1038 	 * the cached file length
1039 	 */
1040 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1041 		int rc;
1042 		struct inode *inode = file_inode(file);
1043 
1044 		/*
1045 		 * We need to be sure that all dirty pages are written and the
1046 		 * server has the newest file length.
1047 		 */
1048 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1049 		    inode->i_mapping->nrpages != 0) {
1050 			rc = filemap_fdatawait(inode->i_mapping);
1051 			if (rc) {
1052 				mapping_set_error(inode->i_mapping, rc);
1053 				return rc;
1054 			}
1055 		}
1056 		/*
1057 		 * Some applications poll for the file length in this strange
1058 		 * way so we must seek to end on non-oplocked files by
1059 		 * setting the revalidate time to zero.
1060 		 */
1061 		CIFS_I(inode)->time = 0;
1062 
1063 		rc = cifs_revalidate_file_attr(file);
1064 		if (rc < 0)
1065 			return (loff_t)rc;
1066 	}
1067 	if (cfile && cfile->tlink) {
1068 		tcon = tlink_tcon(cfile->tlink);
1069 		if (tcon->ses->server->ops->llseek)
1070 			return tcon->ses->server->ops->llseek(file, tcon,
1071 							      offset, whence);
1072 	}
1073 	return generic_file_llseek(file, offset, whence);
1074 }
1075 
1076 static int
1077 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1078 {
1079 	/*
1080 	 * Note that this is called by vfs setlease with i_lock held to
1081 	 * protect *lease from going away.
1082 	 */
1083 	struct inode *inode = file_inode(file);
1084 	struct cifsFileInfo *cfile = file->private_data;
1085 
1086 	/* Check if file is oplocked if this is request for new lease */
1087 	if (arg == F_UNLCK ||
1088 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1089 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1090 		return generic_setlease(file, arg, lease, priv);
1091 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1092 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1093 		/*
1094 		 * If the server claims to support oplock on this file, then we
1095 		 * still need to check oplock even if the local_lease mount
1096 		 * option is set, but there are servers which do not support
1097 		 * oplock for which this mount option may be useful if the user
1098 		 * knows that the file won't be changed on the server by anyone
1099 		 * else.
1100 		 */
1101 		return generic_setlease(file, arg, lease, priv);
1102 	else
1103 		return -EAGAIN;
1104 }
1105 
1106 struct file_system_type cifs_fs_type = {
1107 	.owner = THIS_MODULE,
1108 	.name = "cifs",
1109 	.init_fs_context = smb3_init_fs_context,
1110 	.parameters = smb3_fs_parameters,
1111 	.kill_sb = cifs_kill_sb,
1112 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1113 };
1114 MODULE_ALIAS_FS("cifs");
1115 
1116 struct file_system_type smb3_fs_type = {
1117 	.owner = THIS_MODULE,
1118 	.name = "smb3",
1119 	.init_fs_context = smb3_init_fs_context,
1120 	.parameters = smb3_fs_parameters,
1121 	.kill_sb = cifs_kill_sb,
1122 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1123 };
1124 MODULE_ALIAS_FS("smb3");
1125 MODULE_ALIAS("smb3");
1126 
1127 const struct inode_operations cifs_dir_inode_ops = {
1128 	.create = cifs_create,
1129 	.atomic_open = cifs_atomic_open,
1130 	.lookup = cifs_lookup,
1131 	.getattr = cifs_getattr,
1132 	.unlink = cifs_unlink,
1133 	.link = cifs_hardlink,
1134 	.mkdir = cifs_mkdir,
1135 	.rmdir = cifs_rmdir,
1136 	.rename = cifs_rename2,
1137 	.permission = cifs_permission,
1138 	.setattr = cifs_setattr,
1139 	.symlink = cifs_symlink,
1140 	.mknod   = cifs_mknod,
1141 	.listxattr = cifs_listxattr,
1142 	.get_acl = cifs_get_acl,
1143 	.set_acl = cifs_set_acl,
1144 };
1145 
1146 const struct inode_operations cifs_file_inode_ops = {
1147 	.setattr = cifs_setattr,
1148 	.getattr = cifs_getattr,
1149 	.permission = cifs_permission,
1150 	.listxattr = cifs_listxattr,
1151 	.fiemap = cifs_fiemap,
1152 	.get_acl = cifs_get_acl,
1153 	.set_acl = cifs_set_acl,
1154 };
1155 
1156 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1157 			    struct delayed_call *done)
1158 {
1159 	char *target_path;
1160 
1161 	if (!dentry)
1162 		return ERR_PTR(-ECHILD);
1163 
1164 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1165 	if (!target_path)
1166 		return ERR_PTR(-ENOMEM);
1167 
1168 	spin_lock(&inode->i_lock);
1169 	if (likely(CIFS_I(inode)->symlink_target)) {
1170 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1171 	} else {
1172 		kfree(target_path);
1173 		target_path = ERR_PTR(-EOPNOTSUPP);
1174 	}
1175 	spin_unlock(&inode->i_lock);
1176 
1177 	if (!IS_ERR(target_path))
1178 		set_delayed_call(done, kfree_link, target_path);
1179 
1180 	return target_path;
1181 }
1182 
1183 const struct inode_operations cifs_symlink_inode_ops = {
1184 	.get_link = cifs_get_link,
1185 	.setattr = cifs_setattr,
1186 	.permission = cifs_permission,
1187 	.listxattr = cifs_listxattr,
1188 };
1189 
1190 /*
1191  * Advance the EOF marker to after the source range.
1192  */
1193 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1194 				struct cifs_tcon *src_tcon,
1195 				unsigned int xid, loff_t src_end)
1196 {
1197 	struct cifsFileInfo *writeable_srcfile;
1198 	int rc = -EINVAL;
1199 
1200 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1201 	if (writeable_srcfile) {
1202 		if (src_tcon->ses->server->ops->set_file_size)
1203 			rc = src_tcon->ses->server->ops->set_file_size(
1204 				xid, src_tcon, writeable_srcfile,
1205 				src_inode->i_size, true /* no need to set sparse */);
1206 		else
1207 			rc = -ENOSYS;
1208 		cifsFileInfo_put(writeable_srcfile);
1209 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1210 	}
1211 
1212 	if (rc < 0)
1213 		goto set_failed;
1214 
1215 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1216 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1217 	return 0;
1218 
1219 set_failed:
1220 	return filemap_write_and_wait(src_inode->i_mapping);
1221 }
1222 
1223 /*
1224  * Flush out either the folio that overlaps the beginning of a range in which
1225  * pos resides or the folio that overlaps the end of a range unless that folio
1226  * is entirely within the range we're going to invalidate.  We extend the flush
1227  * bounds to encompass the folio.
1228  */
1229 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1230 			    bool first)
1231 {
1232 	struct folio *folio;
1233 	unsigned long long fpos, fend;
1234 	pgoff_t index = pos / PAGE_SIZE;
1235 	size_t size;
1236 	int rc = 0;
1237 
1238 	folio = filemap_get_folio(inode->i_mapping, index);
1239 	if (IS_ERR(folio))
1240 		return 0;
1241 
1242 	size = folio_size(folio);
1243 	fpos = folio_pos(folio);
1244 	fend = fpos + size - 1;
1245 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1246 	*_fend   = max_t(unsigned long long, *_fend, fend);
1247 	if ((first && pos == fpos) || (!first && pos == fend))
1248 		goto out;
1249 
1250 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1251 out:
1252 	folio_put(folio);
1253 	return rc;
1254 }
1255 
1256 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1257 		struct file *dst_file, loff_t destoff, loff_t len,
1258 		unsigned int remap_flags)
1259 {
1260 	struct inode *src_inode = file_inode(src_file);
1261 	struct inode *target_inode = file_inode(dst_file);
1262 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1263 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1264 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1265 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1266 	struct cifs_tcon *target_tcon, *src_tcon;
1267 	unsigned long long destend, fstart, fend, old_size, new_size;
1268 	unsigned int xid;
1269 	int rc;
1270 
1271 	if (remap_flags & REMAP_FILE_DEDUP)
1272 		return -EOPNOTSUPP;
1273 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1274 		return -EINVAL;
1275 
1276 	cifs_dbg(FYI, "clone range\n");
1277 
1278 	xid = get_xid();
1279 
1280 	if (!smb_file_src || !smb_file_target) {
1281 		rc = -EBADF;
1282 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1283 		goto out;
1284 	}
1285 
1286 	src_tcon = tlink_tcon(smb_file_src->tlink);
1287 	target_tcon = tlink_tcon(smb_file_target->tlink);
1288 
1289 	/*
1290 	 * Note: cifs case is easier than btrfs since server responsible for
1291 	 * checks for proper open modes and file type and if it wants
1292 	 * server could even support copy of range where source = target
1293 	 */
1294 	lock_two_nondirectories(target_inode, src_inode);
1295 
1296 	if (len == 0)
1297 		len = src_inode->i_size - off;
1298 
1299 	cifs_dbg(FYI, "clone range\n");
1300 
1301 	/* Flush the source buffer */
1302 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1303 					  off + len - 1);
1304 	if (rc)
1305 		goto unlock;
1306 
1307 	/* The server-side copy will fail if the source crosses the EOF marker.
1308 	 * Advance the EOF marker after the flush above to the end of the range
1309 	 * if it's short of that.
1310 	 */
1311 	if (src_cifsi->netfs.remote_i_size < off + len) {
1312 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1313 		if (rc < 0)
1314 			goto unlock;
1315 	}
1316 
1317 	new_size = destoff + len;
1318 	destend = destoff + len - 1;
1319 
1320 	/* Flush the folios at either end of the destination range to prevent
1321 	 * accidental loss of dirty data outside of the range.
1322 	 */
1323 	fstart = destoff;
1324 	fend = destend;
1325 
1326 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1327 	if (rc)
1328 		goto unlock;
1329 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1330 	if (rc)
1331 		goto unlock;
1332 	if (fend > target_cifsi->netfs.zero_point)
1333 		target_cifsi->netfs.zero_point = fend + 1;
1334 	old_size = target_cifsi->netfs.remote_i_size;
1335 
1336 	/* Discard all the folios that overlap the destination region. */
1337 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1338 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1339 
1340 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1341 			   i_size_read(target_inode), 0);
1342 
1343 	rc = -EOPNOTSUPP;
1344 	if (target_tcon->ses->server->ops->duplicate_extents) {
1345 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1346 			smb_file_src, smb_file_target, off, len, destoff);
1347 		if (rc == 0 && new_size > old_size) {
1348 			truncate_setsize(target_inode, new_size);
1349 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1350 					      new_size);
1351 		} else if (rc == -EOPNOTSUPP) {
1352 			/*
1353 			 * copy_file_range syscall man page indicates EINVAL
1354 			 * is returned e.g when "fd_in and fd_out refer to the
1355 			 * same file and the source and target ranges overlap."
1356 			 * Test generic/157 was what showed these cases where
1357 			 * we need to remap EOPNOTSUPP to EINVAL
1358 			 */
1359 			if (off >= src_inode->i_size) {
1360 				rc = -EINVAL;
1361 			} else if (src_inode == target_inode) {
1362 				if (off + len > destoff)
1363 					rc = -EINVAL;
1364 			}
1365 		}
1366 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1367 			target_cifsi->netfs.zero_point = new_size;
1368 	}
1369 
1370 	/* force revalidate of size and timestamps of target file now
1371 	   that target is updated on the server */
1372 	CIFS_I(target_inode)->time = 0;
1373 unlock:
1374 	/* although unlocking in the reverse order from locking is not
1375 	   strictly necessary here it is a little cleaner to be consistent */
1376 	unlock_two_nondirectories(src_inode, target_inode);
1377 out:
1378 	free_xid(xid);
1379 	return rc < 0 ? rc : len;
1380 }
1381 
1382 ssize_t cifs_file_copychunk_range(unsigned int xid,
1383 				struct file *src_file, loff_t off,
1384 				struct file *dst_file, loff_t destoff,
1385 				size_t len, unsigned int flags)
1386 {
1387 	struct inode *src_inode = file_inode(src_file);
1388 	struct inode *target_inode = file_inode(dst_file);
1389 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1390 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1391 	struct cifsFileInfo *smb_file_src;
1392 	struct cifsFileInfo *smb_file_target;
1393 	struct cifs_tcon *src_tcon;
1394 	struct cifs_tcon *target_tcon;
1395 	ssize_t rc;
1396 
1397 	cifs_dbg(FYI, "copychunk range\n");
1398 
1399 	if (!src_file->private_data || !dst_file->private_data) {
1400 		rc = -EBADF;
1401 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1402 		goto out;
1403 	}
1404 
1405 	rc = -EXDEV;
1406 	smb_file_target = dst_file->private_data;
1407 	smb_file_src = src_file->private_data;
1408 	src_tcon = tlink_tcon(smb_file_src->tlink);
1409 	target_tcon = tlink_tcon(smb_file_target->tlink);
1410 
1411 	if (src_tcon->ses != target_tcon->ses) {
1412 		cifs_dbg(FYI, "source and target of copy not on same server\n");
1413 		goto out;
1414 	}
1415 
1416 	rc = -EOPNOTSUPP;
1417 	if (!target_tcon->ses->server->ops->copychunk_range)
1418 		goto out;
1419 
1420 	/*
1421 	 * Note: cifs case is easier than btrfs since server responsible for
1422 	 * checks for proper open modes and file type and if it wants
1423 	 * server could even support copy of range where source = target
1424 	 */
1425 	lock_two_nondirectories(target_inode, src_inode);
1426 
1427 	cifs_dbg(FYI, "about to flush pages\n");
1428 
1429 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1430 					  off + len - 1);
1431 	if (rc)
1432 		goto unlock;
1433 
1434 	/* The server-side copy will fail if the source crosses the EOF marker.
1435 	 * Advance the EOF marker after the flush above to the end of the range
1436 	 * if it's short of that.
1437 	 */
1438 	if (src_cifsi->netfs.remote_i_size < off + len) {
1439 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1440 		if (rc < 0)
1441 			goto unlock;
1442 	}
1443 
1444 	/* Flush and invalidate all the folios in the destination region.  If
1445 	 * the copy was successful, then some of the flush is extra overhead,
1446 	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
1447 	 */
1448 	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
1449 	if (rc)
1450 		goto unlock;
1451 
1452 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1453 			   i_size_read(target_inode), 0);
1454 
1455 	rc = file_modified(dst_file);
1456 	if (!rc) {
1457 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1458 			smb_file_src, smb_file_target, off, len, destoff);
1459 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1460 			truncate_setsize(target_inode, destoff + rc);
1461 			netfs_resize_file(&target_cifsi->netfs,
1462 					  i_size_read(target_inode), true);
1463 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1464 					      i_size_read(target_inode));
1465 		}
1466 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1467 			target_cifsi->netfs.zero_point = destoff + rc;
1468 	}
1469 
1470 	file_accessed(src_file);
1471 
1472 	/* force revalidate of size and timestamps of target file now
1473 	 * that target is updated on the server
1474 	 */
1475 	CIFS_I(target_inode)->time = 0;
1476 
1477 unlock:
1478 	/* although unlocking in the reverse order from locking is not
1479 	 * strictly necessary here it is a little cleaner to be consistent
1480 	 */
1481 	unlock_two_nondirectories(src_inode, target_inode);
1482 
1483 out:
1484 	return rc;
1485 }
1486 
1487 /*
1488  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1489  * is a dummy operation.
1490  */
1491 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1492 {
1493 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1494 		 file, datasync);
1495 
1496 	return 0;
1497 }
1498 
1499 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1500 				struct file *dst_file, loff_t destoff,
1501 				size_t len, unsigned int flags)
1502 {
1503 	unsigned int xid = get_xid();
1504 	ssize_t rc;
1505 	struct cifsFileInfo *cfile = dst_file->private_data;
1506 
1507 	if (cfile->swapfile) {
1508 		rc = -EOPNOTSUPP;
1509 		free_xid(xid);
1510 		return rc;
1511 	}
1512 
1513 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1514 					len, flags);
1515 	free_xid(xid);
1516 
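	/*
	 * Server-side copy was not possible (e.g. source and target on
	 * different servers, or the server lacks copychunk support), so
	 * fall back to a client-side splice copy through the page cache.
	 */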
1517 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1518 		rc = splice_copy_file_range(src_file, off, dst_file,
1519 					    destoff, len);
1520 	return rc;
1521 }
1522 
1523 const struct file_operations cifs_file_ops = {
1524 	.read_iter = cifs_loose_read_iter,
1525 	.write_iter = cifs_file_write_iter,
1526 	.open = cifs_open,
1527 	.release = cifs_close,
1528 	.lock = cifs_lock,
1529 	.flock = cifs_flock,
1530 	.fsync = cifs_fsync,
1531 	.flush = cifs_flush,
1532 	.mmap  = cifs_file_mmap,
1533 	.splice_read = filemap_splice_read,
1534 	.splice_write = iter_file_splice_write,
1535 	.llseek = cifs_llseek,
1536 	.unlocked_ioctl	= cifs_ioctl,
1537 	.copy_file_range = cifs_copy_file_range,
1538 	.remap_file_range = cifs_remap_file_range,
1539 	.setlease = cifs_setlease,
1540 	.fallocate = cifs_fallocate,
1541 };
1542 
1543 const struct file_operations cifs_file_strict_ops = {
1544 	.read_iter = cifs_strict_readv,
1545 	.write_iter = cifs_strict_writev,
1546 	.open = cifs_open,
1547 	.release = cifs_close,
1548 	.lock = cifs_lock,
1549 	.flock = cifs_flock,
1550 	.fsync = cifs_strict_fsync,
1551 	.flush = cifs_flush,
1552 	.mmap = cifs_file_strict_mmap,
1553 	.splice_read = filemap_splice_read,
1554 	.splice_write = iter_file_splice_write,
1555 	.llseek = cifs_llseek,
1556 	.unlocked_ioctl	= cifs_ioctl,
1557 	.copy_file_range = cifs_copy_file_range,
1558 	.remap_file_range = cifs_remap_file_range,
1559 	.setlease = cifs_setlease,
1560 	.fallocate = cifs_fallocate,
1561 };
1562 
1563 const struct file_operations cifs_file_direct_ops = {
1564 	.read_iter = netfs_unbuffered_read_iter,
1565 	.write_iter = netfs_file_write_iter,
1566 	.open = cifs_open,
1567 	.release = cifs_close,
1568 	.lock = cifs_lock,
1569 	.flock = cifs_flock,
1570 	.fsync = cifs_fsync,
1571 	.flush = cifs_flush,
1572 	.mmap = cifs_file_mmap,
1573 	.splice_read = copy_splice_read,
1574 	.splice_write = iter_file_splice_write,
1575 	.unlocked_ioctl  = cifs_ioctl,
1576 	.copy_file_range = cifs_copy_file_range,
1577 	.remap_file_range = cifs_remap_file_range,
1578 	.llseek = cifs_llseek,
1579 	.setlease = cifs_setlease,
1580 	.fallocate = cifs_fallocate,
1581 };
1582 
1583 const struct file_operations cifs_file_nobrl_ops = {
1584 	.read_iter = cifs_loose_read_iter,
1585 	.write_iter = cifs_file_write_iter,
1586 	.open = cifs_open,
1587 	.release = cifs_close,
1588 	.fsync = cifs_fsync,
1589 	.flush = cifs_flush,
1590 	.mmap  = cifs_file_mmap,
1591 	.splice_read = filemap_splice_read,
1592 	.splice_write = iter_file_splice_write,
1593 	.llseek = cifs_llseek,
1594 	.unlocked_ioctl	= cifs_ioctl,
1595 	.copy_file_range = cifs_copy_file_range,
1596 	.remap_file_range = cifs_remap_file_range,
1597 	.setlease = cifs_setlease,
1598 	.fallocate = cifs_fallocate,
1599 };
1600 
1601 const struct file_operations cifs_file_strict_nobrl_ops = {
1602 	.read_iter = cifs_strict_readv,
1603 	.write_iter = cifs_strict_writev,
1604 	.open = cifs_open,
1605 	.release = cifs_close,
1606 	.fsync = cifs_strict_fsync,
1607 	.flush = cifs_flush,
1608 	.mmap = cifs_file_strict_mmap,
1609 	.splice_read = filemap_splice_read,
1610 	.splice_write = iter_file_splice_write,
1611 	.llseek = cifs_llseek,
1612 	.unlocked_ioctl	= cifs_ioctl,
1613 	.copy_file_range = cifs_copy_file_range,
1614 	.remap_file_range = cifs_remap_file_range,
1615 	.setlease = cifs_setlease,
1616 	.fallocate = cifs_fallocate,
1617 };
1618 
1619 const struct file_operations cifs_file_direct_nobrl_ops = {
1620 	.read_iter = netfs_unbuffered_read_iter,
1621 	.write_iter = netfs_file_write_iter,
1622 	.open = cifs_open,
1623 	.release = cifs_close,
1624 	.fsync = cifs_fsync,
1625 	.flush = cifs_flush,
1626 	.mmap = cifs_file_mmap,
1627 	.splice_read = copy_splice_read,
1628 	.splice_write = iter_file_splice_write,
1629 	.unlocked_ioctl  = cifs_ioctl,
1630 	.copy_file_range = cifs_copy_file_range,
1631 	.remap_file_range = cifs_remap_file_range,
1632 	.llseek = cifs_llseek,
1633 	.setlease = cifs_setlease,
1634 	.fallocate = cifs_fallocate,
1635 };
1636 
1637 const struct file_operations cifs_dir_ops = {
1638 	.iterate_shared = cifs_readdir,
1639 	.release = cifs_closedir,
1640 	.read    = generic_read_dir,
1641 	.unlocked_ioctl  = cifs_ioctl,
1642 	.copy_file_range = cifs_copy_file_range,
1643 	.remap_file_range = cifs_remap_file_range,
1644 	.llseek = generic_file_llseek,
1645 	.fsync = cifs_dir_fsync,
1646 };
1647 
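/*
 * Slab constructor for the cifsInodeInfo cache: runs when a slab object is
 * first set up, not on every allocation, so only one-time init goes here.
 */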
1648 static void
1649 cifs_init_once(void *inode)
1650 {
1651 	struct cifsInodeInfo *cifsi = inode;
1652 
1653 	inode_init_once(&cifsi->netfs.inode);
1654 	init_rwsem(&cifsi->lock_sem);
1655 }
1656 
1657 static int __init
1658 cifs_init_inodecache(void)
1659 {
1660 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1661 					      sizeof(struct cifsInodeInfo),
1662 					      0, (SLAB_RECLAIM_ACCOUNT|
1663 						SLAB_ACCOUNT),
1664 					      cifs_init_once);
1665 	if (cifs_inode_cachep == NULL)
1666 		return -ENOMEM;
1667 
1668 	return 0;
1669 }
1670 
1671 static void
1672 cifs_destroy_inodecache(void)
1673 {
1674 	/*
1675 	 * Make sure all delayed rcu free inodes are flushed before we
1676 	 * destroy cache.
1677 	 */
1678 	rcu_barrier();
1679 	kmem_cache_destroy(cifs_inode_cachep);
1680 }
1681 
1682 static int
1683 cifs_init_request_bufs(void)
1684 {
1685 	/*
1686 	 * The maximum SMB2 header size is bigger than the CIFS one, so it is
1687 	 * harmless to allocate the extra bytes for CIFS requests too.
1688 	 */
1689 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1690 
1691 	if (CIFSMaxBufSize < 8192) {
1692 	/* Buffer size cannot be smaller than 2 * PATH_MAX, since the maximum
1693 	 * Unicode path name has to fit in any SMB/CIFS path based frame */
1694 		CIFSMaxBufSize = 8192;
1695 	} else if (CIFSMaxBufSize > 1024*127) {
1696 		CIFSMaxBufSize = 1024 * 127;
1697 	} else {
1698 		CIFSMaxBufSize &= 0x1FE00; /* Round size down to a multiple of 512 */
1699 	}
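	/*
	 * Worked example of the rounding above (added note): a value of 65000
	 * would be masked down to 64512 (126 * 512), i.e. the next lower
	 * multiple of 512.
	 */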
1700 /*
1701 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1702 		 CIFSMaxBufSize, CIFSMaxBufSize);
1703 */
1704 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1705 					    CIFSMaxBufSize + max_hdr_size, 0,
1706 					    SLAB_HWCACHE_ALIGN, 0,
1707 					    CIFSMaxBufSize + max_hdr_size,
1708 					    NULL);
1709 	if (cifs_req_cachep == NULL)
1710 		return -ENOMEM;
1711 
1712 	if (cifs_min_rcv < 1)
1713 		cifs_min_rcv = 1;
1714 	else if (cifs_min_rcv > 64) {
1715 		cifs_min_rcv = 64;
1716 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1717 	}
1718 
1719 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1720 						  cifs_req_cachep);
1721 
1722 	if (cifs_req_poolp == NULL) {
1723 		kmem_cache_destroy(cifs_req_cachep);
1724 		return -ENOMEM;
1725 	}
1726 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1727 	 * almost all handle based requests (but not write response, nor is it
1728 	 * sufficient for path based requests).  A smaller size would have
1729 	 * been more efficient (compacting multiple slab items on one 4k page)
1730 	 * for the case in which debug was on, but this larger size allows
1731 	 * more SMBs to use small buffer alloc and is still much more
1732 	 * efficient to alloc 1 per page off the slab compared to 17K (5 page)
1733 	 * alloc of large cifs buffers even when page debugging is on */
1734 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1735 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1736 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1737 	if (cifs_sm_req_cachep == NULL) {
1738 		mempool_destroy(cifs_req_poolp);
1739 		kmem_cache_destroy(cifs_req_cachep);
1740 		return -ENOMEM;
1741 	}
1742 
1743 	if (cifs_min_small < 2)
1744 		cifs_min_small = 2;
1745 	else if (cifs_min_small > 256) {
1746 		cifs_min_small = 256;
1747 		cifs_dbg(VFS, "cifs_min_small set to maximum (256)\n");
1748 	}
1749 
1750 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1751 						     cifs_sm_req_cachep);
1752 
1753 	if (cifs_sm_req_poolp == NULL) {
1754 		mempool_destroy(cifs_req_poolp);
1755 		kmem_cache_destroy(cifs_req_cachep);
1756 		kmem_cache_destroy(cifs_sm_req_cachep);
1757 		return -ENOMEM;
1758 	}
1759 
1760 	return 0;
1761 }
1762 
1763 static void
1764 cifs_destroy_request_bufs(void)
1765 {
1766 	mempool_destroy(cifs_req_poolp);
1767 	kmem_cache_destroy(cifs_req_cachep);
1768 	mempool_destroy(cifs_sm_req_poolp);
1769 	kmem_cache_destroy(cifs_sm_req_cachep);
1770 }
1771 
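/*
 * Each mid (multiplex id) entry tracks one request that is outstanding on the
 * wire until the matching response arrives or the request is cancelled.
 */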
1772 static int init_mids(void)
1773 {
1774 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1775 					    sizeof(struct mid_q_entry), 0,
1776 					    SLAB_HWCACHE_ALIGN, NULL);
1777 	if (cifs_mid_cachep == NULL)
1778 		return -ENOMEM;
1779 
1780 	/* 3 is a reasonable minimum number of simultaneous operations */
1781 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1782 	if (cifs_mid_poolp == NULL) {
1783 		kmem_cache_destroy(cifs_mid_cachep);
1784 		return -ENOMEM;
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static void destroy_mids(void)
1791 {
1792 	mempool_destroy(cifs_mid_poolp);
1793 	kmem_cache_destroy(cifs_mid_cachep);
1794 }
1795 
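/*
 * Caches and mempools backing the netfs I/O request/subrequest objects; the
 * mempools keep a reserve of objects so reads and writes can still make
 * progress under memory pressure.
 */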
1796 static int cifs_init_netfs(void)
1797 {
1798 	cifs_io_request_cachep =
1799 		kmem_cache_create("cifs_io_request",
1800 				  sizeof(struct cifs_io_request), 0,
1801 				  SLAB_HWCACHE_ALIGN, NULL);
1802 	if (!cifs_io_request_cachep)
1803 		goto nomem_req;
1804 
1805 	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1806 		goto nomem_reqpool;
1807 
1808 	cifs_io_subrequest_cachep =
1809 		kmem_cache_create("cifs_io_subrequest",
1810 				  sizeof(struct cifs_io_subrequest), 0,
1811 				  SLAB_HWCACHE_ALIGN, NULL);
1812 	if (!cifs_io_subrequest_cachep)
1813 		goto nomem_subreq;
1814 
1815 	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1816 		goto nomem_subreqpool;
1817 
1818 	return 0;
1819 
1820 nomem_subreqpool:
1821 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1822 nomem_subreq:
1823 	mempool_exit(&cifs_io_request_pool);
1824 nomem_reqpool:
1825 	kmem_cache_destroy(cifs_io_request_cachep);
1826 nomem_req:
1827 	return -ENOMEM;
1828 }
1829 
1830 static void cifs_destroy_netfs(void)
1831 {
1832 	mempool_exit(&cifs_io_subrequest_pool);
1833 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1834 	mempool_exit(&cifs_io_request_pool);
1835 	kmem_cache_destroy(cifs_io_request_cachep);
1836 }
1837 
1838 static int __init
1839 init_cifs(void)
1840 {
1841 	int rc = 0;
1842 	cifs_proc_init();
1843 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1844 /*
1845  *  Initialize Global counters
1846  */
1847 	atomic_set(&sesInfoAllocCount, 0);
1848 	atomic_set(&tconInfoAllocCount, 0);
1849 	atomic_set(&tcpSesNextId, 0);
1850 	atomic_set(&tcpSesAllocCount, 0);
1851 	atomic_set(&tcpSesReconnectCount, 0);
1852 	atomic_set(&tconInfoReconnectCount, 0);
1853 
1854 	atomic_set(&buf_alloc_count, 0);
1855 	atomic_set(&small_buf_alloc_count, 0);
1856 #ifdef CONFIG_CIFS_STATS2
1857 	atomic_set(&total_buf_alloc_count, 0);
1858 	atomic_set(&total_small_buf_alloc_count, 0);
1859 	if (slow_rsp_threshold < 1)
1860 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1861 	else if (slow_rsp_threshold > 32767)
1862 		cifs_dbg(VFS,
1863 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1864 #endif /* CONFIG_CIFS_STATS2 */
1865 
1866 	atomic_set(&mid_count, 0);
1867 	GlobalCurrentXid = 0;
1868 	GlobalTotalActiveXid = 0;
1869 	GlobalMaxActiveXid = 0;
1870 	spin_lock_init(&cifs_tcp_ses_lock);
1871 	spin_lock_init(&GlobalMid_Lock);
1872 
1873 	cifs_lock_secret = get_random_u32();
1874 
1875 	if (cifs_max_pending < 2) {
1876 		cifs_max_pending = 2;
1877 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1878 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1879 		cifs_max_pending = CIFS_MAX_REQ;
1880 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1881 			 CIFS_MAX_REQ);
1882 	}
1883 
1884 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1885 	if (dir_cache_timeout > 65000) {
1886 		dir_cache_timeout = 65000;
1887 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1888 	}
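	/*
	 * Both limits above apply to module parameters, which can be set at
	 * load time, e.g. (illustrative values only):
	 *   modprobe cifs cifs_max_pending=128 dir_cache_timeout=60
	 */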
1889 
1890 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1891 	if (!cifsiod_wq) {
1892 		rc = -ENOMEM;
1893 		goto out_clean_proc;
1894 	}
1895 
1896 	/*
1897 	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1898 	 * so that we don't launch too many worker threads but
1899 	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1900 	 */
1901 
1902 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1903 	decrypt_wq = alloc_workqueue("smb3decryptd",
1904 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1905 	if (!decrypt_wq) {
1906 		rc = -ENOMEM;
1907 		goto out_destroy_cifsiod_wq;
1908 	}
1909 
1910 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1911 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1912 	if (!fileinfo_put_wq) {
1913 		rc = -ENOMEM;
1914 		goto out_destroy_decrypt_wq;
1915 	}
1916 
1917 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1918 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1919 	if (!cifsoplockd_wq) {
1920 		rc = -ENOMEM;
1921 		goto out_destroy_fileinfo_put_wq;
1922 	}
1923 
1924 	deferredclose_wq = alloc_workqueue("deferredclose",
1925 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1926 	if (!deferredclose_wq) {
1927 		rc = -ENOMEM;
1928 		goto out_destroy_cifsoplockd_wq;
1929 	}
1930 
1931 	serverclose_wq = alloc_workqueue("serverclose",
1932 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1933 	if (!serverclose_wq) {
1934 		rc = -ENOMEM;
1935 		goto out_destroy_deferredclose_wq;
1936 	}
1937 
1938 	cfid_put_wq = alloc_workqueue("cfid_put_wq",
1939 				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1940 	if (!cfid_put_wq) {
1941 		rc = -ENOMEM;
1942 		goto out_destroy_serverclose_wq;
1943 	}
1944 
1945 	rc = cifs_init_inodecache();
1946 	if (rc)
1947 		goto out_destroy_cfid_put_wq;
1948 
1949 	rc = cifs_init_netfs();
1950 	if (rc)
1951 		goto out_destroy_inodecache;
1952 
1953 	rc = init_mids();
1954 	if (rc)
1955 		goto out_destroy_netfs;
1956 
1957 	rc = cifs_init_request_bufs();
1958 	if (rc)
1959 		goto out_destroy_mids;
1960 
1961 #ifdef CONFIG_CIFS_DFS_UPCALL
1962 	rc = dfs_cache_init();
1963 	if (rc)
1964 		goto out_destroy_request_bufs;
1965 #endif /* CONFIG_CIFS_DFS_UPCALL */
1966 #ifdef CONFIG_CIFS_UPCALL
1967 	rc = init_cifs_spnego();
1968 	if (rc)
1969 		goto out_destroy_dfs_cache;
1970 #endif /* CONFIG_CIFS_UPCALL */
1971 #ifdef CONFIG_CIFS_SWN_UPCALL
1972 	rc = cifs_genl_init();
1973 	if (rc)
1974 		goto out_register_key_type;
1975 #endif /* CONFIG_CIFS_SWN_UPCALL */
1976 
1977 	rc = init_cifs_idmap();
1978 	if (rc)
1979 		goto out_cifs_swn_init;
1980 
1981 	rc = register_filesystem(&cifs_fs_type);
1982 	if (rc)
1983 		goto out_init_cifs_idmap;
1984 
1985 	rc = register_filesystem(&smb3_fs_type);
1986 	if (rc) {
1987 		unregister_filesystem(&cifs_fs_type);
1988 		goto out_init_cifs_idmap;
1989 	}
1990 
1991 	return 0;
1992 
1993 out_init_cifs_idmap:
1994 	exit_cifs_idmap();
1995 out_cifs_swn_init:
1996 #ifdef CONFIG_CIFS_SWN_UPCALL
1997 	cifs_genl_exit();
1998 out_register_key_type:
1999 #endif
2000 #ifdef CONFIG_CIFS_UPCALL
2001 	exit_cifs_spnego();
2002 out_destroy_dfs_cache:
2003 #endif
2004 #ifdef CONFIG_CIFS_DFS_UPCALL
2005 	dfs_cache_destroy();
2006 out_destroy_request_bufs:
2007 #endif
2008 	cifs_destroy_request_bufs();
2009 out_destroy_mids:
2010 	destroy_mids();
2011 out_destroy_netfs:
2012 	cifs_destroy_netfs();
2013 out_destroy_inodecache:
2014 	cifs_destroy_inodecache();
2015 out_destroy_cfid_put_wq:
2016 	destroy_workqueue(cfid_put_wq);
2017 out_destroy_serverclose_wq:
2018 	destroy_workqueue(serverclose_wq);
2019 out_destroy_deferredclose_wq:
2020 	destroy_workqueue(deferredclose_wq);
2021 out_destroy_cifsoplockd_wq:
2022 	destroy_workqueue(cifsoplockd_wq);
2023 out_destroy_fileinfo_put_wq:
2024 	destroy_workqueue(fileinfo_put_wq);
2025 out_destroy_decrypt_wq:
2026 	destroy_workqueue(decrypt_wq);
2027 out_destroy_cifsiod_wq:
2028 	destroy_workqueue(cifsiod_wq);
2029 out_clean_proc:
2030 	cifs_proc_clean();
2031 	return rc;
2032 }
2033 
2034 static void __exit
2035 exit_cifs(void)
2036 {
2037 	cifs_dbg(NOISY, "exit_smb3\n");
2038 	unregister_filesystem(&cifs_fs_type);
2039 	unregister_filesystem(&smb3_fs_type);
2040 	cifs_release_automount_timer();
2041 	exit_cifs_idmap();
2042 #ifdef CONFIG_CIFS_SWN_UPCALL
2043 	cifs_genl_exit();
2044 #endif
2045 #ifdef CONFIG_CIFS_UPCALL
2046 	exit_cifs_spnego();
2047 #endif
2048 #ifdef CONFIG_CIFS_DFS_UPCALL
2049 	dfs_cache_destroy();
2050 #endif
2051 	cifs_destroy_request_bufs();
2052 	destroy_mids();
2053 	cifs_destroy_netfs();
2054 	cifs_destroy_inodecache();
2055 	destroy_workqueue(deferredclose_wq);
2056 	destroy_workqueue(cifsoplockd_wq);
2057 	destroy_workqueue(decrypt_wq);
2058 	destroy_workqueue(fileinfo_put_wq);
2059 	destroy_workqueue(serverclose_wq);
2060 	destroy_workqueue(cfid_put_wq);
2061 	destroy_workqueue(cifsiod_wq);
2062 	cifs_proc_clean();
2063 }
2064 
2065 MODULE_AUTHOR("Steve French");
2066 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2067 MODULE_DESCRIPTION
2068 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2069 	"also older servers complying with the SNIA CIFS Specification)");
2070 MODULE_VERSION(CIFS_VERSION);
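/*
 * Soft dependencies: crypto and NLS modules that SMB signing, encryption and
 * name conversion may need at runtime, so module tooling can load them early.
 */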
2071 MODULE_SOFTDEP("ecb");
2072 MODULE_SOFTDEP("hmac");
2073 MODULE_SOFTDEP("md5");
2074 MODULE_SOFTDEP("nls");
2075 MODULE_SOFTDEP("aes");
2076 MODULE_SOFTDEP("cmac");
2077 MODULE_SOFTDEP("sha256");
2078 MODULE_SOFTDEP("sha512");
2079 MODULE_SOFTDEP("aead2");
2080 MODULE_SOFTDEP("ccm");
2081 MODULE_SOFTDEP("gcm");
2082 module_init(init_cifs)
2083 module_exit(exit_cifs)
2084