1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/uuid.h>
29 #include <linux/xattr.h>
30 #include <uapi/linux/magic.h>
31 #include <net/ipv6.h>
32 #include "cifsfs.h"
33 #include "cifspdu.h"
34 #define DECLARE_GLOBALS_HERE
35 #include "cifsglob.h"
36 #include "cifsproto.h"
37 #include "cifs_debug.h"
38 #include "cifs_fs_sb.h"
39 #include <linux/mm.h>
40 #include <linux/key-type.h>
41 #include "cifs_spnego.h"
42 #include "fscache.h"
43 #ifdef CONFIG_CIFS_DFS_UPCALL
44 #include "dfs_cache.h"
45 #endif
46 #ifdef CONFIG_CIFS_SWN_UPCALL
47 #include "netlink.h"
48 #endif
49 #include "fs_context.h"
50 #include "cached_dir.h"
51
52 /*
53 * DOS dates from 1980/1/1 through 2107/12/31
54 * Protocol specifications indicate the range should be to 119, which
55 * limits maximum year to 2099. But this range has not been checked.
56 */
57 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
58 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
59 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
60
int cifsFYI = 0;		/* debug verbosity knob; see cifs_debug.h */
bool traceSMB;			/* false by default */
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif /* STATS2 */
/* list of all TCP sessions; guarded by cifs_tcp_ses_lock below */
struct list_head cifs_tcp_ses_list;
spinlock_t cifs_tcp_ses_lock;
/* forward declaration; the ops table itself is defined later in this file */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				    "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* module-global workqueues; created/destroyed outside this chunk */
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
struct workqueue_struct *serverclose_wq;
/* NOTE(review): appears to be a per-boot secret used by lock hashing; set elsewhere — confirm */
__u32 cifs_lock_secret;
160
161 /*
162 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
164 * already held, e.g. in open-type syscalls context. Otherwise it can race with
165 * atomic_dec_and_test in deactivate_locked_super.
166 */
167 void
cifs_sb_active(struct super_block * sb)168 cifs_sb_active(struct super_block *sb)
169 {
170 struct cifs_sb_info *server = CIFS_SB(sb);
171
172 if (atomic_inc_return(&server->active) == 1)
173 atomic_inc(&sb->s_active);
174 }
175
176 void
cifs_sb_deactive(struct super_block * sb)177 cifs_sb_deactive(struct super_block *sb)
178 {
179 struct cifs_sb_info *server = CIFS_SB(sb);
180
181 if (atomic_dec_and_test(&server->active))
182 deactivate_super(sb);
183 }
184
/*
 * Fill in a freshly allocated super block for a new cifs mount:
 * flags, limits, timestamp granularity/range, bdi/readahead tuning,
 * the root inode and root dentry.  Called from cifs_smb3_do_mount()
 * only when no existing superblock could be reused.
 * Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are forced read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		/* representable range follows the legacy DOS date format */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* dentry ops must be chosen before the root dentry is created */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		/* d_make_root() consumed the inode reference on failure */
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
285
/*
 * Tear down a cifs super block: release cached-directory dentries and the
 * root dentry first, then kill the anonymous sb and release the cifs-level
 * mount state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
305
306 static int
cifs_statfs(struct dentry * dentry,struct kstatfs * buf)307 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
308 {
309 struct super_block *sb = dentry->d_sb;
310 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
311 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
312 struct TCP_Server_Info *server = tcon->ses->server;
313 unsigned int xid;
314 int rc = 0;
315
316 xid = get_xid();
317
318 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
319 buf->f_namelen =
320 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
321 else
322 buf->f_namelen = PATH_MAX;
323
324 buf->f_fsid.val[0] = tcon->vol_serial_number;
325 /* are using part of create time for more randomness, see man statfs */
326 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
327
328 buf->f_files = 0; /* undefined */
329 buf->f_ffree = 0; /* unlimited */
330
331 if (server->ops->queryfs)
332 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
333
334 free_xid(xid);
335 return rc;
336 }
337
/*
 * ->fallocate: delegate to the dialect-specific implementation, or report
 * -EOPNOTSUPP if the negotiated dialect has none.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_FILE_SB(file));
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	return server->ops->fallocate(file, tcon, mode, off, len);
}
349
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)350 static int cifs_permission(struct mnt_idmap *idmap,
351 struct inode *inode, int mask)
352 {
353 struct cifs_sb_info *cifs_sb;
354
355 cifs_sb = CIFS_SB(inode->i_sb);
356
357 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
358 if ((mask & MAY_EXEC) && !execute_ok(inode))
359 return -EACCES;
360 else
361 return 0;
362 } else /* file mode might have been restricted at mount time
363 on the client (above and beyond ACL on servers) for
364 servers which do not support setting and viewing mode bits,
365 so allowing client to check permissions is useful */
366 return generic_permission(&nop_mnt_idmap, inode, mask);
367 }
368
/* slab caches and mempools for inodes, request buffers and mids */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
376
/*
 * ->alloc_inode: allocate a cifsInodeInfo from the slab cache and
 * initialize every cifs-specific field to a safe default.  Returns the
 * embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
415
416 static void
cifs_free_inode(struct inode * inode)417 cifs_free_inode(struct inode *inode)
418 {
419 struct cifsInodeInfo *cinode = CIFS_I(inode);
420
421 if (S_ISLNK(inode->i_mode))
422 kfree(cinode->symlink_target);
423 kmem_cache_free(cifs_inode_cachep, cinode);
424 }
425
/*
 * ->evict_inode: final inode teardown — drop all cached pages and release
 * the fscache cookie before the VFS clears the inode.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* if this inode was pinning the cache for writeback, unuse it first */
	if (inode->i_state & I_PINNING_FSCACHE_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
435
436 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)437 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
438 {
439 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
440 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
441
442 seq_puts(s, ",addr=");
443
444 switch (server->dstaddr.ss_family) {
445 case AF_INET:
446 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
447 break;
448 case AF_INET6:
449 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
450 if (sa6->sin6_scope_id)
451 seq_printf(s, "%%%u", sa6->sin6_scope_id);
452 break;
453 default:
454 seq_puts(s, "(unknown)");
455 }
456 if (server->rdma)
457 seq_puts(s, ",rdma");
458 }
459
/*
 * Emit the ",sec=" mount option matching the session's negotiated
 * security type, plus an "i" suffix when signing is enabled and a
 * ",cruid=" option for Kerberos sessions.
 */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		/* unspecified type with no user name shows as sec=none */
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	/* trailing "i" (e.g. "krb5i") means signing is enabled */
	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}
494
495 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)496 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
497 {
498 seq_puts(s, ",cache=");
499
500 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
501 seq_puts(s, "strict");
502 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
503 seq_puts(s, "none");
504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
505 seq_puts(s, "singleclient"); /* assume only one client access */
506 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
507 seq_puts(s, "ro"); /* read only caching assumed */
508 else
509 seq_puts(s, "loose");
510 }
511
512 /*
513 * cifs_show_devname() is used so we show the mount device name with correct
514 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
515 */
cifs_show_devname(struct seq_file * m,struct dentry * root)516 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
517 {
518 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
519 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
520
521 if (devname == NULL)
522 seq_puts(m, "none");
523 else {
524 convert_delimiter(devname, '/');
525 /* escape all spaces in share names */
526 seq_escape(m, devname, " \t");
527 kfree(devname);
528 }
529 return 0;
530 }
531
532 /*
533 * cifs_show_options() is for displaying mount options in /proc/mounts.
534 * Not all settable options are displayed but most of the important
535 * ones are.
536 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	/* dialect, security flavor and caching mode come first */
	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	/* multiuser mounts do not show a single owning user name */
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* only display srcaddr if one was explicitly bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* boolean options derived directly from mnt_cifs_flags */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	/* I/O sizing and transport tunables */
	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
729
/*
 * ->umount_begin (umount -f): if this is the only mount of the share,
 * close deferred file handles and wake all threads waiting on the server
 * queues so outstanding requests can notice the forced unmount.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* tc_lock nests inside cifs_tcp_ses_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
774
/*
 * ->freeze_fs: flush deferred file closes so no handles linger while
 * the filesystem is frozen.  Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
788
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats: per-mount statistics display; not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
796
/*
 * ->write_inode: no cifs metadata to write back here; just release any
 * fscache writeback pin held for this inode.
 */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
	return 0;
}
802
cifs_drop_inode(struct inode * inode)803 static int cifs_drop_inode(struct inode *inode)
804 {
805 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
806
807 /* no serverino => unconditional eviction */
808 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
809 generic_drop_inode(inode);
810 }
811
/* super_operations wired into every cifs super block (see cifs_read_super) */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
832
833 /*
834 * Get root dentry from superblock according to prefix path mount option.
835 * Return dentry with refcount + 1 on success and NULL otherwise.
836 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* when the prefix path is baked into the mount, s_root already is it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the prefix path one component at a time, holding a ref on
	   the current dentry and swapping it for each child found */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		/* every intermediate component must be a directory */
		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
887
cifs_set_super(struct super_block * sb,void * data)888 static int cifs_set_super(struct super_block *sb, void *data)
889 {
890 struct cifs_mnt_data *mnt_data = data;
891 sb->s_fs_info = mnt_data->cifs_sb;
892 return set_anon_super(sb, NULL);
893 }
894
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Allocates and fills a cifs_sb_info from the fs context, performs the
 * network mount, then finds or creates a matching super block via sget()
 * and returns the root dentry (honoring any prefix path).
 *
 * Ownership notes: once sget() succeeds the superblock owns cifs_sb; if
 * an existing superblock is reused, the new cifs_sb is released here and
 * the local pointer is set to NULL.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	/* the sb keeps its own private copy of the fs context */
	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	/* establish the network connection / tree connect */
	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		/* sget failed: tear down the network mount we just made */
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* existing sb reused: drop our duplicate mount state */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* when cifs_sb was released above, fall back to the caller's ctx */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* pre-sget failure: cifs_sb and its context are still ours to free */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
988
989
990 static ssize_t
cifs_loose_read_iter(struct kiocb * iocb,struct iov_iter * iter)991 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
992 {
993 ssize_t rc;
994 struct inode *inode = file_inode(iocb->ki_filp);
995
996 if (iocb->ki_flags & IOCB_DIRECT)
997 return cifs_user_readv(iocb, iter);
998
999 rc = cifs_revalidate_mapping(inode);
1000 if (rc)
1001 return rc;
1002
1003 return generic_file_read_iter(iocb, iter);
1004 }
1005
/*
 * Write for mounts using the default cache mode.
 *
 * O_DIRECT writes go to the server via cifs_user_writev(); on success any
 * cached read data is zapped and the oplock level cleared so stale
 * pagecache contents cannot be served afterwards.
 *
 * Buffered writes first take a writer reference via cifs_get_writer()
 * (nonzero return is an error passed straight back), then use the generic
 * write path; unless we hold write-caching state (CIFS_CACHE_WRITE) the
 * dirty pages are pushed to the server immediately.
 */
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/* cached pages may now be stale — discard them */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	/* no write cache held: start writeback now; failure is only logged,
	 * the byte count from the generic write is still returned */
	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
1043
/*
 * ->llseek handler.  SEEK_SET/SEEK_CUR need no server round trip; any other
 * whence (SEEK_END/SEEK_DATA/SEEK_HOLE) depends on an up-to-date file size,
 * so cached attributes are revalidated first.  A protocol-specific llseek
 * op, when provided, is preferred over generic_file_llseek().
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* delegate to the dialect-specific llseek when one is registered */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1088
/*
 * ->setlease handler: only grant a local lease when the client holds the
 * matching cached oplock/lease state, or when the local_lease mount option
 * explicitly opts in; otherwise return -EAGAIN.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data; /* NOTE(review): assumed non-NULL below — confirm setlease cannot run on a file without private_data */

	if (!(S_ISREG(inode->i_mode)))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1121
/* The "cifs" filesystem type; mount option parsing is shared with "smb3"
 * via the smb3 fs_context hooks. */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1131
/* The "smb3" filesystem type: same hooks as cifs_fs_type above, registered
 * under a second name (and module alias). */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1142
/* Directory inode operations: namespace ops (create/lookup/rename/…) plus
 * xattr listing and POSIX ACL get/set. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1161
/* Regular-file inode operations: attribute handling, xattrs, ACLs and
 * fiemap extent mapping. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1171
/*
 * ->get_link: hand the VFS a copy of the symlink target cached on the cifs
 * inode.  The copy is made under i_lock (which guards symlink_target here)
 * and released later by the VFS via the delayed_call (kfree_link).
 *
 * Returns the target string, or ERR_PTR(-EOPNOTSUPP) when no target has
 * been cached, or ERR_PTR(-ENOMEM).
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	/* arrange for the VFS to kfree() the copy when done with it */
	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}
1195
/* Symlink inode operations; .get_link serves the cached target above. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1202
/*
 * Advance the EOF marker to after the source range.
 *
 * A server-side copy fails if the source range crosses the server's EOF, so
 * push the server's file size out (to the locally cached i_size) through any
 * writable handle before copying.  Needs a writable open handle; without one
 * rc stays at -EINVAL.
 *
 * On success the netfs/fscache size state is updated to src_end.  On failure
 * we fall back to flushing the source mapping and return that result — note
 * this can be 0, in which case the caller proceeds with flushed data and the
 * copy is left to succeed or fail server-side.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	netfs_resize_file(&src_cifsi->netfs, src_end);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	return filemap_write_and_wait(src_inode->i_mapping);
}
1235
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate. We extend the flush
 * bounds to encompass the folio.
 *
 * @pos: byte position whose containing folio is examined
 * @_fstart/@_fend: in/out flush bounds, widened to cover the whole folio
 * @first: true when pos is the start of the range, false when it is the
 *	(inclusive) end
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	/* nothing cached at pos — nothing to flush */
	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	/* folio edge coincides with the range edge: folio lies entirely
	 * inside the range to be invalidated, so skip the flush */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1268
/*
 * ->remap_file_range handler: clone (reflink) [off, off+len) of src_file
 * into dst_file at destoff via the dialect's duplicate_extents op.
 * Deduplication requests are not supported.
 *
 * Fix: the "clone range" FYI message was emitted twice per call (once
 * before get_xid() and again after taking the inode locks); the duplicate
 * has been dropped.
 *
 * Returns len (bytes cloned) on success or a negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone to EOF" per the remap_file_range contract */
	if (len == 0)
		len = src_inode->i_size - off;

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			/* the clone grew the target: update cached sizes */
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1376
/*
 * Server-side copy of [off, off+len) from src_file to dst_file at destoff
 * via the dialect's copychunk_range op.  Both files must be open on the
 * same SMB session; otherwise -EXDEV lets the caller fall back to a
 * generic copy.  Returns bytes copied (as reported by copychunk_range) or
 * a negative errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		/* NOTE(review): message says "src file" even when the dst
		 * handle is the one missing */
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* server-side copies only work within a single session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* push dirty source data so the server copies current contents */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->server_eof < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* copy may extend the target past its cached i_size */
		if (rc > 0 && destoff + rc > i_size_read(target_inode))
			truncate_setsize(target_inode, destoff + rc);
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1481
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	/* nothing to flush: directory changes were already sent to the server */
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1493
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1494 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1495 struct file *dst_file, loff_t destoff,
1496 size_t len, unsigned int flags)
1497 {
1498 unsigned int xid = get_xid();
1499 ssize_t rc;
1500 struct cifsFileInfo *cfile = dst_file->private_data;
1501
1502 if (cfile->swapfile) {
1503 rc = -EOPNOTSUPP;
1504 free_xid(xid);
1505 return rc;
1506 }
1507
1508 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1509 len, flags);
1510 free_xid(xid);
1511
1512 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1513 rc = generic_copy_file_range(src_file, off, dst_file,
1514 destoff, len, flags);
1515 return rc;
1516 }
1517
/* Regular files, default cached I/O (cifs_loose_read_iter), with
 * byte-range lock (.lock/.flock) support. */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1537
/* As cifs_file_ops but wired to the cifs_strict_* read/write/fsync/mmap
 * handlers (strict cache semantics). */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1557
/* cifs_direct_* read/write variant; note it uses copy_splice_read rather
 * than the pagecache-based filemap_splice_read. */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1577
/* As cifs_file_ops but without .lock/.flock (no byte-range locks). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1595
/* Strict-cache handlers without .lock/.flock (no byte-range locks). */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1613
/* Direct-I/O handlers without .lock/.flock (no byte-range locks). */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1631
/* Directory file operations; .fsync is the dummy cifs_dir_fsync since
 * directory changes are sent synchronously. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1642
/* Slab constructor for cifsInodeInfo objects: runs once per object when
 * the slab memory is first set up, not on every inode allocation. */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1651
1652 static int __init
cifs_init_inodecache(void)1653 cifs_init_inodecache(void)
1654 {
1655 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1656 sizeof(struct cifsInodeInfo),
1657 0, (SLAB_RECLAIM_ACCOUNT|
1658 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1659 cifs_init_once);
1660 if (cifs_inode_cachep == NULL)
1661 return -ENOMEM;
1662
1663 return 0;
1664 }
1665
/* Tear down the cifsInodeInfo slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1676
/*
 * Create the slab caches and mempools for large and small SMB request
 * buffers, clamping the CIFSMaxBufSize, cifs_min_rcv and cifs_min_small
 * module parameters to supportable ranges first.  On any failure,
 * everything created so far is torn down and -ENOMEM returned.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist covers the whole buffer (request payload) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	/* mempool guarantees cifs_min_rcv large buffers even under pressure */
	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests). A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		/* unwind everything created above */
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1757
/* Destroy the request-buffer mempools and slab caches (pools first). */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1766
init_mids(void)1767 static int init_mids(void)
1768 {
1769 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1770 sizeof(struct mid_q_entry), 0,
1771 SLAB_HWCACHE_ALIGN, NULL);
1772 if (cifs_mid_cachep == NULL)
1773 return -ENOMEM;
1774
1775 /* 3 is a reasonable minimum number of simultaneous operations */
1776 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1777 if (cifs_mid_poolp == NULL) {
1778 kmem_cache_destroy(cifs_mid_cachep);
1779 return -ENOMEM;
1780 }
1781
1782 return 0;
1783 }
1784
/* Destroy the mid_q_entry mempool and slab cache (pool first). */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1790
/*
 * Module init: set up procfs, global counters/locks, workqueues, slab
 * caches/mempools and the optional upcall subsystems, then register the
 * "cifs" and "smb3" filesystem types.  The goto ladder at the bottom
 * unwinds in exact reverse order of initialization.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	/* clamp module parameters to supportable ranges */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	/* memory caches, then optional upcall subsystems */
	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_serverclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	/* finally make the filesystems mountable */
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/* error unwinding: labels run in reverse order of the setup above */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
1971
/*
 * Module exit: unregister the filesystem types first (no new mounts can
 * start), then tear down upcalls, caches and workqueues set up by
 * init_cifs().
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2000
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");       /* combination of LGPL + GPL source behaves as GPL */
/* Android GKI build: import the restricted VFS export namespace */
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: crypto and nls modules that may be requested at
 * runtime (session setup, signing, encryption); listing them lets
 * tooling pack them alongside cifs.ko (e.g. into an initramfs).
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2021