1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 #include "xfs_defer.h"
41 #include "xfs_attr_item.h"
42 #include "xfs_xattr.h"
43 #include "xfs_iunlink_item.h"
44 #include "xfs_dahash_test.h"
45 #include "xfs_rtbitmap.h"
46 #include "xfs_exchmaps_item.h"
47 #include "xfs_parent.h"
48 #include "scrub/stats.h"
49 #include "scrub/rcbag_btree.h"
50
51 #include <linux/magic.h>
52 #include <linux/fs_context.h>
53 #include <linux/fs_parser.h>
54
55 static const struct super_operations xfs_super_operations;
56
57 static struct dentry *xfs_debugfs; /* top-level xfs debugfs dir */
58 static struct kset *xfs_kset; /* top-level xfs sysfs dir */
59 #ifdef DEBUG
60 static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
61 #endif
62
63 enum xfs_dax_mode {
64 XFS_DAX_INODE = 0,
65 XFS_DAX_ALWAYS = 1,
66 XFS_DAX_NEVER = 2,
67 };
68
69 static void
70 xfs_mount_set_dax_mode(
71 struct xfs_mount *mp,
72 enum xfs_dax_mode mode)
73 {
74 switch (mode) {
75 case XFS_DAX_INODE:
76 mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
77 break;
78 case XFS_DAX_ALWAYS:
79 mp->m_features |= XFS_FEAT_DAX_ALWAYS;
80 mp->m_features &= ~XFS_FEAT_DAX_NEVER;
81 break;
82 case XFS_DAX_NEVER:
83 mp->m_features |= XFS_FEAT_DAX_NEVER;
84 mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
85 break;
86 }
87 }
88
89 static const struct constant_table dax_param_enums[] = {
90 {"inode", XFS_DAX_INODE },
91 {"always", XFS_DAX_ALWAYS },
92 {"never", XFS_DAX_NEVER },
93 {}
94 };
95
96 /*
97 * Table driven mount option parser.
98 */
99 enum {
100 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
101 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
102 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
103 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
104 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
105 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
106 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
107 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
108 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
109 };
110
111 static const struct fs_parameter_spec xfs_fs_parameters[] = {
112 fsparam_u32("logbufs", Opt_logbufs),
113 fsparam_string("logbsize", Opt_logbsize),
114 fsparam_string("logdev", Opt_logdev),
115 fsparam_string("rtdev", Opt_rtdev),
116 fsparam_flag("wsync", Opt_wsync),
117 fsparam_flag("noalign", Opt_noalign),
118 fsparam_flag("swalloc", Opt_swalloc),
119 fsparam_u32("sunit", Opt_sunit),
120 fsparam_u32("swidth", Opt_swidth),
121 fsparam_flag("nouuid", Opt_nouuid),
122 fsparam_flag("grpid", Opt_grpid),
123 fsparam_flag("nogrpid", Opt_nogrpid),
124 fsparam_flag("bsdgroups", Opt_bsdgroups),
125 fsparam_flag("sysvgroups", Opt_sysvgroups),
126 fsparam_string("allocsize", Opt_allocsize),
127 fsparam_flag("norecovery", Opt_norecovery),
128 fsparam_flag("inode64", Opt_inode64),
129 fsparam_flag("inode32", Opt_inode32),
130 fsparam_flag("ikeep", Opt_ikeep),
131 fsparam_flag("noikeep", Opt_noikeep),
132 fsparam_flag("largeio", Opt_largeio),
133 fsparam_flag("nolargeio", Opt_nolargeio),
134 fsparam_flag("attr2", Opt_attr2),
135 fsparam_flag("noattr2", Opt_noattr2),
136 fsparam_flag("filestreams", Opt_filestreams),
137 fsparam_flag("quota", Opt_quota),
138 fsparam_flag("noquota", Opt_noquota),
139 fsparam_flag("usrquota", Opt_usrquota),
140 fsparam_flag("grpquota", Opt_grpquota),
141 fsparam_flag("prjquota", Opt_prjquota),
142 fsparam_flag("uquota", Opt_uquota),
143 fsparam_flag("gquota", Opt_gquota),
144 fsparam_flag("pquota", Opt_pquota),
145 fsparam_flag("uqnoenforce", Opt_uqnoenforce),
146 fsparam_flag("gqnoenforce", Opt_gqnoenforce),
147 fsparam_flag("pqnoenforce", Opt_pqnoenforce),
148 fsparam_flag("qnoenforce", Opt_qnoenforce),
149 fsparam_flag("discard", Opt_discard),
150 fsparam_flag("nodiscard", Opt_nodiscard),
151 fsparam_flag("dax", Opt_dax),
152 fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
153 {}
154 };
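
/*
 * Example (hypothetical device and mount point): several of the options
 * above combined on one mount command line:
 *
 *   mount -t xfs -o logbufs=8,logbsize=256k,noalign,dax=never /dev/sda1 /mnt
 *
 * Numeric values such as logbsize and allocsize accept k/m/g suffixes,
 * which suffix_kstrtoint() below expands before validation.
 */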
155
156 struct proc_xfs_info {
157 uint64_t flag;
158 char *str;
159 };
160
161 static int
162 xfs_fs_show_options(
163 struct seq_file *m,
164 struct dentry *root)
165 {
166 static struct proc_xfs_info xfs_info_set[] = {
167 /* the few simple ones we can get from the mount struct */
168 { XFS_FEAT_IKEEP, ",ikeep" },
169 { XFS_FEAT_WSYNC, ",wsync" },
170 { XFS_FEAT_NOALIGN, ",noalign" },
171 { XFS_FEAT_SWALLOC, ",swalloc" },
172 { XFS_FEAT_NOUUID, ",nouuid" },
173 { XFS_FEAT_NORECOVERY, ",norecovery" },
174 { XFS_FEAT_ATTR2, ",attr2" },
175 { XFS_FEAT_FILESTREAMS, ",filestreams" },
176 { XFS_FEAT_GRPID, ",grpid" },
177 { XFS_FEAT_DISCARD, ",discard" },
178 { XFS_FEAT_LARGE_IOSIZE, ",largeio" },
179 { XFS_FEAT_DAX_ALWAYS, ",dax=always" },
180 { XFS_FEAT_DAX_NEVER, ",dax=never" },
181 { 0, NULL }
182 };
183 struct xfs_mount *mp = XFS_M(root->d_sb);
184 struct proc_xfs_info *xfs_infop;
185
186 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
187 if (mp->m_features & xfs_infop->flag)
188 seq_puts(m, xfs_infop->str);
189 }
190
191 seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
192
193 if (xfs_has_allocsize(mp))
194 seq_printf(m, ",allocsize=%dk",
195 (1 << mp->m_allocsize_log) >> 10);
196
197 if (mp->m_logbufs > 0)
198 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
199 if (mp->m_logbsize > 0)
200 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
201
202 if (mp->m_logname)
203 seq_show_option(m, "logdev", mp->m_logname);
204 if (mp->m_rtname)
205 seq_show_option(m, "rtdev", mp->m_rtname);
206
207 if (mp->m_dalign > 0)
208 seq_printf(m, ",sunit=%d",
209 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
210 if (mp->m_swidth > 0)
211 seq_printf(m, ",swidth=%d",
212 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
213
214 if (mp->m_qflags & XFS_UQUOTA_ENFD)
215 seq_puts(m, ",usrquota");
216 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
217 seq_puts(m, ",uqnoenforce");
218
219 if (mp->m_qflags & XFS_PQUOTA_ENFD)
220 seq_puts(m, ",prjquota");
221 else if (mp->m_qflags & XFS_PQUOTA_ACCT)
222 seq_puts(m, ",pqnoenforce");
223
224 if (mp->m_qflags & XFS_GQUOTA_ENFD)
225 seq_puts(m, ",grpquota");
226 else if (mp->m_qflags & XFS_GQUOTA_ACCT)
227 seq_puts(m, ",gqnoenforce");
228
229 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
230 seq_puts(m, ",noquota");
231
232 return 0;
233 }
234
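
/*
 * Apply the inode32/inode64 allocation policy to a single AG: record in the
 * per-AG opstate whether the AG may hold inodes and whether it is preferred
 * for metadata. The return value feeds the maxagi count in
 * xfs_set_inode_alloc() and is only meaningful for inode32 mounts.
 */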
235 static bool
236 xfs_set_inode_alloc_perag(
237 struct xfs_perag *pag,
238 xfs_ino_t ino,
239 xfs_agnumber_t max_metadata)
240 {
241 if (!xfs_is_inode32(pag->pag_mount)) {
242 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
243 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
244 return false;
245 }
246
247 if (ino > XFS_MAXINUMBER_32) {
248 clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
249 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
250 return false;
251 }
252
253 set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
254 if (pag->pag_agno < max_metadata)
255 set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
256 else
257 clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
258 return true;
259 }
260
261 /*
262 * Set parameters for inode allocation heuristics, taking into account
263 * filesystem size and inode32/inode64 mount options; i.e. specifically
264 * whether or not XFS_FEAT_SMALL_INUMS is set.
265 *
266 * Inode allocation patterns are altered only if inode32 is requested
267 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
268 * If altered, XFS_OPSTATE_INODE32 is set as well.
269 *
270 * An agcount independent of that in the mount structure is provided
271 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
272 * to the potentially higher ag count.
273 *
274 * Returns the maximum AG index which may contain inodes.
275 */
276 xfs_agnumber_t
277 xfs_set_inode_alloc(
278 struct xfs_mount *mp,
279 xfs_agnumber_t agcount)
280 {
281 xfs_agnumber_t index;
282 xfs_agnumber_t maxagi = 0;
283 xfs_sb_t *sbp = &mp->m_sb;
284 xfs_agnumber_t max_metadata;
285 xfs_agino_t agino;
286 xfs_ino_t ino;
287
288 /*
289 * Calculate how much should be reserved for inodes to meet
290 * the max inode percentage. Used only for inode32.
291 */
292 if (M_IGEO(mp)->maxicount) {
293 uint64_t icount;
294
295 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
296 do_div(icount, 100);
297 icount += sbp->sb_agblocks - 1;
298 do_div(icount, sbp->sb_agblocks);
299 max_metadata = icount;
300 } else {
301 max_metadata = agcount;
302 }
303
304 /* Get the last possible inode in the filesystem */
305 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
306 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
307
308 /*
309 * If user asked for no more than 32-bit inodes, and the fs is
310 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
311 * the allocator to accommodate the request.
312 */
313 if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
314 xfs_set_inode32(mp);
315 else
316 xfs_clear_inode32(mp);
317
318 for (index = 0; index < agcount; index++) {
319 struct xfs_perag *pag;
320
321 ino = XFS_AGINO_TO_INO(mp, index, agino);
322
323 pag = xfs_perag_get(mp, index);
324 if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
325 maxagi++;
326 xfs_perag_put(pag);
327 }
328
329 return xfs_is_inode32(mp) ? maxagi : agcount;
330 }
331
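
/*
 * Sanity-check a dax=always mount: fall back to dax=never if neither block
 * device supports DAX or the block size does not match the page size, and
 * reject the reflink + partitioned-device combination outright.
 */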
332 static int
333 xfs_setup_dax_always(
334 struct xfs_mount *mp)
335 {
336 if (!mp->m_ddev_targp->bt_daxdev &&
337 (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
338 xfs_alert(mp,
339 "DAX unsupported by block device. Turning off DAX.");
340 goto disable_dax;
341 }
342
343 if (mp->m_super->s_blocksize != PAGE_SIZE) {
344 xfs_alert(mp,
345 "DAX not supported for blocksize. Turning off DAX.");
346 goto disable_dax;
347 }
348
349 if (xfs_has_reflink(mp) &&
350 bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
351 xfs_alert(mp,
352 "DAX and reflink cannot work with multi-partitions!");
353 return -EINVAL;
354 }
355
356 return 0;
357
358 disable_dax:
359 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
360 return 0;
361 }
362
363 STATIC int
364 xfs_blkdev_get(
365 xfs_mount_t *mp,
366 const char *name,
367 struct file **bdev_filep)
368 {
369 int error = 0;
370
371 *bdev_filep = bdev_file_open_by_path(name,
372 BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
373 mp->m_super, &fs_holder_ops);
374 if (IS_ERR(*bdev_filep)) {
375 error = PTR_ERR(*bdev_filep);
376 *bdev_filep = NULL;
377 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
378 }
379
380 return error;
381 }
382
383 STATIC void
384 xfs_shutdown_devices(
385 struct xfs_mount *mp)
386 {
387 /*
388 * Udev is triggered whenever anyone closes a block device or unmounts
389 * a file system on a block device.
390 * The default udev rules invoke blkid to read the fs super and create
391 * symlinks to the bdev under /dev/disk. For this, it uses buffered
392 * reads through the page cache.
393 *
394 * xfs_db also uses buffered reads to examine metadata. There is no
395 * coordination between xfs_db and udev, which means that they can run
396 * concurrently. Note there is no coordination between the kernel and
397 * blkid either.
398 *
399 * On a system with 64k pages, the page cache can cache the superblock
400 * and the root inode (and hence the root directory) with the same 64k
401 * page. If udev spawns blkid after the mkfs and the system is busy
402 * enough that it is still running when xfs_db starts up, they'll both
403 * read from the same page in the pagecache.
404 *
405 * The unmount writes updated inode metadata to disk directly. The XFS
406 * buffer cache does not use the bdev pagecache, so it needs to
407 * invalidate that pagecache on unmount. If the above scenario occurs,
408 * the pagecache no longer reflects what's on disk, xfs_db reads the
409 * stale metadata, and fails to find /a. Most of the time this succeeds
410 * because closing a bdev invalidates the page cache, but when processes
411 * race, everyone loses.
412 */
413 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
414 blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
415 invalidate_bdev(mp->m_logdev_targp->bt_bdev);
416 }
417 if (mp->m_rtdev_targp) {
418 blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
419 invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
420 }
421 blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
422 invalidate_bdev(mp->m_ddev_targp->bt_bdev);
423 }
424
425 /*
426 * The file system configurations are:
427 * (1) device (partition) with data and internal log
428 * (2) logical volume with data and log subvolumes.
429 * (3) logical volume with data, log, and realtime subvolumes.
430 *
431 * We only have to handle opening the log and realtime volumes here if
432 * they are present. The data subvolume has already been opened by
433 * get_sb_bdev() and is stored in sb->s_bdev.
434 */
435 STATIC int
436 xfs_open_devices(
437 struct xfs_mount *mp)
438 {
439 struct super_block *sb = mp->m_super;
440 struct block_device *ddev = sb->s_bdev;
441 struct file *logdev_file = NULL, *rtdev_file = NULL;
442 int error;
443
444 /*
445 * Open real time and log devices - order is important.
446 */
447 if (mp->m_logname) {
448 error = xfs_blkdev_get(mp, mp->m_logname, &logdev_file);
449 if (error)
450 return error;
451 }
452
453 if (mp->m_rtname) {
454 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_file);
455 if (error)
456 goto out_close_logdev;
457
458 if (file_bdev(rtdev_file) == ddev ||
459 (logdev_file &&
460 file_bdev(rtdev_file) == file_bdev(logdev_file))) {
461 xfs_warn(mp,
462 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
463 error = -EINVAL;
464 goto out_close_rtdev;
465 }
466 }
467
468 /*
469 * Setup xfs_mount buffer target pointers
470 */
471 error = -ENOMEM;
472 mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_file);
473 if (!mp->m_ddev_targp)
474 goto out_close_rtdev;
475
476 if (rtdev_file) {
477 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_file);
478 if (!mp->m_rtdev_targp)
479 goto out_free_ddev_targ;
480 }
481
482 if (logdev_file && file_bdev(logdev_file) != ddev) {
483 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_file);
484 if (!mp->m_logdev_targp)
485 goto out_free_rtdev_targ;
486 } else {
487 mp->m_logdev_targp = mp->m_ddev_targp;
488 /* Handle won't be used, drop it */
489 if (logdev_file)
490 bdev_fput(logdev_file);
491 }
492
493 return 0;
494
495 out_free_rtdev_targ:
496 if (mp->m_rtdev_targp)
497 xfs_free_buftarg(mp->m_rtdev_targp);
498 out_free_ddev_targ:
499 xfs_free_buftarg(mp->m_ddev_targp);
500 out_close_rtdev:
501 if (rtdev_file)
502 bdev_fput(rtdev_file);
503 out_close_logdev:
504 if (logdev_file)
505 bdev_fput(logdev_file);
506 return error;
507 }
508
509 /*
510 * Setup xfs_mount buffer target pointers based on superblock
511 */
512 STATIC int
513 xfs_setup_devices(
514 struct xfs_mount *mp)
515 {
516 int error;
517
518 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
519 if (error)
520 return error;
521
522 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
523 unsigned int log_sector_size = BBSIZE;
524
525 if (xfs_has_sector(mp))
526 log_sector_size = mp->m_sb.sb_logsectsize;
527 error = xfs_setsize_buftarg(mp->m_logdev_targp,
528 log_sector_size);
529 if (error)
530 return error;
531 }
532 if (mp->m_rtdev_targp) {
533 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
534 mp->m_sb.sb_sectsize);
535 if (error)
536 return error;
537 }
538
539 return 0;
540 }
541
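
/*
 * Create the per-mount workqueues: buffer completion, unwritten extent
 * conversion, inode reclaim, block and inode garbage collection, and
 * background sync. Torn down again by xfs_destroy_mount_workqueues().
 */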
542 STATIC int
543 xfs_init_mount_workqueues(
544 struct xfs_mount *mp)
545 {
546 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
547 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
548 1, mp->m_super->s_id);
549 if (!mp->m_buf_workqueue)
550 goto out;
551
552 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
553 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
554 0, mp->m_super->s_id);
555 if (!mp->m_unwritten_workqueue)
556 goto out_destroy_buf;
557
558 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
559 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
560 0, mp->m_super->s_id);
561 if (!mp->m_reclaim_workqueue)
562 goto out_destroy_unwritten;
563
564 mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
565 XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
566 0, mp->m_super->s_id);
567 if (!mp->m_blockgc_wq)
568 goto out_destroy_reclaim;
569
570 mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
571 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
572 1, mp->m_super->s_id);
573 if (!mp->m_inodegc_wq)
574 goto out_destroy_blockgc;
575
576 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
577 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
578 if (!mp->m_sync_workqueue)
579 goto out_destroy_inodegc;
580
581 return 0;
582
583 out_destroy_inodegc:
584 destroy_workqueue(mp->m_inodegc_wq);
585 out_destroy_blockgc:
586 destroy_workqueue(mp->m_blockgc_wq);
587 out_destroy_reclaim:
588 destroy_workqueue(mp->m_reclaim_workqueue);
589 out_destroy_unwritten:
590 destroy_workqueue(mp->m_unwritten_workqueue);
591 out_destroy_buf:
592 destroy_workqueue(mp->m_buf_workqueue);
593 out:
594 return -ENOMEM;
595 }
596
597 STATIC void
598 xfs_destroy_mount_workqueues(
599 struct xfs_mount *mp)
600 {
601 destroy_workqueue(mp->m_sync_workqueue);
602 destroy_workqueue(mp->m_blockgc_wq);
603 destroy_workqueue(mp->m_inodegc_wq);
604 destroy_workqueue(mp->m_reclaim_workqueue);
605 destroy_workqueue(mp->m_unwritten_workqueue);
606 destroy_workqueue(mp->m_buf_workqueue);
607 }
608
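
/* Worker for xfs_flush_inodes(); skips the sync if s_umount cannot be taken. */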
609 static void
610 xfs_flush_inodes_worker(
611 struct work_struct *work)
612 {
613 struct xfs_mount *mp = container_of(work, struct xfs_mount,
614 m_flush_inodes_work);
615 struct super_block *sb = mp->m_super;
616
617 if (down_read_trylock(&sb->s_umount)) {
618 sync_inodes_sb(sb);
619 up_read(&sb->s_umount);
620 }
621 }
622
623 /*
624 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
625 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
626 * for IO to complete so that we effectively throttle multiple callers to the
627 * rate at which IO is completing.
628 */
629 void
630 xfs_flush_inodes(
631 struct xfs_mount *mp)
632 {
633 /*
634 * If flush_work() returns true then that means we waited for a flush
635 * which was already in progress. Don't bother running another scan.
636 */
637 if (flush_work(&mp->m_flush_inodes_work))
638 return;
639
640 queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
641 flush_work(&mp->m_flush_inodes_work);
642 }
643
644 /* Catch misguided souls that try to use this interface on XFS */
645 STATIC struct inode *
646 xfs_fs_alloc_inode(
647 struct super_block *sb)
648 {
649 BUG();
650 return NULL;
651 }
652
653 /*
654 * Now that the generic code is guaranteed not to be accessing
655 * the linux inode, we can inactivate and reclaim the inode.
656 */
657 STATIC void
658 xfs_fs_destroy_inode(
659 struct inode *inode)
660 {
661 struct xfs_inode *ip = XFS_I(inode);
662
663 trace_xfs_destroy_inode(ip);
664
665 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
666 XFS_STATS_INC(ip->i_mount, vn_rele);
667 XFS_STATS_INC(ip->i_mount, vn_remove);
668 xfs_inode_mark_reclaimable(ip);
669 }
670
671 static void
672 xfs_fs_dirty_inode(
673 struct inode *inode,
674 int flags)
675 {
676 struct xfs_inode *ip = XFS_I(inode);
677 struct xfs_mount *mp = ip->i_mount;
678 struct xfs_trans *tp;
679
680 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
681 return;
682
683 /*
684 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
685 * and has a dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
686 * in flags possibly together with I_DIRTY_SYNC.
687 */
688 if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
689 return;
690
691 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
692 return;
693 xfs_ilock(ip, XFS_ILOCK_EXCL);
694 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
695 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
696 xfs_trans_commit(tp);
697 }
698
699 /*
700 * Slab object creation initialisation for the XFS inode.
701 * This covers only the idempotent fields in the XFS inode;
702 * all other fields need to be initialised on allocation
703 * from the slab. This avoids the need to repeatedly initialise
704 * fields in the xfs inode that are left in the initialised state
705 * when freeing the inode.
706 */
707 STATIC void
708 xfs_fs_inode_init_once(
709 void *inode)
710 {
711 struct xfs_inode *ip = inode;
712
713 memset(ip, 0, sizeof(struct xfs_inode));
714
715 /* vfs inode */
716 inode_init_once(VFS_I(ip));
717
718 /* xfs inode */
719 atomic_set(&ip->i_pincount, 0);
720 spin_lock_init(&ip->i_flags_lock);
721 init_rwsem(&ip->i_lock);
722 }
723
724 /*
725 * We do an unlocked check for XFS_IDONTCACHE here because we are already
726 * serialised against cache hits here via the inode->i_lock and igrab() in
727 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
728 * racing with us, and it avoids needing to grab a spinlock here for every inode
729 * we drop the final reference on.
730 */
731 STATIC int
732 xfs_fs_drop_inode(
733 struct inode *inode)
734 {
735 struct xfs_inode *ip = XFS_I(inode);
736
737 /*
738 * If this unlinked inode is in the middle of recovery, don't
739 * drop the inode just yet; log recovery will take care of
740 * that. See the comment for this inode flag.
741 */
742 if (ip->i_flags & XFS_IRECOVERY) {
743 ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
744 return 0;
745 }
746
747 return generic_drop_inode(inode);
748 }
749
750 static void
751 xfs_mount_free(
752 struct xfs_mount *mp)
753 {
754 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
755 xfs_free_buftarg(mp->m_logdev_targp);
756 if (mp->m_rtdev_targp)
757 xfs_free_buftarg(mp->m_rtdev_targp);
758 if (mp->m_ddev_targp)
759 xfs_free_buftarg(mp->m_ddev_targp);
760
761 debugfs_remove(mp->m_debugfs);
762 kfree(mp->m_rtname);
763 kfree(mp->m_logname);
764 kfree(mp);
765 }
766
767 STATIC int
768 xfs_fs_sync_fs(
769 struct super_block *sb,
770 int wait)
771 {
772 struct xfs_mount *mp = XFS_M(sb);
773 int error;
774
775 trace_xfs_fs_sync_fs(mp, __return_address);
776
777 /*
778 * Doing anything during the async pass would be counterproductive.
779 */
780 if (!wait)
781 return 0;
782
783 error = xfs_log_force(mp, XFS_LOG_SYNC);
784 if (error)
785 return error;
786
787 if (laptop_mode) {
788 /*
789 * The disk must be active because we're syncing.
790 * We schedule log work now (now that the disk is
791 * active) instead of later (when it might not be).
792 */
793 flush_delayed_work(&mp->m_log->l_work);
794 }
795
796 /*
797 * If we are called with page faults frozen out, it means we are about
798 * to freeze the transaction subsystem. Take the opportunity to shut
799 * down inodegc because once SB_FREEZE_FS is set it's too late to
800 * prevent inactivation races with freeze. The fs doesn't get called
801 * again by the freezing process until after SB_FREEZE_FS has been set,
802 * so it's now or never. Same logic applies to speculative allocation
803 * garbage collection.
804 *
805 * We don't care if this is a normal syncfs call that does this or
806 * freeze that does this - we can run this multiple times without issue
807 * and we won't race with a restart because a restart can only occur
808 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
809 */
810 if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
811 xfs_inodegc_stop(mp);
812 xfs_blockgc_stop(mp);
813 }
814
815 return 0;
816 }
817
818 STATIC int
819 xfs_fs_statfs(
820 struct dentry *dentry,
821 struct kstatfs *statp)
822 {
823 struct xfs_mount *mp = XFS_M(dentry->d_sb);
824 xfs_sb_t *sbp = &mp->m_sb;
825 struct xfs_inode *ip = XFS_I(d_inode(dentry));
826 uint64_t fakeinos, id;
827 uint64_t icount;
828 uint64_t ifree;
829 uint64_t fdblocks;
830 xfs_extlen_t lsize;
831 int64_t ffree;
832
833 /*
834 * Expedite background inodegc but don't wait. We do not want to block
835 * here waiting hours for a billion extent file to be truncated.
836 */
837 xfs_inodegc_push(mp);
838
839 statp->f_type = XFS_SUPER_MAGIC;
840 statp->f_namelen = MAXNAMELEN - 1;
841
842 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
843 statp->f_fsid = u64_to_fsid(id);
844
845 icount = percpu_counter_sum(&mp->m_icount);
846 ifree = percpu_counter_sum(&mp->m_ifree);
847 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
848
849 spin_lock(&mp->m_sb_lock);
850 statp->f_bsize = sbp->sb_blocksize;
851 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
852 statp->f_blocks = sbp->sb_dblocks - lsize;
853 spin_unlock(&mp->m_sb_lock);
854
855 /* make sure statp->f_bfree does not underflow */
856 statp->f_bfree = max_t(int64_t, 0,
857 fdblocks - xfs_fdblocks_unavailable(mp));
858 statp->f_bavail = statp->f_bfree;
859
860 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
861 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
862 if (M_IGEO(mp)->maxicount)
863 statp->f_files = min_t(typeof(statp->f_files),
864 statp->f_files,
865 M_IGEO(mp)->maxicount);
866
867 /* If sb_icount overshot maxicount, report actual allocation */
868 statp->f_files = max_t(typeof(statp->f_files),
869 statp->f_files,
870 sbp->sb_icount);
871
872 /* make sure statp->f_ffree does not underflow */
873 ffree = statp->f_files - (icount - ifree);
874 statp->f_ffree = max_t(int64_t, ffree, 0);
875
876 if (XFS_IS_REALTIME_MOUNT(mp) &&
877 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
878 s64 freertx;
879
880 statp->f_blocks = sbp->sb_rblocks;
881 freertx = percpu_counter_sum_positive(&mp->m_frextents);
882 statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
883 }
884
885 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
886 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
887 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
888 xfs_qm_statvfs(ip, statp);
889
890 return 0;
891 }
892
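
/*
 * Release the reserve block pool before a freeze or ro remount, stashing its
 * size so xfs_restore_resvblks() can refill it to the same level (or to the
 * default if nothing was stashed) on thaw or rw remount.
 */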
893 STATIC void
894 xfs_save_resvblks(struct xfs_mount *mp)
895 {
896 mp->m_resblks_save = mp->m_resblks;
897 xfs_reserve_blocks(mp, 0);
898 }
899
900 STATIC void
901 xfs_restore_resvblks(struct xfs_mount *mp)
902 {
903 uint64_t resblks;
904
905 if (mp->m_resblks_save) {
906 resblks = mp->m_resblks_save;
907 mp->m_resblks_save = 0;
908 } else
909 resblks = xfs_default_resblks(mp);
910
911 xfs_reserve_blocks(mp, resblks);
912 }
913
914 /*
915 * Second stage of a freeze. The data is already frozen so we only
916 * need to take care of the metadata. Once that's done sync the superblock
917 * to the log to dirty it in case of a crash while frozen. This ensures that we
918 * will recover the unlinked inode lists on the next mount.
919 */
920 STATIC int
921 xfs_fs_freeze(
922 struct super_block *sb)
923 {
924 struct xfs_mount *mp = XFS_M(sb);
925 unsigned int flags;
926 int ret;
927
928 /*
929 * The filesystem is now frozen far enough that memory reclaim
930 * cannot safely operate on the filesystem. Hence we need to
931 * set a GFP_NOFS context here to avoid recursion deadlocks.
932 */
933 flags = memalloc_nofs_save();
934 xfs_save_resvblks(mp);
935 ret = xfs_log_quiesce(mp);
936 memalloc_nofs_restore(flags);
937
938 /*
939 * For read-write filesystems, we need to restart the inodegc on error
940 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
941 * going to be run to restart it now. We are at SB_FREEZE_FS level
942 * here, so we can restart safely without racing with a stop in
943 * xfs_fs_sync_fs().
944 */
945 if (ret && !xfs_is_readonly(mp)) {
946 xfs_blockgc_start(mp);
947 xfs_inodegc_start(mp);
948 }
949
950 return ret;
951 }
952
953 STATIC int
954 xfs_fs_unfreeze(
955 struct super_block *sb)
956 {
957 struct xfs_mount *mp = XFS_M(sb);
958
959 xfs_restore_resvblks(mp);
960 xfs_log_work_queue(mp);
961
962 /*
963 * Don't reactivate the inodegc worker on a readonly filesystem because
964 * inodes are sent directly to reclaim. Don't reactivate the blockgc
965 * worker because there are no speculative preallocations on a readonly
966 * filesystem.
967 */
968 if (!xfs_is_readonly(mp)) {
969 xfs_blockgc_start(mp);
970 xfs_inodegc_start(mp);
971 }
972
973 return 0;
974 }
975
976 /*
977 * This function fills in xfs_mount_t fields based on mount args.
978 * Note: the superblock _has_ now been read in.
979 */
980 STATIC int
981 xfs_finish_flags(
982 struct xfs_mount *mp)
983 {
984 /* Fail a mount where the logbuf is smaller than the log stripe */
985 if (xfs_has_logv2(mp)) {
986 if (mp->m_logbsize <= 0 &&
987 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
988 mp->m_logbsize = mp->m_sb.sb_logsunit;
989 } else if (mp->m_logbsize > 0 &&
990 mp->m_logbsize < mp->m_sb.sb_logsunit) {
991 xfs_warn(mp,
992 "logbuf size must be greater than or equal to log stripe size");
993 return -EINVAL;
994 }
995 } else {
996 /* Fail a mount if the logbuf is larger than 32K */
997 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
998 xfs_warn(mp,
999 "logbuf size for version 1 logs must be 16K or 32K");
1000 return -EINVAL;
1001 }
1002 }
1003
1004 /*
1005 * V5 filesystems always use attr2 format for attributes.
1006 */
1007 if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
1008 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1009 "attr2 is always enabled for V5 filesystems.");
1010 return -EINVAL;
1011 }
1012
1013 /*
1014 * prohibit r/w mounts of read-only filesystems
1015 */
1016 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
1017 xfs_warn(mp,
1018 "cannot mount a read-only filesystem as read-write");
1019 return -EROFS;
1020 }
1021
1022 if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
1023 (mp->m_qflags & XFS_PQUOTA_ACCT) &&
1024 !xfs_has_pquotino(mp)) {
1025 xfs_warn(mp,
1026 "Super block does not support project and group quota together");
1027 return -EINVAL;
1028 }
1029
1030 return 0;
1031 }
1032
1033 static int
1034 xfs_init_percpu_counters(
1035 struct xfs_mount *mp)
1036 {
1037 int error;
1038
1039 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1040 if (error)
1041 return -ENOMEM;
1042
1043 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1044 if (error)
1045 goto free_icount;
1046
1047 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1048 if (error)
1049 goto free_ifree;
1050
1051 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1052 if (error)
1053 goto free_fdblocks;
1054
1055 error = percpu_counter_init(&mp->m_delalloc_rtextents, 0, GFP_KERNEL);
1056 if (error)
1057 goto free_delalloc;
1058
1059 error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
1060 if (error)
1061 goto free_delalloc_rt;
1062
1063 return 0;
1064
1065 free_delalloc_rt:
1066 percpu_counter_destroy(&mp->m_delalloc_rtextents);
1067 free_delalloc:
1068 percpu_counter_destroy(&mp->m_delalloc_blks);
1069 free_fdblocks:
1070 percpu_counter_destroy(&mp->m_fdblocks);
1071 free_ifree:
1072 percpu_counter_destroy(&mp->m_ifree);
1073 free_icount:
1074 percpu_counter_destroy(&mp->m_icount);
1075 return -ENOMEM;
1076 }
1077
1078 void
1079 xfs_reinit_percpu_counters(
1080 struct xfs_mount *mp)
1081 {
1082 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1083 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1084 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1085 percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
1086 }
1087
1088 static void
1089 xfs_destroy_percpu_counters(
1090 struct xfs_mount *mp)
1091 {
1092 percpu_counter_destroy(&mp->m_icount);
1093 percpu_counter_destroy(&mp->m_ifree);
1094 percpu_counter_destroy(&mp->m_fdblocks);
1095 ASSERT(xfs_is_shutdown(mp) ||
1096 percpu_counter_sum(&mp->m_delalloc_rtextents) == 0);
1097 percpu_counter_destroy(&mp->m_delalloc_rtextents);
1098 ASSERT(xfs_is_shutdown(mp) ||
1099 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1100 percpu_counter_destroy(&mp->m_delalloc_blks);
1101 percpu_counter_destroy(&mp->m_frextents);
1102 }
1103
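
/*
 * Allocate and initialise the per-cpu structures used for deferred inode
 * inactivation (inodegc) on this mount.
 */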
1104 static int
1105 xfs_inodegc_init_percpu(
1106 struct xfs_mount *mp)
1107 {
1108 struct xfs_inodegc *gc;
1109 int cpu;
1110
1111 mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1112 if (!mp->m_inodegc)
1113 return -ENOMEM;
1114
1115 for_each_possible_cpu(cpu) {
1116 gc = per_cpu_ptr(mp->m_inodegc, cpu);
1117 gc->cpu = cpu;
1118 gc->mp = mp;
1119 init_llist_head(&gc->list);
1120 gc->items = 0;
1121 gc->error = 0;
1122 INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1123 }
1124 return 0;
1125 }
1126
1127 static void
1128 xfs_inodegc_free_percpu(
1129 struct xfs_mount *mp)
1130 {
1131 if (!mp->m_inodegc)
1132 return;
1133 free_percpu(mp->m_inodegc);
1134 }
1135
1136 static void
1137 xfs_fs_put_super(
1138 struct super_block *sb)
1139 {
1140 struct xfs_mount *mp = XFS_M(sb);
1141
1142 xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
1143 xfs_filestream_unmount(mp);
1144 xfs_unmountfs(mp);
1145
1146 xfs_freesb(mp);
1147 xchk_mount_stats_free(mp);
1148 free_percpu(mp->m_stats.xs_stats);
1149 xfs_inodegc_free_percpu(mp);
1150 xfs_destroy_percpu_counters(mp);
1151 xfs_destroy_mount_workqueues(mp);
1152 xfs_shutdown_devices(mp);
1153 }
1154
1155 static long
1156 xfs_fs_nr_cached_objects(
1157 struct super_block *sb,
1158 struct shrink_control *sc)
1159 {
1160 /* Paranoia: catch incorrect calls during mount setup or teardown */
1161 if (WARN_ON_ONCE(!sb->s_fs_info))
1162 return 0;
1163 return xfs_reclaim_inodes_count(XFS_M(sb));
1164 }
1165
1166 static long
1167 xfs_fs_free_cached_objects(
1168 struct super_block *sb,
1169 struct shrink_control *sc)
1170 {
1171 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1172 }
1173
1174 static void
1175 xfs_fs_shutdown(
1176 struct super_block *sb)
1177 {
1178 xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1179 }
1180
1181 static const struct super_operations xfs_super_operations = {
1182 .alloc_inode = xfs_fs_alloc_inode,
1183 .destroy_inode = xfs_fs_destroy_inode,
1184 .dirty_inode = xfs_fs_dirty_inode,
1185 .drop_inode = xfs_fs_drop_inode,
1186 .put_super = xfs_fs_put_super,
1187 .sync_fs = xfs_fs_sync_fs,
1188 .freeze_fs = xfs_fs_freeze,
1189 .unfreeze_fs = xfs_fs_unfreeze,
1190 .statfs = xfs_fs_statfs,
1191 .show_options = xfs_fs_show_options,
1192 .nr_cached_objects = xfs_fs_nr_cached_objects,
1193 .free_cached_objects = xfs_fs_free_cached_objects,
1194 .shutdown = xfs_fs_shutdown,
1195 };
1196
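
/*
 * Parse an integer option value with an optional k/m/g suffix, applied as a
 * left shift of 10, 20 or 30 bits: e.g. "64k" becomes 65536 and "32m"
 * becomes 33554432.
 */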
1197 static int
1198 suffix_kstrtoint(
1199 const char *s,
1200 unsigned int base,
1201 int *res)
1202 {
1203 int last, shift_left_factor = 0, _res;
1204 char *value;
1205 int ret = 0;
1206
1207 value = kstrdup(s, GFP_KERNEL);
1208 if (!value)
1209 return -ENOMEM;
1210
1211 last = strlen(value) - 1;
1212 if (value[last] == 'K' || value[last] == 'k') {
1213 shift_left_factor = 10;
1214 value[last] = '\0';
1215 }
1216 if (value[last] == 'M' || value[last] == 'm') {
1217 shift_left_factor = 20;
1218 value[last] = '\0';
1219 }
1220 if (value[last] == 'G' || value[last] == 'g') {
1221 shift_left_factor = 30;
1222 value[last] = '\0';
1223 }
1224
1225 if (kstrtoint(value, base, &_res))
1226 ret = -EINVAL;
1227 kfree(value);
1228 *res = _res << shift_left_factor;
1229 return ret;
1230 }
1231
1232 static inline void
1233 xfs_fs_warn_deprecated(
1234 struct fs_context *fc,
1235 struct fs_parameter *param,
1236 uint64_t flag,
1237 bool value)
1238 {
1239 /* Don't print the warning if reconfiguring and current mount point
1240 * already had the flag set
1241 */
1242 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1243 !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1244 return;
1245 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1246 }
1247
1248 /*
1249 * Set mount state from a mount option.
1250 *
1251 * NOTE: mp->m_super is NULL here!
1252 */
1253 static int
1254 xfs_fs_parse_param(
1255 struct fs_context *fc,
1256 struct fs_parameter *param)
1257 {
1258 struct xfs_mount *parsing_mp = fc->s_fs_info;
1259 struct fs_parse_result result;
1260 int size = 0;
1261 int opt;
1262
1263 opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1264 if (opt < 0)
1265 return opt;
1266
1267 switch (opt) {
1268 case Opt_logbufs:
1269 parsing_mp->m_logbufs = result.uint_32;
1270 return 0;
1271 case Opt_logbsize:
1272 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1273 return -EINVAL;
1274 return 0;
1275 case Opt_logdev:
1276 kfree(parsing_mp->m_logname);
1277 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1278 if (!parsing_mp->m_logname)
1279 return -ENOMEM;
1280 return 0;
1281 case Opt_rtdev:
1282 kfree(parsing_mp->m_rtname);
1283 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1284 if (!parsing_mp->m_rtname)
1285 return -ENOMEM;
1286 return 0;
1287 case Opt_allocsize:
1288 if (suffix_kstrtoint(param->string, 10, &size))
1289 return -EINVAL;
1290 parsing_mp->m_allocsize_log = ffs(size) - 1;
1291 parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1292 return 0;
1293 case Opt_grpid:
1294 case Opt_bsdgroups:
1295 parsing_mp->m_features |= XFS_FEAT_GRPID;
1296 return 0;
1297 case Opt_nogrpid:
1298 case Opt_sysvgroups:
1299 parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1300 return 0;
1301 case Opt_wsync:
1302 parsing_mp->m_features |= XFS_FEAT_WSYNC;
1303 return 0;
1304 case Opt_norecovery:
1305 parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1306 return 0;
1307 case Opt_noalign:
1308 parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1309 return 0;
1310 case Opt_swalloc:
1311 parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1312 return 0;
1313 case Opt_sunit:
1314 parsing_mp->m_dalign = result.uint_32;
1315 return 0;
1316 case Opt_swidth:
1317 parsing_mp->m_swidth = result.uint_32;
1318 return 0;
1319 case Opt_inode32:
1320 parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1321 return 0;
1322 case Opt_inode64:
1323 parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1324 return 0;
1325 case Opt_nouuid:
1326 parsing_mp->m_features |= XFS_FEAT_NOUUID;
1327 return 0;
1328 case Opt_largeio:
1329 parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1330 return 0;
1331 case Opt_nolargeio:
1332 parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1333 return 0;
1334 case Opt_filestreams:
1335 parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1336 return 0;
1337 case Opt_noquota:
1338 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1339 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1340 return 0;
1341 case Opt_quota:
1342 case Opt_uquota:
1343 case Opt_usrquota:
1344 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1345 return 0;
1346 case Opt_qnoenforce:
1347 case Opt_uqnoenforce:
1348 parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1349 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1350 return 0;
1351 case Opt_pquota:
1352 case Opt_prjquota:
1353 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1354 return 0;
1355 case Opt_pqnoenforce:
1356 parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1357 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1358 return 0;
1359 case Opt_gquota:
1360 case Opt_grpquota:
1361 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1362 return 0;
1363 case Opt_gqnoenforce:
1364 parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1365 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1366 return 0;
1367 case Opt_discard:
1368 parsing_mp->m_features |= XFS_FEAT_DISCARD;
1369 return 0;
1370 case Opt_nodiscard:
1371 parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1372 return 0;
1373 #ifdef CONFIG_FS_DAX
1374 case Opt_dax:
1375 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1376 return 0;
1377 case Opt_dax_enum:
1378 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1379 return 0;
1380 #endif
1381 /* Following mount options will be removed in September 2025 */
1382 case Opt_ikeep:
1383 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1384 parsing_mp->m_features |= XFS_FEAT_IKEEP;
1385 return 0;
1386 case Opt_noikeep:
1387 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1388 parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1389 return 0;
1390 case Opt_attr2:
1391 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1392 parsing_mp->m_features |= XFS_FEAT_ATTR2;
1393 return 0;
1394 case Opt_noattr2:
1395 xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1396 parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1397 return 0;
1398 default:
1399 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1400 return -EINVAL;
1401 }
1402
1403 return 0;
1404 }
1405
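
/*
 * Cross-check the parsed mount options before the superblock has been read:
 * norecovery requires read-only, sunit/swidth must be specified together and
 * be consistent, and logbufs/logbsize/allocsize must be within their legal
 * ranges.
 */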
1406 static int
1407 xfs_fs_validate_params(
1408 struct xfs_mount *mp)
1409 {
1410 /* No recovery flag requires a read-only mount */
1411 if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1412 xfs_warn(mp, "no-recovery mounts must be read-only.");
1413 return -EINVAL;
1414 }
1415
1416 /*
1417 * We have not read the superblock at this point, so only the attr2
1418 * mount option can set the attr2 feature by this stage.
1419 */
1420 if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1421 xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1422 return -EINVAL;
1423 }
1424
1425
1426 if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1427 xfs_warn(mp,
1428 "sunit and swidth options incompatible with the noalign option");
1429 return -EINVAL;
1430 }
1431
1432 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1433 xfs_warn(mp, "quota support not available in this kernel.");
1434 return -EINVAL;
1435 }
1436
1437 if ((mp->m_dalign && !mp->m_swidth) ||
1438 (!mp->m_dalign && mp->m_swidth)) {
1439 xfs_warn(mp, "sunit and swidth must be specified together");
1440 return -EINVAL;
1441 }
1442
1443 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1444 xfs_warn(mp,
1445 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1446 mp->m_swidth, mp->m_dalign);
1447 return -EINVAL;
1448 }
1449
1450 if (mp->m_logbufs != -1 &&
1451 mp->m_logbufs != 0 &&
1452 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1453 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1454 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1455 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1456 return -EINVAL;
1457 }
1458
1459 if (mp->m_logbsize != -1 &&
1460 mp->m_logbsize != 0 &&
1461 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1462 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1463 !is_power_of_2(mp->m_logbsize))) {
1464 xfs_warn(mp,
1465 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1466 mp->m_logbsize);
1467 return -EINVAL;
1468 }
1469
1470 if (xfs_has_allocsize(mp) &&
1471 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1472 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1473 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1474 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1475 return -EINVAL;
1476 }
1477
1478 return 0;
1479 }
1480
1481 struct dentry *
1482 xfs_debugfs_mkdir(
1483 const char *name,
1484 struct dentry *parent)
1485 {
1486 struct dentry *child;
1487
1488 /* Apparently we're expected to ignore error returns?? */
1489 child = debugfs_create_dir(name, parent);
1490 if (IS_ERR(child))
1491 return NULL;
1492
1493 return child;
1494 }
1495
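
/*
 * Second half of mounting, called by get_tree_bdev() with the data device
 * already open: open the log/rt devices, read and validate the superblock,
 * populate the VFS superblock fields, then run xfs_mountfs() and instantiate
 * the root dentry.
 */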
1496 static int
1497 xfs_fs_fill_super(
1498 struct super_block *sb,
1499 struct fs_context *fc)
1500 {
1501 struct xfs_mount *mp = sb->s_fs_info;
1502 struct inode *root;
1503 int flags = 0, error;
1504
1505 mp->m_super = sb;
1506
1507 /*
1508 * Copy VFS mount flags from the context now that all parameter parsing
1509 * is guaranteed to have been completed by either the old mount API or
1510 * the newer fsopen/fsconfig API.
1511 */
1512 if (fc->sb_flags & SB_RDONLY)
1513 xfs_set_readonly(mp);
1514 if (fc->sb_flags & SB_DIRSYNC)
1515 mp->m_features |= XFS_FEAT_DIRSYNC;
1516 if (fc->sb_flags & SB_SYNCHRONOUS)
1517 mp->m_features |= XFS_FEAT_WSYNC;
1518
1519 error = xfs_fs_validate_params(mp);
1520 if (error)
1521 return error;
1522
1523 sb_min_blocksize(sb, BBSIZE);
1524 sb->s_xattr = xfs_xattr_handlers;
1525 sb->s_export_op = &xfs_export_operations;
1526 #ifdef CONFIG_XFS_QUOTA
1527 sb->s_qcop = &xfs_quotactl_operations;
1528 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1529 #endif
1530 sb->s_op = &xfs_super_operations;
1531
1532 /*
1533 * Delay mount work if the debug hook is set. This is debug
1534 * instrumentation to coordinate simulation of xfs mount failures with
1535 * VFS superblock operations.
1536 */
1537 if (xfs_globals.mount_delay) {
1538 xfs_notice(mp, "Delaying mount for %d seconds.",
1539 xfs_globals.mount_delay);
1540 msleep(xfs_globals.mount_delay * 1000);
1541 }
1542
1543 if (fc->sb_flags & SB_SILENT)
1544 flags |= XFS_MFSI_QUIET;
1545
1546 error = xfs_open_devices(mp);
1547 if (error)
1548 return error;
1549
1550 if (xfs_debugfs) {
1551 mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
1552 xfs_debugfs);
1553 } else {
1554 mp->m_debugfs = NULL;
1555 }
1556
1557 error = xfs_init_mount_workqueues(mp);
1558 if (error)
1559 goto out_shutdown_devices;
1560
1561 error = xfs_init_percpu_counters(mp);
1562 if (error)
1563 goto out_destroy_workqueues;
1564
1565 error = xfs_inodegc_init_percpu(mp);
1566 if (error)
1567 goto out_destroy_counters;
1568
1569 /* Allocate stats memory before we do operations that might use it */
1570 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1571 if (!mp->m_stats.xs_stats) {
1572 error = -ENOMEM;
1573 goto out_destroy_inodegc;
1574 }
1575
1576 error = xchk_mount_stats_alloc(mp);
1577 if (error)
1578 goto out_free_stats;
1579
1580 error = xfs_readsb(mp, flags);
1581 if (error)
1582 goto out_free_scrub_stats;
1583
1584 error = xfs_finish_flags(mp);
1585 if (error)
1586 goto out_free_sb;
1587
1588 error = xfs_setup_devices(mp);
1589 if (error)
1590 goto out_free_sb;
1591
1592 /*
1593 * V4 support is undergoing deprecation.
1594 *
1595 * Note: this has to use an open coded m_features check as xfs_has_crc
1596 * always returns false for !CONFIG_XFS_SUPPORT_V4.
1597 */
1598 if (!(mp->m_features & XFS_FEAT_CRC)) {
1599 if (!IS_ENABLED(CONFIG_XFS_SUPPORT_V4)) {
1600 xfs_warn(mp,
1601 "Deprecated V4 format (crc=0) not supported by kernel.");
1602 error = -EINVAL;
1603 goto out_free_sb;
1604 }
1605 xfs_warn_once(mp,
1606 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1607 }
1608
1609 /* ASCII case insensitivity is undergoing deprecation. */
1610 if (xfs_has_asciici(mp)) {
1611 #ifdef CONFIG_XFS_SUPPORT_ASCII_CI
1612 xfs_warn_once(mp,
1613 "Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
1614 #else
1615 xfs_warn(mp,
1616 "Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
1617 error = -EINVAL;
1618 goto out_free_sb;
1619 #endif
1620 }
1621
1622 /*
1623 * Filesystem claims it needs repair, so refuse the mount unless
1624 * norecovery is also specified, in which case the filesystem can
1625 * be mounted with no risk of further damage.
1626 */
1627 if (xfs_has_needsrepair(mp) && !xfs_has_norecovery(mp)) {
1628 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1629 error = -EFSCORRUPTED;
1630 goto out_free_sb;
1631 }
1632
1633 /*
1634 * Don't touch the filesystem if a user tool thinks it owns the primary
1635 * superblock. mkfs doesn't clear the flag from secondary supers, so
1636 * we don't check them at all.
1637 */
1638 if (mp->m_sb.sb_inprogress) {
1639 xfs_warn(mp, "Offline file system operation in progress!");
1640 error = -EFSCORRUPTED;
1641 goto out_free_sb;
1642 }
1643
1644 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1645 size_t max_folio_size = mapping_max_folio_size_supported();
1646
1647 if (!xfs_has_crc(mp)) {
1648 xfs_warn(mp,
1649 "V4 Filesystem with blocksize %d bytes. Only pagesize (%ld) or less is supported.",
1650 mp->m_sb.sb_blocksize, PAGE_SIZE);
1651 error = -ENOSYS;
1652 goto out_free_sb;
1653 }
1654
1655 if (mp->m_sb.sb_blocksize > max_folio_size) {
1656 xfs_warn(mp,
1657 "block size (%u bytes) not supported; Only block size (%zu) or less is supported",
1658 mp->m_sb.sb_blocksize, max_folio_size);
1659 error = -ENOSYS;
1660 goto out_free_sb;
1661 }
1662
1663 xfs_warn(mp,
1664 "EXPERIMENTAL: V5 Filesystem with Large Block Size (%d bytes) enabled.",
1665 mp->m_sb.sb_blocksize);
1666 }
1667
1668 /* Ensure this filesystem fits in the page cache limits */
1669 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1670 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1671 xfs_warn(mp,
1672 "file system too large to be mounted on this system.");
1673 error = -EFBIG;
1674 goto out_free_sb;
1675 }
1676
1677 /*
1678 * XFS block mappings use 54 bits to store the logical block offset.
1679 * This should suffice to handle the maximum file size that the VFS
1680 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1681 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1682 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1683 * to check this assertion.
1684 *
1685 * Avoid integer overflow by comparing the maximum bmbt offset to the
1686 * maximum pagecache offset in units of fs blocks.
1687 */
1688 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1689 xfs_warn(mp,
1690 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1691 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1692 XFS_MAX_FILEOFF);
1693 error = -EINVAL;
1694 goto out_free_sb;
1695 }
1696
1697 error = xfs_filestream_mount(mp);
1698 if (error)
1699 goto out_free_sb;
1700
1701 /*
1702 * we must configure the block size in the superblock before we run the
1703 * full mount process as the mount process can lookup and cache inodes.
1704 */
1705 sb->s_magic = XFS_SUPER_MAGIC;
1706 sb->s_blocksize = mp->m_sb.sb_blocksize;
1707 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1708 sb->s_maxbytes = MAX_LFS_FILESIZE;
1709 sb->s_max_links = XFS_MAXLINK;
1710 sb->s_time_gran = 1;
1711 if (xfs_has_bigtime(mp)) {
1712 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1713 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1714 } else {
1715 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1716 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1717 }
1718 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1719 sb->s_iflags |= SB_I_CGROUPWB;
1720
1721 set_posix_acl_flag(sb);
1722
1723 /* version 5 superblocks support inode version counters. */
1724 if (xfs_has_crc(mp))
1725 sb->s_flags |= SB_I_VERSION;
1726
1727 if (xfs_has_dax_always(mp)) {
1728 error = xfs_setup_dax_always(mp);
1729 if (error)
1730 goto out_filestream_unmount;
1731 }
1732
1733 if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
1734 xfs_warn(mp,
1735 "mounting with \"discard\" option, but the device does not support discard");
1736 mp->m_features &= ~XFS_FEAT_DISCARD;
1737 }
1738
1739 if (xfs_has_reflink(mp)) {
1740 if (mp->m_sb.sb_rblocks) {
1741 xfs_alert(mp,
1742 "reflink not compatible with realtime device!");
1743 error = -EINVAL;
1744 goto out_filestream_unmount;
1745 }
1746
1747 if (xfs_globals.always_cow) {
1748 xfs_info(mp, "using DEBUG-only always_cow mode.");
1749 mp->m_always_cow = true;
1750 }
1751 }
1752
1753 if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1754 xfs_alert(mp,
1755 "reverse mapping btree not compatible with realtime device!");
1756 error = -EINVAL;
1757 goto out_filestream_unmount;
1758 }
1759
1760 if (xfs_has_exchange_range(mp))
1761 xfs_warn(mp,
1762 "EXPERIMENTAL exchange-range feature enabled. Use at your own risk!");
1763
1764 if (xfs_has_parent(mp))
1765 xfs_warn(mp,
1766 "EXPERIMENTAL parent pointer feature enabled. Use at your own risk!");
1767
1768 error = xfs_mountfs(mp);
1769 if (error)
1770 goto out_filestream_unmount;
1771
1772 root = igrab(VFS_I(mp->m_rootip));
1773 if (!root) {
1774 error = -ENOENT;
1775 goto out_unmount;
1776 }
1777 sb->s_root = d_make_root(root);
1778 if (!sb->s_root) {
1779 error = -ENOMEM;
1780 goto out_unmount;
1781 }
1782
1783 return 0;
1784
1785 out_filestream_unmount:
1786 xfs_filestream_unmount(mp);
1787 out_free_sb:
1788 xfs_freesb(mp);
1789 out_free_scrub_stats:
1790 xchk_mount_stats_free(mp);
1791 out_free_stats:
1792 free_percpu(mp->m_stats.xs_stats);
1793 out_destroy_inodegc:
1794 xfs_inodegc_free_percpu(mp);
1795 out_destroy_counters:
1796 xfs_destroy_percpu_counters(mp);
1797 out_destroy_workqueues:
1798 xfs_destroy_mount_workqueues(mp);
1799 out_shutdown_devices:
1800 xfs_shutdown_devices(mp);
1801 return error;
1802
1803 out_unmount:
1804 xfs_filestream_unmount(mp);
1805 xfs_unmountfs(mp);
1806 goto out_free_sb;
1807 }
1808
1809 static int
1810 xfs_fs_get_tree(
1811 struct fs_context *fc)
1812 {
1813 return get_tree_bdev(fc, xfs_fs_fill_super);
1814 }
1815
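
/*
 * Remount read-only to read-write: refuse norecovery and unknown ro-compat
 * mounts, write back any deferred superblock changes, refill the reserve
 * pool and restart the background log, blockgc and inodegc work.
 */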
1816 static int
1817 xfs_remount_rw(
1818 struct xfs_mount *mp)
1819 {
1820 struct xfs_sb *sbp = &mp->m_sb;
1821 int error;
1822
1823 if (xfs_has_norecovery(mp)) {
1824 xfs_warn(mp,
1825 "ro->rw transition prohibited on norecovery mount");
1826 return -EINVAL;
1827 }
1828
1829 if (xfs_sb_is_v5(sbp) &&
1830 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1831 xfs_warn(mp,
1832 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1833 (sbp->sb_features_ro_compat &
1834 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1835 return -EINVAL;
1836 }
1837
1838 xfs_clear_readonly(mp);
1839
1840 /*
1841 * If this is the first remount to writeable state we might have some
1842 * superblock changes to update.
1843 */
1844 if (mp->m_update_sb) {
1845 error = xfs_sync_sb(mp, false);
1846 if (error) {
1847 xfs_warn(mp, "failed to write sb changes");
1848 return error;
1849 }
1850 mp->m_update_sb = false;
1851 }
1852
1853 /*
1854 * Fill out the reserve pool if it is empty. Use the stashed value if
1855 * it is non-zero, otherwise go with the default.
1856 */
1857 xfs_restore_resvblks(mp);
1858 xfs_log_work_queue(mp);
1859 xfs_blockgc_start(mp);
1860
1861 /* Create the per-AG metadata reservation pool .*/
1862 error = xfs_fs_reserve_ag_blocks(mp);
1863 if (error && error != -ENOSPC)
1864 return error;
1865
1866 /* Re-enable the background inode inactivation worker. */
1867 xfs_inodegc_start(mp);
1868
1869 return 0;
1870 }
1871
1872 static int
1873 xfs_remount_ro(
1874 struct xfs_mount *mp)
1875 {
1876 struct xfs_icwalk icw = {
1877 .icw_flags = XFS_ICWALK_FLAG_SYNC,
1878 };
1879 int error;
1880
1881 /* Flush all the dirty data to disk. */
1882 error = sync_filesystem(mp->m_super);
1883 if (error)
1884 return error;
1885
1886 /*
1887 * Cancel background eofb scanning so it cannot race with the final
1888 * log force+buftarg wait and deadlock the remount.
1889 */
1890 xfs_blockgc_stop(mp);
1891
1892 /*
1893 * Clear out all remaining COW staging extents and speculative post-EOF
1894 * preallocations so that we don't leave inodes requiring inactivation
1895 * cleanups during reclaim on a read-only mount. We must process every
1896 * cached inode, so this requires a synchronous cache scan.
1897 */
1898 error = xfs_blockgc_free_space(mp, &icw);
1899 if (error) {
1900 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1901 return error;
1902 }
1903
1904 /*
1905 	 * Stop the inodegc background worker. We already flushed all pending
1906 	 * inodegc work above when we sync'd the filesystem.
1907 * The VFS holds s_umount, so we know that inodes cannot enter
1908 * xfs_fs_destroy_inode during a remount operation. In readonly mode
1909 * we send inodes straight to reclaim, so no inodes will be queued.
1910 */
1911 xfs_inodegc_stop(mp);
1912
1913 /* Free the per-AG metadata reservation pool. */
1914 xfs_fs_unreserve_ag_blocks(mp);
1915
1916 /*
1917 * Before we sync the metadata, we need to free up the reserve block
1918 * pool so that the used block count in the superblock on disk is
1919 	 * correct at the end of the remount. Stash the current reserve pool
1920 * size so that if we get remounted rw, we can return it to the same
1921 * size.
1922 */
1923 xfs_save_resvblks(mp);
1924
1925 xfs_log_clean(mp);
1926 xfs_set_readonly(mp);
1927
1928 return 0;
1929 }
1930
1931 /*
1932  * Logically we would return an error here, to prevent users from believing
1933  * they have changed mount options via remount that in fact cannot be changed.
1934  *
1935  * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1936  * arguments in some cases, so we can't blindly reject options. We have to
1937  * check each specified option against the currently set value and reject it
1938  * only if it actually differs.
1939 *
1940 * Until that is implemented we return success for every remount request, and
1941 * silently ignore all options that we can't actually change.
1942 */
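/*
 * For illustration only: a ro->rw remount typically reaches this function via
 * "mount -o remount,rw <mnt>" or, through the new mount API, an
 * fsconfig(fd, FSCONFIG_CMD_RECONFIGURE, ...) call after the "rw" flag has
 * been set on the fs_context. (Example invocations, not part of this code.)
 */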
1943 static int
1944 xfs_fs_reconfigure(
1945 struct fs_context *fc)
1946 {
1947 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1948 struct xfs_mount *new_mp = fc->s_fs_info;
1949 int flags = fc->sb_flags;
1950 int error;
1951
1952 /* version 5 superblocks always support version counters. */
1953 if (xfs_has_crc(mp))
1954 fc->sb_flags |= SB_I_VERSION;
1955
1956 error = xfs_fs_validate_params(new_mp);
1957 if (error)
1958 return error;
1959
1960 /* inode32 -> inode64 */
1961 if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1962 mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1963 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1964 }
1965
1966 /* inode64 -> inode32 */
1967 if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1968 mp->m_features |= XFS_FEAT_SMALL_INUMS;
1969 mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1970 }
1971
1972 /* ro -> rw */
1973 if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1974 error = xfs_remount_rw(mp);
1975 if (error)
1976 return error;
1977 }
1978
1979 /* rw -> ro */
1980 if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1981 error = xfs_remount_ro(mp);
1982 if (error)
1983 return error;
1984 }
1985
1986 return 0;
1987 }
1988
1989 static void
1990 xfs_fs_free(
1991 struct fs_context *fc)
1992 {
1993 struct xfs_mount *mp = fc->s_fs_info;
1994
1995 /*
1996 * mp is stored in the fs_context when it is initialized.
1997 * mp is transferred to the superblock on a successful mount,
1998 * but if an error occurs before the transfer we have to free
1999 * it here.
2000 */
2001 if (mp)
2002 xfs_mount_free(mp);
2003 }
2004
2005 static const struct fs_context_operations xfs_context_ops = {
2006 .parse_param = xfs_fs_parse_param,
2007 .get_tree = xfs_fs_get_tree,
2008 .reconfigure = xfs_fs_reconfigure,
2009 .free = xfs_fs_free,
2010 };
2011
2012 /*
2013  * WARNING: do not initialise any parameters in this function that depend on
2014  * mount option parsing having already been performed, as this can be called
2015  * from fsopen() before any parameters have been set.
2016 */
2017 static int xfs_init_fs_context(
2018 struct fs_context *fc)
2019 {
2020 struct xfs_mount *mp;
2021
2022 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL);
2023 if (!mp)
2024 return -ENOMEM;
2025
2026 spin_lock_init(&mp->m_sb_lock);
2027 xa_init(&mp->m_perags);
2028 mutex_init(&mp->m_growlock);
2029 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
2030 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
2031 mp->m_kobj.kobject.kset = xfs_kset;
2032 /*
2033 * We don't create the finobt per-ag space reservation until after log
2034 * recovery, so we must set this to true so that an ifree transaction
2035 * started during log recovery will not depend on space reservations
2036 * for finobt expansion.
2037 */
2038 mp->m_finobt_nores = true;
2039
2040 /*
2041 * These can be overridden by the mount option parsing.
2042 */
2043 mp->m_logbufs = -1;
2044 mp->m_logbsize = -1;
2045 mp->m_allocsize_log = 16; /* 64k */
2046
2047 xfs_hooks_init(&mp->m_dir_update_hooks);
2048
2049 fc->s_fs_info = mp;
2050 fc->ops = &xfs_context_ops;
2051
2052 return 0;
2053 }
2054
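/*
 * Tear down the VFS superblock via the generic block device helper first;
 * only after that is it safe to free the xfs_mount that was attached to
 * sb->s_fs_info during mount.
 */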
2055 static void
2056 xfs_kill_sb(
2057 struct super_block *sb)
2058 {
2059 kill_block_super(sb);
2060 xfs_mount_free(XFS_M(sb));
2061 }
2062
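/*
 * The glue the VFS uses to route "mount -t xfs" (and fsopen("xfs", ...)) to
 * the fs_context operations above. FS_REQUIRES_DEV means a backing block
 * device is mandatory; FS_ALLOW_IDMAP permits idmapped mounts of XFS.
 */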
2063 static struct file_system_type xfs_fs_type = {
2064 .owner = THIS_MODULE,
2065 .name = "xfs",
2066 .init_fs_context = xfs_init_fs_context,
2067 .parameters = xfs_fs_parameters,
2068 .kill_sb = xfs_kill_sb,
2069 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
2070 };
2071 MODULE_ALIAS_FS("xfs");
2072
2073 STATIC int __init
2074 xfs_init_caches(void)
2075 {
2076 int error;
2077
2078 xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
2079 SLAB_HWCACHE_ALIGN |
2080 SLAB_RECLAIM_ACCOUNT,
2081 NULL);
2082 if (!xfs_buf_cache)
2083 goto out;
2084
2085 xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
2086 sizeof(struct xlog_ticket),
2087 0, 0, NULL);
2088 if (!xfs_log_ticket_cache)
2089 goto out_destroy_buf_cache;
2090
2091 error = xfs_btree_init_cur_caches();
2092 if (error)
2093 goto out_destroy_log_ticket_cache;
2094
2095 error = rcbagbt_init_cur_cache();
2096 if (error)
2097 goto out_destroy_btree_cur_cache;
2098
2099 error = xfs_defer_init_item_caches();
2100 if (error)
2101 goto out_destroy_rcbagbt_cur_cache;
2102
2103 xfs_da_state_cache = kmem_cache_create("xfs_da_state",
2104 sizeof(struct xfs_da_state),
2105 0, 0, NULL);
2106 if (!xfs_da_state_cache)
2107 goto out_destroy_defer_item_cache;
2108
2109 xfs_ifork_cache = kmem_cache_create("xfs_ifork",
2110 sizeof(struct xfs_ifork),
2111 0, 0, NULL);
2112 if (!xfs_ifork_cache)
2113 goto out_destroy_da_state_cache;
2114
2115 xfs_trans_cache = kmem_cache_create("xfs_trans",
2116 sizeof(struct xfs_trans),
2117 0, 0, NULL);
2118 if (!xfs_trans_cache)
2119 goto out_destroy_ifork_cache;
2120
2121
2122 /*
2123 * The size of the cache-allocated buf log item is the maximum
2124 * size possible under XFS. This wastes a little bit of memory,
2125 * but it is much faster.
2126 */
2127 xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
2128 sizeof(struct xfs_buf_log_item),
2129 0, 0, NULL);
2130 if (!xfs_buf_item_cache)
2131 goto out_destroy_trans_cache;
2132
2133 xfs_efd_cache = kmem_cache_create("xfs_efd_item",
2134 xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
2135 0, 0, NULL);
2136 if (!xfs_efd_cache)
2137 goto out_destroy_buf_item_cache;
2138
2139 xfs_efi_cache = kmem_cache_create("xfs_efi_item",
2140 xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
2141 0, 0, NULL);
2142 if (!xfs_efi_cache)
2143 goto out_destroy_efd_cache;
2144
2145 xfs_inode_cache = kmem_cache_create("xfs_inode",
2146 sizeof(struct xfs_inode), 0,
2147 (SLAB_HWCACHE_ALIGN |
2148 SLAB_RECLAIM_ACCOUNT |
2149 SLAB_ACCOUNT),
2150 xfs_fs_inode_init_once);
2151 if (!xfs_inode_cache)
2152 goto out_destroy_efi_cache;
2153
2154 xfs_ili_cache = kmem_cache_create("xfs_ili",
2155 sizeof(struct xfs_inode_log_item), 0,
2156 SLAB_RECLAIM_ACCOUNT,
2157 NULL);
2158 if (!xfs_ili_cache)
2159 goto out_destroy_inode_cache;
2160
2161 xfs_icreate_cache = kmem_cache_create("xfs_icr",
2162 sizeof(struct xfs_icreate_item),
2163 0, 0, NULL);
2164 if (!xfs_icreate_cache)
2165 goto out_destroy_ili_cache;
2166
2167 xfs_rud_cache = kmem_cache_create("xfs_rud_item",
2168 sizeof(struct xfs_rud_log_item),
2169 0, 0, NULL);
2170 if (!xfs_rud_cache)
2171 goto out_destroy_icreate_cache;
2172
2173 xfs_rui_cache = kmem_cache_create("xfs_rui_item",
2174 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2175 0, 0, NULL);
2176 if (!xfs_rui_cache)
2177 goto out_destroy_rud_cache;
2178
2179 xfs_cud_cache = kmem_cache_create("xfs_cud_item",
2180 sizeof(struct xfs_cud_log_item),
2181 0, 0, NULL);
2182 if (!xfs_cud_cache)
2183 goto out_destroy_rui_cache;
2184
2185 xfs_cui_cache = kmem_cache_create("xfs_cui_item",
2186 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2187 0, 0, NULL);
2188 if (!xfs_cui_cache)
2189 goto out_destroy_cud_cache;
2190
2191 xfs_bud_cache = kmem_cache_create("xfs_bud_item",
2192 sizeof(struct xfs_bud_log_item),
2193 0, 0, NULL);
2194 if (!xfs_bud_cache)
2195 goto out_destroy_cui_cache;
2196
2197 xfs_bui_cache = kmem_cache_create("xfs_bui_item",
2198 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2199 0, 0, NULL);
2200 if (!xfs_bui_cache)
2201 goto out_destroy_bud_cache;
2202
2203 xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
2204 sizeof(struct xfs_attrd_log_item),
2205 0, 0, NULL);
2206 if (!xfs_attrd_cache)
2207 goto out_destroy_bui_cache;
2208
2209 xfs_attri_cache = kmem_cache_create("xfs_attri_item",
2210 sizeof(struct xfs_attri_log_item),
2211 0, 0, NULL);
2212 if (!xfs_attri_cache)
2213 goto out_destroy_attrd_cache;
2214
2215 xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
2216 sizeof(struct xfs_iunlink_item),
2217 0, 0, NULL);
2218 if (!xfs_iunlink_cache)
2219 goto out_destroy_attri_cache;
2220
2221 xfs_xmd_cache = kmem_cache_create("xfs_xmd_item",
2222 sizeof(struct xfs_xmd_log_item),
2223 0, 0, NULL);
2224 if (!xfs_xmd_cache)
2225 goto out_destroy_iul_cache;
2226
2227 xfs_xmi_cache = kmem_cache_create("xfs_xmi_item",
2228 sizeof(struct xfs_xmi_log_item),
2229 0, 0, NULL);
2230 if (!xfs_xmi_cache)
2231 goto out_destroy_xmd_cache;
2232
2233 xfs_parent_args_cache = kmem_cache_create("xfs_parent_args",
2234 sizeof(struct xfs_parent_args),
2235 0, 0, NULL);
2236 if (!xfs_parent_args_cache)
2237 goto out_destroy_xmi_cache;
2238
2239 return 0;
2240
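	/*
	 * Error unwinding: destroy the caches in the reverse order of their
	 * creation above so a failure at any point leaves nothing allocated.
	 */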
2241 out_destroy_xmi_cache:
2242 kmem_cache_destroy(xfs_xmi_cache);
2243 out_destroy_xmd_cache:
2244 kmem_cache_destroy(xfs_xmd_cache);
2245 out_destroy_iul_cache:
2246 kmem_cache_destroy(xfs_iunlink_cache);
2247 out_destroy_attri_cache:
2248 kmem_cache_destroy(xfs_attri_cache);
2249 out_destroy_attrd_cache:
2250 kmem_cache_destroy(xfs_attrd_cache);
2251 out_destroy_bui_cache:
2252 kmem_cache_destroy(xfs_bui_cache);
2253 out_destroy_bud_cache:
2254 kmem_cache_destroy(xfs_bud_cache);
2255 out_destroy_cui_cache:
2256 kmem_cache_destroy(xfs_cui_cache);
2257 out_destroy_cud_cache:
2258 kmem_cache_destroy(xfs_cud_cache);
2259 out_destroy_rui_cache:
2260 kmem_cache_destroy(xfs_rui_cache);
2261 out_destroy_rud_cache:
2262 kmem_cache_destroy(xfs_rud_cache);
2263 out_destroy_icreate_cache:
2264 kmem_cache_destroy(xfs_icreate_cache);
2265 out_destroy_ili_cache:
2266 kmem_cache_destroy(xfs_ili_cache);
2267 out_destroy_inode_cache:
2268 kmem_cache_destroy(xfs_inode_cache);
2269 out_destroy_efi_cache:
2270 kmem_cache_destroy(xfs_efi_cache);
2271 out_destroy_efd_cache:
2272 kmem_cache_destroy(xfs_efd_cache);
2273 out_destroy_buf_item_cache:
2274 kmem_cache_destroy(xfs_buf_item_cache);
2275 out_destroy_trans_cache:
2276 kmem_cache_destroy(xfs_trans_cache);
2277 out_destroy_ifork_cache:
2278 kmem_cache_destroy(xfs_ifork_cache);
2279 out_destroy_da_state_cache:
2280 kmem_cache_destroy(xfs_da_state_cache);
2281 out_destroy_defer_item_cache:
2282 xfs_defer_destroy_item_caches();
2283 out_destroy_rcbagbt_cur_cache:
2284 rcbagbt_destroy_cur_cache();
2285 out_destroy_btree_cur_cache:
2286 xfs_btree_destroy_cur_caches();
2287 out_destroy_log_ticket_cache:
2288 kmem_cache_destroy(xfs_log_ticket_cache);
2289 out_destroy_buf_cache:
2290 kmem_cache_destroy(xfs_buf_cache);
2291 out:
2292 return -ENOMEM;
2293 }
2294
2295 STATIC void
2296 xfs_destroy_caches(void)
2297 {
2298 /*
2299 * Make sure all delayed rcu free are flushed before we
2300 * destroy caches.
2301 */
2302 rcu_barrier();
2303 kmem_cache_destroy(xfs_parent_args_cache);
2304 kmem_cache_destroy(xfs_xmd_cache);
2305 kmem_cache_destroy(xfs_xmi_cache);
2306 kmem_cache_destroy(xfs_iunlink_cache);
2307 kmem_cache_destroy(xfs_attri_cache);
2308 kmem_cache_destroy(xfs_attrd_cache);
2309 kmem_cache_destroy(xfs_bui_cache);
2310 kmem_cache_destroy(xfs_bud_cache);
2311 kmem_cache_destroy(xfs_cui_cache);
2312 kmem_cache_destroy(xfs_cud_cache);
2313 kmem_cache_destroy(xfs_rui_cache);
2314 kmem_cache_destroy(xfs_rud_cache);
2315 kmem_cache_destroy(xfs_icreate_cache);
2316 kmem_cache_destroy(xfs_ili_cache);
2317 kmem_cache_destroy(xfs_inode_cache);
2318 kmem_cache_destroy(xfs_efi_cache);
2319 kmem_cache_destroy(xfs_efd_cache);
2320 kmem_cache_destroy(xfs_buf_item_cache);
2321 kmem_cache_destroy(xfs_trans_cache);
2322 kmem_cache_destroy(xfs_ifork_cache);
2323 kmem_cache_destroy(xfs_da_state_cache);
2324 xfs_defer_destroy_item_caches();
2325 rcbagbt_destroy_cur_cache();
2326 xfs_btree_destroy_cur_caches();
2327 kmem_cache_destroy(xfs_log_ticket_cache);
2328 kmem_cache_destroy(xfs_buf_cache);
2329 }
2330
2331 STATIC int __init
2332 xfs_init_workqueues(void)
2333 {
2334 /*
2335 * The allocation workqueue can be used in memory reclaim situations
2336 * (writepage path), and parallelism is only limited by the number of
2337 * AGs in all the filesystems mounted. Hence use the default large
2338 * max_active value for this workqueue.
2339 */
2340 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2341 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2342 if (!xfs_alloc_wq)
2343 return -ENOMEM;
2344
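	/*
	 * Discards of freed extents are issued from this workqueue; keep it
	 * unbound so the (potentially slow) requests aren't pinned to the
	 * submitting CPU.
	 */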
2345 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2346 0);
2347 if (!xfs_discard_wq)
2348 goto out_free_alloc_wq;
2349
2350 return 0;
2351 out_free_alloc_wq:
2352 destroy_workqueue(xfs_alloc_wq);
2353 return -ENOMEM;
2354 }
2355
2356 STATIC void
2357 xfs_destroy_workqueues(void)
2358 {
2359 destroy_workqueue(xfs_discard_wq);
2360 destroy_workqueue(xfs_alloc_wq);
2361 }
2362
2363 STATIC int __init
2364 init_xfs_fs(void)
2365 {
2366 int error;
2367
2368 xfs_check_ondisk_structs();
2369
2370 error = xfs_dahash_test();
2371 if (error)
2372 return error;
2373
2374 printk(KERN_INFO XFS_VERSION_STRING " with "
2375 XFS_BUILD_OPTIONS " enabled\n");
2376
2377 xfs_dir_startup();
2378
2379 error = xfs_init_caches();
2380 if (error)
2381 goto out;
2382
2383 error = xfs_init_workqueues();
2384 if (error)
2385 goto out_destroy_caches;
2386
2387 error = xfs_mru_cache_init();
2388 if (error)
2389 goto out_destroy_wq;
2390
2391 error = xfs_init_procfs();
2392 if (error)
2393 goto out_mru_cache_uninit;
2394
2395 error = xfs_sysctl_register();
2396 if (error)
2397 goto out_cleanup_procfs;
2398
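	/* debugfs is best effort; failing to create the dir is not fatal. */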
2399 xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);
2400
2401 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2402 if (!xfs_kset) {
2403 error = -ENOMEM;
2404 goto out_debugfs_unregister;
2405 }
2406
2407 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2408
2409 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2410 if (!xfsstats.xs_stats) {
2411 error = -ENOMEM;
2412 goto out_kset_unregister;
2413 }
2414
2415 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2416 "stats");
2417 if (error)
2418 goto out_free_stats;
2419
2420 error = xchk_global_stats_setup(xfs_debugfs);
2421 if (error)
2422 goto out_remove_stats_kobj;
2423
2424 #ifdef DEBUG
2425 xfs_dbg_kobj.kobject.kset = xfs_kset;
2426 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2427 if (error)
2428 goto out_remove_scrub_stats;
2429 #endif
2430
2431 error = xfs_qm_init();
2432 if (error)
2433 goto out_remove_dbg_kobj;
2434
2435 error = register_filesystem(&xfs_fs_type);
2436 if (error)
2437 goto out_qm_exit;
2438 return 0;
2439
2440 out_qm_exit:
2441 xfs_qm_exit();
2442 out_remove_dbg_kobj:
2443 #ifdef DEBUG
2444 xfs_sysfs_del(&xfs_dbg_kobj);
2445 out_remove_scrub_stats:
2446 #endif
2447 xchk_global_stats_teardown();
2448 out_remove_stats_kobj:
2449 xfs_sysfs_del(&xfsstats.xs_kobj);
2450 out_free_stats:
2451 free_percpu(xfsstats.xs_stats);
2452 out_kset_unregister:
2453 kset_unregister(xfs_kset);
2454 out_debugfs_unregister:
2455 debugfs_remove(xfs_debugfs);
2456 xfs_sysctl_unregister();
2457 out_cleanup_procfs:
2458 xfs_cleanup_procfs();
2459 out_mru_cache_uninit:
2460 xfs_mru_cache_uninit();
2461 out_destroy_wq:
2462 xfs_destroy_workqueues();
2463 out_destroy_caches:
2464 xfs_destroy_caches();
2465 out:
2466 return error;
2467 }
2468
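/*
 * Module unload: undo init_xfs_fs() (largely in reverse order) and release
 * the filesystem UUID table.
 */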
2469 STATIC void __exit
2470 exit_xfs_fs(void)
2471 {
2472 xfs_qm_exit();
2473 unregister_filesystem(&xfs_fs_type);
2474 #ifdef DEBUG
2475 xfs_sysfs_del(&xfs_dbg_kobj);
2476 #endif
2477 xchk_global_stats_teardown();
2478 xfs_sysfs_del(&xfsstats.xs_kobj);
2479 free_percpu(xfsstats.xs_stats);
2480 kset_unregister(xfs_kset);
2481 debugfs_remove(xfs_debugfs);
2482 xfs_sysctl_unregister();
2483 xfs_cleanup_procfs();
2484 xfs_mru_cache_uninit();
2485 xfs_destroy_workqueues();
2486 xfs_destroy_caches();
2487 xfs_uuid_table_free();
2488 }
2489
2490 module_init(init_xfs_fs);
2491 module_exit(exit_xfs_fs);
2492
2493 MODULE_AUTHOR("Silicon Graphics, Inc.");
2494 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2495 MODULE_LICENSE("GPL");
2496