// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

#ifdef CONFIG_HOTPLUG_CPU
static LIST_HEAD(xfs_mount_list);
static DEFINE_SPINLOCK(xfs_mount_list_lock);

static inline void xfs_mount_list_add(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_add(&mp->m_mount_list, &xfs_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}

static inline void xfs_mount_list_del(struct xfs_mount *mp)
{
	spin_lock(&xfs_mount_list_lock);
	list_del(&mp->m_mount_list);
	spin_unlock(&xfs_mount_list_lock);
}
#else /* !CONFIG_HOTPLUG_CPU */
static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};
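
/*
 * Note: a bare "dax" flag (Opt_dax below) is shorthand for "dax=always";
 * "dax=inode", the default when no dax option is given at all, defers
 * the decision to per-inode flags.
 */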

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};
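
/*
 * Illustrative example (device path and values hypothetical): a mount
 * invocation such as
 *
 *	mount -o logbufs=8,logbsize=256k,inode32 /dev/sdb1 /mnt
 *
 * is split by the VFS into individual parameters that are matched
 * against the table above: "logbufs" arrives pre-parsed as a u32,
 * "logbsize" as a string handed to suffix_kstrtoint() below, and
 * "inode32" as a valueless flag.
 */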

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}
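
/*
 * The option string assembled above is what appears after the fs type
 * in /proc/self/mounts and mount(8) output, e.g.
 * ",noalign,sunit=512,swidth=1024,usrquota" (values illustrative).
 */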

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (xfs_is_inode32(mp)) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}
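
/*
 * Worked example (numbers illustrative): XFS inode numbers encode the
 * AG, the block within the AG and the offset within the block, so with
 * inode32 only the first N AGs can yield numbers <= XFS_MAXINUMBER_32
 * (2^32 - 1). The loop above sets pagi_inodeok only for those N AGs and
 * returns N as maxagi, so new inodes are never allocated from AGs that
 * would produce 64-bit inode numbers.
 */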

static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev)
		xfs_blkdev_put(logdev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}
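
/*
 * Every workqueue above except xfs-sync is created with WQ_MEM_RECLAIM,
 * which reserves a rescuer thread so queued work can still make forward
 * progress when memory pressure prevents spawning new kworkers;
 * WQ_FREEZABLE quiesces them across system suspend.
 */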

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}
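
/*
 * Note the coalescing effect of the queue-then-flush pattern above:
 * callers that arrive while a scan is running wait for it in the first
 * flush_work() and return, while at most one new scan is queued for any
 * burst of remaining callers.
 */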

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flags)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when the inode is freed.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never.  Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/*
	 * Expedite background inodegc but don't wait. We do not want to block
	 * here waiting hours for a billion extent file to be truncated.
	 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64	freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
	}

	return 0;
}
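
/*
 * These fields feed statfs(2) and hence df(1). f_bavail is set equal to
 * f_bfree above because XFS holds back space through its own reserve
 * pool (already subtracted via xfs_fdblocks_unavailable()) rather than
 * through a root-only margin.
 */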

STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks = 0;

	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, &resblks, NULL);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, &resblks, NULL);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

	return 0;

free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_frextents);
}

static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
#if defined(DEBUG) || defined(XFS_WARN)
		gc->cpu = cpu;
#endif
		init_llist_head(&gc->list);
		gc->items = 0;
		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/* if ->fill_super failed, we have no mount to tear down */
	if (!sb->s_fs_info)
		return;

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);

	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.dirty_inode		= xfs_fs_dirty_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}
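
/*
 * Examples: "32768", "32k" and "32K" all parse to 32768; "8m" parses to
 * 8388608. Only one trailing K/M/G suffix is recognised, and the parsed
 * value is shifted left with no overflow check, so callers range-check
 * the result (see xfs_fs_validate_params()).
 */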

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/* Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	/*
	 * Copy VFS mount flags from the context now that all parameter parsing
	 * is guaranteed to have been completed by either the old mount API or
	 * the newer fsopen/fsconfig API.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	error = xfs_fs_validate_params(mp);
	if (error)
		goto out_free_names;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations.
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_names;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/*
	 * All percpu data structures requiring cleanup when a cpu goes offline
	 * must be allocated before adding this @mp to the cpu-dead handler's
	 * mount list.
	 */
	xfs_mount_list_add(mp);

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
		xfs_warn(mp,
	"mounting with \"discard\" option, but the device does not support discard");
		mp->m_features &= ~XFS_FEAT_DISCARD;
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	if (xfs_has_large_extent_counts(mp))
		xfs_warn(mp,
	"EXPERIMENTAL Large extent counts feature in use. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_mount_list_del(mp);
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_names:
	sb->s_fs_info = NULL;
	xfs_mount_free(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}
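
/*
 * get_tree_bdev() is the generic VFS entry point for block device based
 * filesystems: it opens the device named by fc->source, finds or
 * allocates a matching superblock, and calls the supplied fill_super
 * callback (xfs_fs_fill_super above) for a new mount.
 */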
1730 
1731 static int
xfs_remount_rw(struct xfs_mount * mp)1732 xfs_remount_rw(
1733 	struct xfs_mount	*mp)
1734 {
1735 	struct xfs_sb		*sbp = &mp->m_sb;
1736 	int error;
1737 
1738 	if (xfs_has_norecovery(mp)) {
1739 		xfs_warn(mp,
1740 			"ro->rw transition prohibited on norecovery mount");
1741 		return -EINVAL;
1742 	}
1743 
1744 	if (xfs_sb_is_v5(sbp) &&
1745 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1746 		xfs_warn(mp,
1747 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1748 			(sbp->sb_features_ro_compat &
1749 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1750 		return -EINVAL;
1751 	}
1752 
1753 	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1754 
1755 	/*
1756 	 * If this is the first remount to writeable state we might have some
1757 	 * superblock changes to update.
1758 	 */
1759 	if (mp->m_update_sb) {
1760 		error = xfs_sync_sb(mp, false);
1761 		if (error) {
1762 			xfs_warn(mp, "failed to write sb changes");
1763 			return error;
1764 		}
1765 		mp->m_update_sb = false;
1766 	}
1767 
1768 	/*
1769 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1770 	 * it is non-zero, otherwise go with the default.
1771 	 */
1772 	xfs_restore_resvblks(mp);
1773 	xfs_log_work_queue(mp);
1774 	xfs_blockgc_start(mp);
1775 
1776 	/* Create the per-AG metadata reservation pool .*/
1777 	error = xfs_fs_reserve_ag_blocks(mp);
1778 	if (error && error != -ENOSPC)
1779 		return error;
1780 
1781 	/* Re-enable the background inode inactivation worker. */
1782 	xfs_inodegc_start(mp);
1783 
1784 	return 0;
1785 }
1786 
static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};
	int			error;

	/* Flush all the dirty data to disk. */
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount.  We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
	xfs_save_resvblks(mp);

	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

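	/*
	 * Switching between 32-bit and 64-bit inode numbers only changes
	 * which AGs are eligible for inode allocation, so recompute the
	 * highest eligible AG index via xfs_set_inode_alloc().
	 */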
	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

/*
 * WARNING: do not initialise any parameters in this function that depend on
 * mount option parsing having already been performed as this can be called
 * from fsopen() before any parameters have been set.
 */
static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	spin_lock_init(&mp->m_agirotor_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

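	/*
	 * Hand the half-constructed mount to the fs_context.  It is either
	 * transferred to the superblock by xfs_fs_fill_super() or freed by
	 * xfs_fs_free() if the mount never gets that far.
	 */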
	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

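/*
 * FS_REQUIRES_DEV: xfs always mounts a block device.
 * FS_ALLOW_IDMAP: idmapped mounts of xfs are supported.
 */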
static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");

STATIC int __init
xfs_init_caches(void)
{
	int		error;

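	/*
	 * Caches are created in sequence; on failure we jump to the
	 * cascading labels at the bottom, which destroy everything created
	 * so far in reverse order.
	 */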
	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					 SLAB_HWCACHE_ALIGN |
					 SLAB_RECLAIM_ACCOUNT |
					 SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						sizeof(struct xlog_ticket),
						0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					      sizeof(struct xfs_da_state),
					      0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					   sizeof(struct xfs_ifork),
					   0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					   sizeof(struct xfs_trans),
					   0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					      sizeof(struct xfs_buf_log_item),
					      0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

	xfs_inode_cache = kmem_cache_create("xfs_inode",
					   sizeof(struct xfs_inode), 0,
					   (SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					   xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					 sizeof(struct xfs_inode_log_item), 0,
					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					 NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					     sizeof(struct xfs_icreate_item),
					     0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					 sizeof(struct xfs_rud_log_item),
					 0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					 sizeof(struct xfs_cud_log_item),
					 0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					 sizeof(struct xfs_bud_log_item),
					 0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					    sizeof(struct xfs_attrd_log_item),
					    0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					    sizeof(struct xfs_attri_log_item),
					    0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					     sizeof(struct xfs_iunlink_item),
					     0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;

 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

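	/*
	 * Discard requests can take a long time to complete; an unbound
	 * workqueue avoids pinning that work to the submitting CPU.
	 */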
	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

#ifdef CONFIG_HOTPLUG_CPU
static int
xfs_cpu_dead(
	unsigned int		cpu)
{
	struct xfs_mount	*mp, *n;

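	/*
	 * Walk every mounted filesystem and let it clean up the per-cpu
	 * state of the dead CPU.  The callouts may block, so the list lock
	 * is dropped around each one.
	 */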
	spin_lock(&xfs_mount_list_lock);
	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
		spin_unlock(&xfs_mount_list_lock);
		xfs_inodegc_cpu_dead(mp, cpu);
		xlog_cil_pcp_dead(mp->m_log, cpu);
		spin_lock(&xfs_mount_list_lock);
	}
	spin_unlock(&xfs_mount_list_lock);
	return 0;
}

static int __init
xfs_cpu_hotplug_init(void)
{
	int	error;

	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
			xfs_cpu_dead);
	if (error < 0)
		xfs_alert(NULL,
"Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
			error);
	return error;
}

static void
xfs_cpu_hotplug_destroy(void)
{
	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
}

#else /* !CONFIG_HOTPLUG_CPU */
static inline int xfs_cpu_hotplug_init(void) { return 0; }
static inline void xfs_cpu_hotplug_destroy(void) {}
#endif

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_cpu_hotplug_init();
	if (error)
		goto out;

	error = xfs_init_caches();
	if (error)
		goto out_destroy_hp;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

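	/* Create the top-level xfs directory under /sys/fs. */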
	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

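	/* Register with the VFS last, once all global state is in place. */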
	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out_destroy_hp:
	xfs_cpu_hotplug_destroy();
 out:
	return error;
}

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
	xfs_cpu_hotplug_destroy();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");