1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 #include "xfs_pwork.h"
39 #include "xfs_ag.h"
40 
41 #include <linux/magic.h>
42 #include <linux/fs_context.h>
43 #include <linux/fs_parser.h>
44 
45 static const struct super_operations xfs_super_operations;
46 
47 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
48 #ifdef DEBUG
49 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
50 #endif
51 
52 #ifdef CONFIG_HOTPLUG_CPU
53 static LIST_HEAD(xfs_mount_list);
54 static DEFINE_SPINLOCK(xfs_mount_list_lock);
55 
56 static inline void xfs_mount_list_add(struct xfs_mount *mp)
57 {
58 	spin_lock(&xfs_mount_list_lock);
59 	list_add(&mp->m_mount_list, &xfs_mount_list);
60 	spin_unlock(&xfs_mount_list_lock);
61 }
62 
63 static inline void xfs_mount_list_del(struct xfs_mount *mp)
64 {
65 	spin_lock(&xfs_mount_list_lock);
66 	list_del(&mp->m_mount_list);
67 	spin_unlock(&xfs_mount_list_lock);
68 }
69 #else /* !CONFIG_HOTPLUG_CPU */
70 static inline void xfs_mount_list_add(struct xfs_mount *mp) {}
71 static inline void xfs_mount_list_del(struct xfs_mount *mp) {}
72 #endif
73 
74 enum xfs_dax_mode {
75 	XFS_DAX_INODE = 0,
76 	XFS_DAX_ALWAYS = 1,
77 	XFS_DAX_NEVER = 2,
78 };
79 
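/*
 * Apply the dax= mount option to the mount feature flags: "inode" clears
 * both override flags so the per-inode DAX flag decides, while "always"
 * and "never" force DAX on or off for the whole filesystem.
 */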
80 static void
81 xfs_mount_set_dax_mode(
82 	struct xfs_mount	*mp,
83 	enum xfs_dax_mode	mode)
84 {
85 	switch (mode) {
86 	case XFS_DAX_INODE:
87 		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
88 		break;
89 	case XFS_DAX_ALWAYS:
90 		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
91 		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
92 		break;
93 	case XFS_DAX_NEVER:
94 		mp->m_features |= XFS_FEAT_DAX_NEVER;
95 		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
96 		break;
97 	}
98 }
99 
100 static const struct constant_table dax_param_enums[] = {
101 	{"inode",	XFS_DAX_INODE },
102 	{"always",	XFS_DAX_ALWAYS },
103 	{"never",	XFS_DAX_NEVER },
104 	{}
105 };
106 
107 /*
108  * Table driven mount option parser.
109  */
110 enum {
111 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
112 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
113 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
114 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
115 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
116 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
117 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
118 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
119 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
120 };
121 
122 static const struct fs_parameter_spec xfs_fs_parameters[] = {
123 	fsparam_u32("logbufs",		Opt_logbufs),
124 	fsparam_string("logbsize",	Opt_logbsize),
125 	fsparam_string("logdev",	Opt_logdev),
126 	fsparam_string("rtdev",		Opt_rtdev),
127 	fsparam_flag("wsync",		Opt_wsync),
128 	fsparam_flag("noalign",		Opt_noalign),
129 	fsparam_flag("swalloc",		Opt_swalloc),
130 	fsparam_u32("sunit",		Opt_sunit),
131 	fsparam_u32("swidth",		Opt_swidth),
132 	fsparam_flag("nouuid",		Opt_nouuid),
133 	fsparam_flag("grpid",		Opt_grpid),
134 	fsparam_flag("nogrpid",		Opt_nogrpid),
135 	fsparam_flag("bsdgroups",	Opt_bsdgroups),
136 	fsparam_flag("sysvgroups",	Opt_sysvgroups),
137 	fsparam_string("allocsize",	Opt_allocsize),
138 	fsparam_flag("norecovery",	Opt_norecovery),
139 	fsparam_flag("inode64",		Opt_inode64),
140 	fsparam_flag("inode32",		Opt_inode32),
141 	fsparam_flag("ikeep",		Opt_ikeep),
142 	fsparam_flag("noikeep",		Opt_noikeep),
143 	fsparam_flag("largeio",		Opt_largeio),
144 	fsparam_flag("nolargeio",	Opt_nolargeio),
145 	fsparam_flag("attr2",		Opt_attr2),
146 	fsparam_flag("noattr2",		Opt_noattr2),
147 	fsparam_flag("filestreams",	Opt_filestreams),
148 	fsparam_flag("quota",		Opt_quota),
149 	fsparam_flag("noquota",		Opt_noquota),
150 	fsparam_flag("usrquota",	Opt_usrquota),
151 	fsparam_flag("grpquota",	Opt_grpquota),
152 	fsparam_flag("prjquota",	Opt_prjquota),
153 	fsparam_flag("uquota",		Opt_uquota),
154 	fsparam_flag("gquota",		Opt_gquota),
155 	fsparam_flag("pquota",		Opt_pquota),
156 	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
157 	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
158 	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
159 	fsparam_flag("qnoenforce",	Opt_qnoenforce),
160 	fsparam_flag("discard",		Opt_discard),
161 	fsparam_flag("nodiscard",	Opt_nodiscard),
162 	fsparam_flag("dax",		Opt_dax),
163 	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
164 	{}
165 };
166 
167 struct proc_xfs_info {
168 	uint64_t	flag;
169 	char		*str;
170 };
171 
172 static int
173 xfs_fs_show_options(
174 	struct seq_file		*m,
175 	struct dentry		*root)
176 {
177 	static struct proc_xfs_info xfs_info_set[] = {
178 		/* the few simple ones we can get from the mount struct */
179 		{ XFS_FEAT_IKEEP,		",ikeep" },
180 		{ XFS_FEAT_WSYNC,		",wsync" },
181 		{ XFS_FEAT_NOALIGN,		",noalign" },
182 		{ XFS_FEAT_SWALLOC,		",swalloc" },
183 		{ XFS_FEAT_NOUUID,		",nouuid" },
184 		{ XFS_FEAT_NORECOVERY,		",norecovery" },
185 		{ XFS_FEAT_ATTR2,		",attr2" },
186 		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
187 		{ XFS_FEAT_GRPID,		",grpid" },
188 		{ XFS_FEAT_DISCARD,		",discard" },
189 		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
190 		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
191 		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
192 		{ 0, NULL }
193 	};
194 	struct xfs_mount	*mp = XFS_M(root->d_sb);
195 	struct proc_xfs_info	*xfs_infop;
196 
197 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
198 		if (mp->m_features & xfs_infop->flag)
199 			seq_puts(m, xfs_infop->str);
200 	}
201 
202 	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);
203 
204 	if (xfs_has_allocsize(mp))
205 		seq_printf(m, ",allocsize=%dk",
206 			   (1 << mp->m_allocsize_log) >> 10);
207 
208 	if (mp->m_logbufs > 0)
209 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
210 	if (mp->m_logbsize > 0)
211 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
212 
213 	if (mp->m_logname)
214 		seq_show_option(m, "logdev", mp->m_logname);
215 	if (mp->m_rtname)
216 		seq_show_option(m, "rtdev", mp->m_rtname);
217 
218 	if (mp->m_dalign > 0)
219 		seq_printf(m, ",sunit=%d",
220 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
221 	if (mp->m_swidth > 0)
222 		seq_printf(m, ",swidth=%d",
223 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
224 
225 	if (mp->m_qflags & XFS_UQUOTA_ENFD)
226 		seq_puts(m, ",usrquota");
227 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
228 		seq_puts(m, ",uqnoenforce");
229 
230 	if (mp->m_qflags & XFS_PQUOTA_ENFD)
231 		seq_puts(m, ",prjquota");
232 	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
233 		seq_puts(m, ",pqnoenforce");
234 
235 	if (mp->m_qflags & XFS_GQUOTA_ENFD)
236 		seq_puts(m, ",grpquota");
237 	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
238 		seq_puts(m, ",gqnoenforce");
239 
240 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
241 		seq_puts(m, ",noquota");
242 
243 	return 0;
244 }
245 
246 /*
247  * Set parameters for inode allocation heuristics, taking into account
248  * filesystem size and inode32/inode64 mount options; i.e. specifically
249  * whether or not XFS_FEAT_SMALL_INUMS is set.
250  *
251  * Inode allocation patterns are altered only if inode32 is requested
252  * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
253  * If altered, XFS_OPSTATE_INODE32 is set as well.
254  *
255  * An agcount independent of that in the mount structure is provided
256  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
257  * to the potentially higher ag count.
258  *
259  * Returns the maximum AG index which may contain inodes.
260  */
261 xfs_agnumber_t
262 xfs_set_inode_alloc(
263 	struct xfs_mount *mp,
264 	xfs_agnumber_t	agcount)
265 {
266 	xfs_agnumber_t	index;
267 	xfs_agnumber_t	maxagi = 0;
268 	xfs_sb_t	*sbp = &mp->m_sb;
269 	xfs_agnumber_t	max_metadata;
270 	xfs_agino_t	agino;
271 	xfs_ino_t	ino;
272 
273 	/*
274 	 * Calculate how much should be reserved for inodes to meet
275 	 * the max inode percentage.  Used only for inode32.
276 	 */
277 	if (M_IGEO(mp)->maxicount) {
278 		uint64_t	icount;
279 
280 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
281 		do_div(icount, 100);
282 		icount += sbp->sb_agblocks - 1;
283 		do_div(icount, sbp->sb_agblocks);
284 		max_metadata = icount;
285 	} else {
286 		max_metadata = agcount;
287 	}
288 
289 	/* Get the last possible inode in the filesystem */
290 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
291 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
292 
293 	/*
294 	 * If user asked for no more than 32-bit inodes, and the fs is
295 	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
296 	 * the allocator to accommodate the request.
297 	 */
298 	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
299 		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
300 	else
301 		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
302 
303 	for (index = 0; index < agcount; index++) {
304 		struct xfs_perag	*pag;
305 
306 		ino = XFS_AGINO_TO_INO(mp, index, agino);
307 
308 		pag = xfs_perag_get(mp, index);
309 
310 		if (xfs_is_inode32(mp)) {
311 			if (ino > XFS_MAXINUMBER_32) {
312 				pag->pagi_inodeok = 0;
313 				pag->pagf_metadata = 0;
314 			} else {
315 				pag->pagi_inodeok = 1;
316 				maxagi++;
317 				if (index < max_metadata)
318 					pag->pagf_metadata = 1;
319 				else
320 					pag->pagf_metadata = 0;
321 			}
322 		} else {
323 			pag->pagi_inodeok = 1;
324 			pag->pagf_metadata = 0;
325 		}
326 
327 		xfs_perag_put(pag);
328 	}
329 
330 	return xfs_is_inode32(mp) ? maxagi : agcount;
331 }
332 
333 static bool
334 xfs_buftarg_is_dax(
335 	struct super_block	*sb,
336 	struct xfs_buftarg	*bt)
337 {
338 	return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
339 			bdev_nr_sectors(bt->bt_bdev));
340 }
341 
342 STATIC int
343 xfs_blkdev_get(
344 	xfs_mount_t		*mp,
345 	const char		*name,
346 	struct block_device	**bdevp)
347 {
348 	int			error = 0;
349 
350 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
351 				    mp);
352 	if (IS_ERR(*bdevp)) {
353 		error = PTR_ERR(*bdevp);
354 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
355 	}
356 
357 	return error;
358 }
359 
360 STATIC void
361 xfs_blkdev_put(
362 	struct block_device	*bdev)
363 {
364 	if (bdev)
365 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
366 }
367 
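/*
 * Tear down the buffer targets and release the block and DAX devices that
 * were opened by xfs_open_devices().
 */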
368 STATIC void
369 xfs_close_devices(
370 	struct xfs_mount	*mp)
371 {
372 	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
373 
374 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
375 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
376 		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
377 
378 		xfs_free_buftarg(mp->m_logdev_targp);
379 		xfs_blkdev_put(logdev);
380 		fs_put_dax(dax_logdev);
381 	}
382 	if (mp->m_rtdev_targp) {
383 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
384 		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
385 
386 		xfs_free_buftarg(mp->m_rtdev_targp);
387 		xfs_blkdev_put(rtdev);
388 		fs_put_dax(dax_rtdev);
389 	}
390 	xfs_free_buftarg(mp->m_ddev_targp);
391 	fs_put_dax(dax_ddev);
392 }
393 
394 /*
395  * The file system configurations are:
396  *	(1) device (partition) with data and internal log
397  *	(2) logical volume with data and log subvolumes.
398  *	(3) logical volume with data, log, and realtime subvolumes.
399  *
400  * We only have to handle opening the log and realtime volumes here if
401  * they are present.  The data subvolume has already been opened by
402  * get_sb_bdev() and is stored in sb->s_bdev.
403  */
404 STATIC int
405 xfs_open_devices(
406 	struct xfs_mount	*mp)
407 {
408 	struct block_device	*ddev = mp->m_super->s_bdev;
409 	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
410 	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
411 	struct block_device	*logdev = NULL, *rtdev = NULL;
412 	int			error;
413 
414 	/*
415 	 * Open real time and log devices - order is important.
416 	 */
417 	if (mp->m_logname) {
418 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
419 		if (error)
420 			goto out;
421 		dax_logdev = fs_dax_get_by_bdev(logdev);
422 	}
423 
424 	if (mp->m_rtname) {
425 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
426 		if (error)
427 			goto out_close_logdev;
428 
429 		if (rtdev == ddev || rtdev == logdev) {
430 			xfs_warn(mp,
431 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
432 			error = -EINVAL;
433 			goto out_close_rtdev;
434 		}
435 		dax_rtdev = fs_dax_get_by_bdev(rtdev);
436 	}
437 
438 	/*
439 	 * Setup xfs_mount buffer target pointers
440 	 */
441 	error = -ENOMEM;
442 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
443 	if (!mp->m_ddev_targp)
444 		goto out_close_rtdev;
445 
446 	if (rtdev) {
447 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
448 		if (!mp->m_rtdev_targp)
449 			goto out_free_ddev_targ;
450 	}
451 
452 	if (logdev && logdev != ddev) {
453 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
454 		if (!mp->m_logdev_targp)
455 			goto out_free_rtdev_targ;
456 	} else {
457 		mp->m_logdev_targp = mp->m_ddev_targp;
458 	}
459 
460 	return 0;
461 
462  out_free_rtdev_targ:
463 	if (mp->m_rtdev_targp)
464 		xfs_free_buftarg(mp->m_rtdev_targp);
465  out_free_ddev_targ:
466 	xfs_free_buftarg(mp->m_ddev_targp);
467  out_close_rtdev:
468 	xfs_blkdev_put(rtdev);
469 	fs_put_dax(dax_rtdev);
470  out_close_logdev:
471 	if (logdev && logdev != ddev) {
472 		xfs_blkdev_put(logdev);
473 		fs_put_dax(dax_logdev);
474 	}
475  out:
476 	fs_put_dax(dax_ddev);
477 	return error;
478 }
479 
480 /*
481  * Setup xfs_mount buffer target pointers based on superblock
482  */
483 STATIC int
484 xfs_setup_devices(
485 	struct xfs_mount	*mp)
486 {
487 	int			error;
488 
489 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
490 	if (error)
491 		return error;
492 
493 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
494 		unsigned int	log_sector_size = BBSIZE;
495 
496 		if (xfs_has_sector(mp))
497 			log_sector_size = mp->m_sb.sb_logsectsize;
498 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
499 					    log_sector_size);
500 		if (error)
501 			return error;
502 	}
503 	if (mp->m_rtdev_targp) {
504 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
505 					    mp->m_sb.sb_sectsize);
506 		if (error)
507 			return error;
508 	}
509 
510 	return 0;
511 }
512 
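/*
 * Allocate the per-mount workqueues: buffer completion, unwritten extent
 * conversion, inode reclaim, blockgc, inodegc and background sync.  On
 * failure, everything allocated so far is torn down before returning.
 */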
513 STATIC int
514 xfs_init_mount_workqueues(
515 	struct xfs_mount	*mp)
516 {
517 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
518 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
519 			1, mp->m_super->s_id);
520 	if (!mp->m_buf_workqueue)
521 		goto out;
522 
523 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
524 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
525 			0, mp->m_super->s_id);
526 	if (!mp->m_unwritten_workqueue)
527 		goto out_destroy_buf;
528 
529 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
530 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
531 			0, mp->m_super->s_id);
532 	if (!mp->m_reclaim_workqueue)
533 		goto out_destroy_unwritten;
534 
535 	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
536 			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
537 			0, mp->m_super->s_id);
538 	if (!mp->m_blockgc_wq)
539 		goto out_destroy_reclaim;
540 
541 	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
542 			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
543 			1, mp->m_super->s_id);
544 	if (!mp->m_inodegc_wq)
545 		goto out_destroy_blockgc;
546 
547 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
548 			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
549 	if (!mp->m_sync_workqueue)
550 		goto out_destroy_inodegc;
551 
552 	return 0;
553 
554 out_destroy_inodegc:
555 	destroy_workqueue(mp->m_inodegc_wq);
556 out_destroy_blockgc:
557 	destroy_workqueue(mp->m_blockgc_wq);
558 out_destroy_reclaim:
559 	destroy_workqueue(mp->m_reclaim_workqueue);
560 out_destroy_unwritten:
561 	destroy_workqueue(mp->m_unwritten_workqueue);
562 out_destroy_buf:
563 	destroy_workqueue(mp->m_buf_workqueue);
564 out:
565 	return -ENOMEM;
566 }
567 
568 STATIC void
569 xfs_destroy_mount_workqueues(
570 	struct xfs_mount	*mp)
571 {
572 	destroy_workqueue(mp->m_sync_workqueue);
573 	destroy_workqueue(mp->m_blockgc_wq);
574 	destroy_workqueue(mp->m_inodegc_wq);
575 	destroy_workqueue(mp->m_reclaim_workqueue);
576 	destroy_workqueue(mp->m_unwritten_workqueue);
577 	destroy_workqueue(mp->m_buf_workqueue);
578 }
579 
580 static void
581 xfs_flush_inodes_worker(
582 	struct work_struct	*work)
583 {
584 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
585 						   m_flush_inodes_work);
586 	struct super_block	*sb = mp->m_super;
587 
588 	if (down_read_trylock(&sb->s_umount)) {
589 		sync_inodes_sb(sb);
590 		up_read(&sb->s_umount);
591 	}
592 }
593 
594 /*
595  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
596  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
597  * for IO to complete so that we effectively throttle multiple callers to the
598  * rate at which IO is completing.
599  */
600 void
601 xfs_flush_inodes(
602 	struct xfs_mount	*mp)
603 {
604 	/*
605 	 * If flush_work() returns true then that means we waited for a flush
606 	 * which was already in progress.  Don't bother running another scan.
607 	 */
608 	if (flush_work(&mp->m_flush_inodes_work))
609 		return;
610 
611 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
612 	flush_work(&mp->m_flush_inodes_work);
613 }
614 
615 /* Catch misguided souls that try to use this interface on XFS */
616 STATIC struct inode *
617 xfs_fs_alloc_inode(
618 	struct super_block	*sb)
619 {
620 	BUG();
621 	return NULL;
622 }
623 
624 /*
625  * Now that the generic code is guaranteed not to be accessing
626  * the linux inode, we can inactivate and reclaim the inode.
627  */
628 STATIC void
629 xfs_fs_destroy_inode(
630 	struct inode		*inode)
631 {
632 	struct xfs_inode	*ip = XFS_I(inode);
633 
634 	trace_xfs_destroy_inode(ip);
635 
636 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
637 	XFS_STATS_INC(ip->i_mount, vn_rele);
638 	XFS_STATS_INC(ip->i_mount, vn_remove);
639 	xfs_inode_mark_reclaimable(ip);
640 }
641 
642 static void
643 xfs_fs_dirty_inode(
644 	struct inode			*inode,
645 	int				flags)
646 {
647 	struct xfs_inode		*ip = XFS_I(inode);
648 	struct xfs_mount		*mp = ip->i_mount;
649 	struct xfs_trans		*tp;
650 
651 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
652 		return;
653 
654 	/*
655 	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
656 	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
657 	 * in flags possibly together with I_DIRTY_SYNC.
658 	 */
659 	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
660 		return;
661 
662 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
663 		return;
664 	xfs_ilock(ip, XFS_ILOCK_EXCL);
665 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
666 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
667 	xfs_trans_commit(tp);
668 }
669 
670 /*
671  * Slab object creation initialisation for the XFS inode.
672  * This covers only the idempotent fields in the XFS inode;
673  * all other fields need to be initialised on allocation
674  * from the slab. This avoids the need to repeatedly initialise
675  * fields in the xfs inode that are left in the initialised state
676  * when freeing the inode.
677  */
678 STATIC void
679 xfs_fs_inode_init_once(
680 	void			*inode)
681 {
682 	struct xfs_inode	*ip = inode;
683 
684 	memset(ip, 0, sizeof(struct xfs_inode));
685 
686 	/* vfs inode */
687 	inode_init_once(VFS_I(ip));
688 
689 	/* xfs inode */
690 	atomic_set(&ip->i_pincount, 0);
691 	spin_lock_init(&ip->i_flags_lock);
692 
693 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
694 		     "xfsino", ip->i_ino);
695 }
696 
697 /*
698  * We do an unlocked check for XFS_IDONTCACHE here because we are already
699  * serialised against cache hits here via the inode->i_lock and igrab() in
700  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
701  * racing with us, and it avoids needing to grab a spinlock here for every inode
702  * we drop the final reference on.
703  */
704 STATIC int
705 xfs_fs_drop_inode(
706 	struct inode		*inode)
707 {
708 	struct xfs_inode	*ip = XFS_I(inode);
709 
710 	/*
711 	 * If this unlinked inode is in the middle of recovery, don't
712 	 * drop the inode just yet; log recovery will take care of
713 	 * that.  See the comment for this inode flag.
714 	 */
715 	if (ip->i_flags & XFS_IRECOVERY) {
716 		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
717 		return 0;
718 	}
719 
720 	return generic_drop_inode(inode);
721 }
722 
723 static void
724 xfs_mount_free(
725 	struct xfs_mount	*mp)
726 {
727 	kfree(mp->m_rtname);
728 	kfree(mp->m_logname);
729 	kmem_free(mp);
730 }
731 
732 STATIC int
733 xfs_fs_sync_fs(
734 	struct super_block	*sb,
735 	int			wait)
736 {
737 	struct xfs_mount	*mp = XFS_M(sb);
738 	int			error;
739 
740 	trace_xfs_fs_sync_fs(mp, __return_address);
741 
742 	/*
743 	 * Doing anything during the async pass would be counterproductive.
744 	 */
745 	if (!wait)
746 		return 0;
747 
748 	error = xfs_log_force(mp, XFS_LOG_SYNC);
749 	if (error)
750 		return error;
751 
752 	if (laptop_mode) {
753 		/*
754 		 * The disk must be active because we're syncing.
755 		 * We schedule log work now (now that the disk is
756 		 * active) instead of later (when it might not be).
757 		 */
758 		flush_delayed_work(&mp->m_log->l_work);
759 	}
760 
761 	/*
762 	 * If we are called with page faults frozen out, it means we are about
763 	 * to freeze the transaction subsystem. Take the opportunity to shut
764 	 * down inodegc because once SB_FREEZE_FS is set it's too late to
765 	 * prevent inactivation races with freeze. The fs doesn't get called
766 	 * again by the freezing process until after SB_FREEZE_FS has been set,
767 	 * so it's now or never.  Same logic applies to speculative allocation
768 	 * garbage collection.
769 	 *
770 	 * We don't care if this is a normal syncfs call that does this or
771 	 * freeze that does this - we can run this multiple times without issue
772 	 * and we won't race with a restart because a restart can only occur
773 	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
774 	 */
775 	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
776 		xfs_inodegc_stop(mp);
777 		xfs_blockgc_stop(mp);
778 	}
779 
780 	return 0;
781 }
782 
783 STATIC int
784 xfs_fs_statfs(
785 	struct dentry		*dentry,
786 	struct kstatfs		*statp)
787 {
788 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
789 	xfs_sb_t		*sbp = &mp->m_sb;
790 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
791 	uint64_t		fakeinos, id;
792 	uint64_t		icount;
793 	uint64_t		ifree;
794 	uint64_t		fdblocks;
795 	xfs_extlen_t		lsize;
796 	int64_t			ffree;
797 
798 	/*
799 	 * Expedite background inodegc but don't wait. We do not want to block
800 	 * here waiting hours for a billion extent file to be truncated.
801 	 */
802 	xfs_inodegc_push(mp);
803 
804 	statp->f_type = XFS_SUPER_MAGIC;
805 	statp->f_namelen = MAXNAMELEN - 1;
806 
807 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
808 	statp->f_fsid = u64_to_fsid(id);
809 
810 	icount = percpu_counter_sum(&mp->m_icount);
811 	ifree = percpu_counter_sum(&mp->m_ifree);
812 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
813 
814 	spin_lock(&mp->m_sb_lock);
815 	statp->f_bsize = sbp->sb_blocksize;
816 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
817 	statp->f_blocks = sbp->sb_dblocks - lsize;
818 	spin_unlock(&mp->m_sb_lock);
819 
820 	/* make sure statp->f_bfree does not underflow */
821 	statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
822 	statp->f_bavail = statp->f_bfree;
823 
824 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
825 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
826 	if (M_IGEO(mp)->maxicount)
827 		statp->f_files = min_t(typeof(statp->f_files),
828 					statp->f_files,
829 					M_IGEO(mp)->maxicount);
830 
831 	/* If sb_icount overshot maxicount, report actual allocation */
832 	statp->f_files = max_t(typeof(statp->f_files),
833 					statp->f_files,
834 					sbp->sb_icount);
835 
836 	/* make sure statp->f_ffree does not underflow */
837 	ffree = statp->f_files - (icount - ifree);
838 	statp->f_ffree = max_t(int64_t, ffree, 0);
839 
840 
841 	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
842 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
843 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
844 		xfs_qm_statvfs(ip, statp);
845 
846 	if (XFS_IS_REALTIME_MOUNT(mp) &&
847 	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
848 		statp->f_blocks = sbp->sb_rblocks;
849 		statp->f_bavail = statp->f_bfree =
850 			sbp->sb_frextents * sbp->sb_rextsize;
851 	}
852 
853 	return 0;
854 }
855 
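/*
 * Stash the current reserve pool size and drain the pool before a freeze or
 * remount-ro; xfs_restore_resvblks() refills it from the saved value (or the
 * default) afterwards.
 */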
856 STATIC void
857 xfs_save_resvblks(struct xfs_mount *mp)
858 {
859 	uint64_t resblks = 0;
860 
861 	mp->m_resblks_save = mp->m_resblks;
862 	xfs_reserve_blocks(mp, &resblks, NULL);
863 }
864 
865 STATIC void
866 xfs_restore_resvblks(struct xfs_mount *mp)
867 {
868 	uint64_t resblks;
869 
870 	if (mp->m_resblks_save) {
871 		resblks = mp->m_resblks_save;
872 		mp->m_resblks_save = 0;
873 	} else
874 		resblks = xfs_default_resblks(mp);
875 
876 	xfs_reserve_blocks(mp, &resblks, NULL);
877 }
878 
879 /*
880  * Second stage of a freeze. The data is already frozen so we only
881  * need to take care of the metadata. Once that's done sync the superblock
882  * to the log to dirty it in case of a crash while frozen. This ensures that we
883  * will recover the unlinked inode lists on the next mount.
884  */
885 STATIC int
886 xfs_fs_freeze(
887 	struct super_block	*sb)
888 {
889 	struct xfs_mount	*mp = XFS_M(sb);
890 	unsigned int		flags;
891 	int			ret;
892 
893 	/*
894 	 * The filesystem is now frozen far enough that memory reclaim
895 	 * cannot safely operate on the filesystem. Hence we need to
896 	 * set a GFP_NOFS context here to avoid recursion deadlocks.
897 	 */
898 	flags = memalloc_nofs_save();
899 	xfs_save_resvblks(mp);
900 	ret = xfs_log_quiesce(mp);
901 	memalloc_nofs_restore(flags);
902 
903 	/*
904 	 * For read-write filesystems, we need to restart the inodegc on error
905 	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
906 	 * going to be run to restart it now.  We are at SB_FREEZE_FS level
907 	 * here, so we can restart safely without racing with a stop in
908 	 * xfs_fs_sync_fs().
909 	 */
910 	if (ret && !xfs_is_readonly(mp)) {
911 		xfs_blockgc_start(mp);
912 		xfs_inodegc_start(mp);
913 	}
914 
915 	return ret;
916 }
917 
918 STATIC int
919 xfs_fs_unfreeze(
920 	struct super_block	*sb)
921 {
922 	struct xfs_mount	*mp = XFS_M(sb);
923 
924 	xfs_restore_resvblks(mp);
925 	xfs_log_work_queue(mp);
926 
927 	/*
928 	 * Don't reactivate the inodegc worker on a readonly filesystem because
929 	 * inodes are sent directly to reclaim.  Don't reactivate the blockgc
930 	 * worker because there are no speculative preallocations on a readonly
931 	 * filesystem.
932 	 */
933 	if (!xfs_is_readonly(mp)) {
934 		xfs_blockgc_start(mp);
935 		xfs_inodegc_start(mp);
936 	}
937 
938 	return 0;
939 }
940 
941 /*
942  * This function fills in xfs_mount_t fields based on mount args.
943  * Note: the superblock _has_ now been read in.
944  */
945 STATIC int
946 xfs_finish_flags(
947 	struct xfs_mount	*mp)
948 {
949 	/* Fail a mount where the logbuf is smaller than the log stripe */
950 	if (xfs_has_logv2(mp)) {
951 		if (mp->m_logbsize <= 0 &&
952 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
953 			mp->m_logbsize = mp->m_sb.sb_logsunit;
954 		} else if (mp->m_logbsize > 0 &&
955 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
956 			xfs_warn(mp,
957 		"logbuf size must be greater than or equal to log stripe size");
958 			return -EINVAL;
959 		}
960 	} else {
961 		/* Fail a mount if the logbuf is larger than 32K */
962 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
963 			xfs_warn(mp,
964 		"logbuf size for version 1 logs must be 16K or 32K");
965 			return -EINVAL;
966 		}
967 	}
968 
969 	/*
970 	 * V5 filesystems always use attr2 format for attributes.
971 	 */
972 	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
973 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
974 			     "attr2 is always enabled for V5 filesystems.");
975 		return -EINVAL;
976 	}
977 
978 	/*
979 	 * prohibit r/w mounts of read-only filesystems
980 	 */
981 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
982 		xfs_warn(mp,
983 			"cannot mount a read-only filesystem as read-write");
984 		return -EROFS;
985 	}
986 
987 	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
988 	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
989 	    !xfs_has_pquotino(mp)) {
990 		xfs_warn(mp,
991 		  "Super block does not support project and group quota together");
992 		return -EINVAL;
993 	}
994 
995 	return 0;
996 }
997 
998 static int
999 xfs_init_percpu_counters(
1000 	struct xfs_mount	*mp)
1001 {
1002 	int		error;
1003 
1004 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1005 	if (error)
1006 		return -ENOMEM;
1007 
1008 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1009 	if (error)
1010 		goto free_icount;
1011 
1012 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1013 	if (error)
1014 		goto free_ifree;
1015 
1016 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1017 	if (error)
1018 		goto free_fdblocks;
1019 
1020 	return 0;
1021 
1022 free_fdblocks:
1023 	percpu_counter_destroy(&mp->m_fdblocks);
1024 free_ifree:
1025 	percpu_counter_destroy(&mp->m_ifree);
1026 free_icount:
1027 	percpu_counter_destroy(&mp->m_icount);
1028 	return -ENOMEM;
1029 }
1030 
1031 void
1032 xfs_reinit_percpu_counters(
1033 	struct xfs_mount	*mp)
1034 {
1035 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1036 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1037 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1038 }
1039 
1040 static void
1041 xfs_destroy_percpu_counters(
1042 	struct xfs_mount	*mp)
1043 {
1044 	percpu_counter_destroy(&mp->m_icount);
1045 	percpu_counter_destroy(&mp->m_ifree);
1046 	percpu_counter_destroy(&mp->m_fdblocks);
1047 	ASSERT(xfs_is_shutdown(mp) ||
1048 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1049 	percpu_counter_destroy(&mp->m_delalloc_blks);
1050 }
1051 
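/*
 * Set up the per-cpu deferred inode inactivation state: one llist and one
 * delayed work item per possible CPU, all run by xfs_inodegc_worker().
 */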
1052 static int
1053 xfs_inodegc_init_percpu(
1054 	struct xfs_mount	*mp)
1055 {
1056 	struct xfs_inodegc	*gc;
1057 	int			cpu;
1058 
1059 	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
1060 	if (!mp->m_inodegc)
1061 		return -ENOMEM;
1062 
1063 	for_each_possible_cpu(cpu) {
1064 		gc = per_cpu_ptr(mp->m_inodegc, cpu);
1065 #if defined(DEBUG) || defined(XFS_WARN)
1066 		gc->cpu = cpu;
1067 #endif
1068 		init_llist_head(&gc->list);
1069 		gc->items = 0;
1070 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
1071 	}
1072 	return 0;
1073 }
1074 
1075 static void
1076 xfs_inodegc_free_percpu(
1077 	struct xfs_mount	*mp)
1078 {
1079 	if (!mp->m_inodegc)
1080 		return;
1081 	free_percpu(mp->m_inodegc);
1082 }
1083 
1084 static void
1085 xfs_fs_put_super(
1086 	struct super_block	*sb)
1087 {
1088 	struct xfs_mount	*mp = XFS_M(sb);
1089 
1090 	/* if ->fill_super failed, we have no mount to tear down */
1091 	if (!sb->s_fs_info)
1092 		return;
1093 
1094 	xfs_notice(mp, "Unmounting Filesystem");
1095 	xfs_filestream_unmount(mp);
1096 	xfs_unmountfs(mp);
1097 
1098 	xfs_freesb(mp);
1099 	free_percpu(mp->m_stats.xs_stats);
1100 	xfs_mount_list_del(mp);
1101 	xfs_inodegc_free_percpu(mp);
1102 	xfs_destroy_percpu_counters(mp);
1103 	xfs_destroy_mount_workqueues(mp);
1104 	xfs_close_devices(mp);
1105 
1106 	sb->s_fs_info = NULL;
1107 	xfs_mount_free(mp);
1108 }
1109 
1110 static long
1111 xfs_fs_nr_cached_objects(
1112 	struct super_block	*sb,
1113 	struct shrink_control	*sc)
1114 {
1115 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1116 	if (WARN_ON_ONCE(!sb->s_fs_info))
1117 		return 0;
1118 	return xfs_reclaim_inodes_count(XFS_M(sb));
1119 }
1120 
1121 static long
1122 xfs_fs_free_cached_objects(
1123 	struct super_block	*sb,
1124 	struct shrink_control	*sc)
1125 {
1126 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1127 }
1128 
1129 static const struct super_operations xfs_super_operations = {
1130 	.alloc_inode		= xfs_fs_alloc_inode,
1131 	.destroy_inode		= xfs_fs_destroy_inode,
1132 	.dirty_inode		= xfs_fs_dirty_inode,
1133 	.drop_inode		= xfs_fs_drop_inode,
1134 	.put_super		= xfs_fs_put_super,
1135 	.sync_fs		= xfs_fs_sync_fs,
1136 	.freeze_fs		= xfs_fs_freeze,
1137 	.unfreeze_fs		= xfs_fs_unfreeze,
1138 	.statfs			= xfs_fs_statfs,
1139 	.show_options		= xfs_fs_show_options,
1140 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1141 	.free_cached_objects	= xfs_fs_free_cached_objects,
1142 };
1143 
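/*
 * Parse an integer option value with an optional k/m/g suffix (e.g.
 * "logbsize=32k"), returning the result scaled to bytes.
 */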
1144 static int
1145 suffix_kstrtoint(
1146 	const char	*s,
1147 	unsigned int	base,
1148 	int		*res)
1149 {
1150 	int		last, shift_left_factor = 0, _res;
1151 	char		*value;
1152 	int		ret = 0;
1153 
1154 	value = kstrdup(s, GFP_KERNEL);
1155 	if (!value)
1156 		return -ENOMEM;
1157 
1158 	last = strlen(value) - 1;
1159 	if (value[last] == 'K' || value[last] == 'k') {
1160 		shift_left_factor = 10;
1161 		value[last] = '\0';
1162 	}
1163 	if (value[last] == 'M' || value[last] == 'm') {
1164 		shift_left_factor = 20;
1165 		value[last] = '\0';
1166 	}
1167 	if (value[last] == 'G' || value[last] == 'g') {
1168 		shift_left_factor = 30;
1169 		value[last] = '\0';
1170 	}
1171 
1172 	if (kstrtoint(value, base, &_res))
1173 		ret = -EINVAL;
1174 	kfree(value);
1175 	*res = _res << shift_left_factor;
1176 	return ret;
1177 }
1178 
1179 static inline void
1180 xfs_fs_warn_deprecated(
1181 	struct fs_context	*fc,
1182 	struct fs_parameter	*param,
1183 	uint64_t		flag,
1184 	bool			value)
1185 {
1186 	/* Don't print the warning if reconfiguring and current mount point
1187 	 * already had the flag set
1188 	 */
1189 	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1190             !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
1191 		return;
1192 	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1193 }
1194 
1195 /*
1196  * Set mount state from a mount option.
1197  *
1198  * NOTE: mp->m_super is NULL here!
1199  */
1200 static int
1201 xfs_fs_parse_param(
1202 	struct fs_context	*fc,
1203 	struct fs_parameter	*param)
1204 {
1205 	struct xfs_mount	*parsing_mp = fc->s_fs_info;
1206 	struct fs_parse_result	result;
1207 	int			size = 0;
1208 	int			opt;
1209 
1210 	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1211 	if (opt < 0)
1212 		return opt;
1213 
1214 	switch (opt) {
1215 	case Opt_logbufs:
1216 		parsing_mp->m_logbufs = result.uint_32;
1217 		return 0;
1218 	case Opt_logbsize:
1219 		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1220 			return -EINVAL;
1221 		return 0;
1222 	case Opt_logdev:
1223 		kfree(parsing_mp->m_logname);
1224 		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1225 		if (!parsing_mp->m_logname)
1226 			return -ENOMEM;
1227 		return 0;
1228 	case Opt_rtdev:
1229 		kfree(parsing_mp->m_rtname);
1230 		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1231 		if (!parsing_mp->m_rtname)
1232 			return -ENOMEM;
1233 		return 0;
1234 	case Opt_allocsize:
1235 		if (suffix_kstrtoint(param->string, 10, &size))
1236 			return -EINVAL;
1237 		parsing_mp->m_allocsize_log = ffs(size) - 1;
1238 		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
1239 		return 0;
1240 	case Opt_grpid:
1241 	case Opt_bsdgroups:
1242 		parsing_mp->m_features |= XFS_FEAT_GRPID;
1243 		return 0;
1244 	case Opt_nogrpid:
1245 	case Opt_sysvgroups:
1246 		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
1247 		return 0;
1248 	case Opt_wsync:
1249 		parsing_mp->m_features |= XFS_FEAT_WSYNC;
1250 		return 0;
1251 	case Opt_norecovery:
1252 		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
1253 		return 0;
1254 	case Opt_noalign:
1255 		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
1256 		return 0;
1257 	case Opt_swalloc:
1258 		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
1259 		return 0;
1260 	case Opt_sunit:
1261 		parsing_mp->m_dalign = result.uint_32;
1262 		return 0;
1263 	case Opt_swidth:
1264 		parsing_mp->m_swidth = result.uint_32;
1265 		return 0;
1266 	case Opt_inode32:
1267 		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
1268 		return 0;
1269 	case Opt_inode64:
1270 		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1271 		return 0;
1272 	case Opt_nouuid:
1273 		parsing_mp->m_features |= XFS_FEAT_NOUUID;
1274 		return 0;
1275 	case Opt_largeio:
1276 		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
1277 		return 0;
1278 	case Opt_nolargeio:
1279 		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
1280 		return 0;
1281 	case Opt_filestreams:
1282 		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
1283 		return 0;
1284 	case Opt_noquota:
1285 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1286 		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1287 		return 0;
1288 	case Opt_quota:
1289 	case Opt_uquota:
1290 	case Opt_usrquota:
1291 		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
1292 		return 0;
1293 	case Opt_qnoenforce:
1294 	case Opt_uqnoenforce:
1295 		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
1296 		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1297 		return 0;
1298 	case Opt_pquota:
1299 	case Opt_prjquota:
1300 		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
1301 		return 0;
1302 	case Opt_pqnoenforce:
1303 		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
1304 		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1305 		return 0;
1306 	case Opt_gquota:
1307 	case Opt_grpquota:
1308 		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
1309 		return 0;
1310 	case Opt_gqnoenforce:
1311 		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
1312 		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1313 		return 0;
1314 	case Opt_discard:
1315 		parsing_mp->m_features |= XFS_FEAT_DISCARD;
1316 		return 0;
1317 	case Opt_nodiscard:
1318 		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
1319 		return 0;
1320 #ifdef CONFIG_FS_DAX
1321 	case Opt_dax:
1322 		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1323 		return 0;
1324 	case Opt_dax_enum:
1325 		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1326 		return 0;
1327 #endif
1328 	/* Following mount options will be removed in September 2025 */
1329 	case Opt_ikeep:
1330 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
1331 		parsing_mp->m_features |= XFS_FEAT_IKEEP;
1332 		return 0;
1333 	case Opt_noikeep:
1334 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
1335 		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
1336 		return 0;
1337 	case Opt_attr2:
1338 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
1339 		parsing_mp->m_features |= XFS_FEAT_ATTR2;
1340 		return 0;
1341 	case Opt_noattr2:
1342 		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
1343 		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
1344 		return 0;
1345 	default:
1346 		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1347 		return -EINVAL;
1348 	}
1349 
1350 	return 0;
1351 }
1352 
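/*
 * Sanity check the parsed mount options before the superblock has been read:
 * norecovery implies read-only, attr2/noattr2 conflicts, sunit/swidth
 * consistency, and the logbufs, logbsize and allocsize ranges.
 */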
1353 static int
1354 xfs_fs_validate_params(
1355 	struct xfs_mount	*mp)
1356 {
1357 	/* No recovery flag requires a read-only mount */
1358 	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
1359 		xfs_warn(mp, "no-recovery mounts must be read-only.");
1360 		return -EINVAL;
1361 	}
1362 
1363 	/*
1364 	 * We have not read the superblock at this point, so only the attr2
1365 	 * mount option can set the attr2 feature by this stage.
1366 	 */
1367 	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
1368 		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
1369 		return -EINVAL;
1370 	}
1371 
1372 
1373 	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
1374 		xfs_warn(mp,
1375 	"sunit and swidth options incompatible with the noalign option");
1376 		return -EINVAL;
1377 	}
1378 
1379 	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1380 		xfs_warn(mp, "quota support not available in this kernel.");
1381 		return -EINVAL;
1382 	}
1383 
1384 	if ((mp->m_dalign && !mp->m_swidth) ||
1385 	    (!mp->m_dalign && mp->m_swidth)) {
1386 		xfs_warn(mp, "sunit and swidth must be specified together");
1387 		return -EINVAL;
1388 	}
1389 
1390 	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1391 		xfs_warn(mp,
1392 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
1393 			mp->m_swidth, mp->m_dalign);
1394 		return -EINVAL;
1395 	}
1396 
1397 	if (mp->m_logbufs != -1 &&
1398 	    mp->m_logbufs != 0 &&
1399 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1400 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1401 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1402 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1403 		return -EINVAL;
1404 	}
1405 
1406 	if (mp->m_logbsize != -1 &&
1407 	    mp->m_logbsize !=  0 &&
1408 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1409 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1410 	     !is_power_of_2(mp->m_logbsize))) {
1411 		xfs_warn(mp,
1412 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1413 			mp->m_logbsize);
1414 		return -EINVAL;
1415 	}
1416 
1417 	if (xfs_has_allocsize(mp) &&
1418 	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1419 	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1420 		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1421 			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1422 		return -EINVAL;
1423 	}
1424 
1425 	return 0;
1426 }
1427 
1428 static int
1429 xfs_fs_fill_super(
1430 	struct super_block	*sb,
1431 	struct fs_context	*fc)
1432 {
1433 	struct xfs_mount	*mp = sb->s_fs_info;
1434 	struct inode		*root;
1435 	int			flags = 0, error;
1436 
1437 	mp->m_super = sb;
1438 
1439 	/*
1440 	 * Copy VFS mount flags from the context now that all parameter parsing
1441 	 * is guaranteed to have been completed by either the old mount API or
1442 	 * the newer fsopen/fsconfig API.
1443 	 */
1444 	if (fc->sb_flags & SB_RDONLY)
1445 		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1446 	if (fc->sb_flags & SB_DIRSYNC)
1447 		mp->m_features |= XFS_FEAT_DIRSYNC;
1448 	if (fc->sb_flags & SB_SYNCHRONOUS)
1449 		mp->m_features |= XFS_FEAT_WSYNC;
1450 
1451 	error = xfs_fs_validate_params(mp);
1452 	if (error)
1453 		goto out_free_names;
1454 
1455 	sb_min_blocksize(sb, BBSIZE);
1456 	sb->s_xattr = xfs_xattr_handlers;
1457 	sb->s_export_op = &xfs_export_operations;
1458 #ifdef CONFIG_XFS_QUOTA
1459 	sb->s_qcop = &xfs_quotactl_operations;
1460 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1461 #endif
1462 	sb->s_op = &xfs_super_operations;
1463 
1464 	/*
1465 	 * Delay mount work if the debug hook is set. This is debug
1466 	 * instrumentation to coordinate simulation of xfs mount failures with
1467 	 * VFS superblock operations
1468 	 */
1469 	if (xfs_globals.mount_delay) {
1470 		xfs_notice(mp, "Delaying mount for %d seconds.",
1471 			xfs_globals.mount_delay);
1472 		msleep(xfs_globals.mount_delay * 1000);
1473 	}
1474 
1475 	if (fc->sb_flags & SB_SILENT)
1476 		flags |= XFS_MFSI_QUIET;
1477 
1478 	error = xfs_open_devices(mp);
1479 	if (error)
1480 		goto out_free_names;
1481 
1482 	error = xfs_init_mount_workqueues(mp);
1483 	if (error)
1484 		goto out_close_devices;
1485 
1486 	error = xfs_init_percpu_counters(mp);
1487 	if (error)
1488 		goto out_destroy_workqueues;
1489 
1490 	error = xfs_inodegc_init_percpu(mp);
1491 	if (error)
1492 		goto out_destroy_counters;
1493 
1494 	/*
1495 	 * All percpu data structures requiring cleanup when a cpu goes offline
1496 	 * must be allocated before adding this @mp to the cpu-dead handler's
1497 	 * mount list.
1498 	 */
1499 	xfs_mount_list_add(mp);
1500 
1501 	/* Allocate stats memory before we do operations that might use it */
1502 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1503 	if (!mp->m_stats.xs_stats) {
1504 		error = -ENOMEM;
1505 		goto out_destroy_inodegc;
1506 	}
1507 
1508 	error = xfs_readsb(mp, flags);
1509 	if (error)
1510 		goto out_free_stats;
1511 
1512 	error = xfs_finish_flags(mp);
1513 	if (error)
1514 		goto out_free_sb;
1515 
1516 	error = xfs_setup_devices(mp);
1517 	if (error)
1518 		goto out_free_sb;
1519 
1520 	/* V4 support is undergoing deprecation. */
1521 	if (!xfs_has_crc(mp)) {
1522 #ifdef CONFIG_XFS_SUPPORT_V4
1523 		xfs_warn_once(mp,
1524 	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
1525 #else
1526 		xfs_warn(mp,
1527 	"Deprecated V4 format (crc=0) not supported by kernel.");
1528 		error = -EINVAL;
1529 		goto out_free_sb;
1530 #endif
1531 	}
1532 
1533 	/* Filesystem claims it needs repair, so refuse the mount. */
1534 	if (xfs_has_needsrepair(mp)) {
1535 		xfs_warn(mp, "Filesystem needs repair.  Please run xfs_repair.");
1536 		error = -EFSCORRUPTED;
1537 		goto out_free_sb;
1538 	}
1539 
1540 	/*
1541 	 * Don't touch the filesystem if a user tool thinks it owns the primary
1542 	 * superblock.  mkfs doesn't clear the flag from secondary supers, so
1543 	 * we don't check them at all.
1544 	 */
1545 	if (mp->m_sb.sb_inprogress) {
1546 		xfs_warn(mp, "Offline file system operation in progress!");
1547 		error = -EFSCORRUPTED;
1548 		goto out_free_sb;
1549 	}
1550 
1551 	/*
1552 	 * Until this is fixed only page-sized or smaller data blocks work.
1553 	 */
1554 	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1555 		xfs_warn(mp,
1556 		"File system with blocksize %d bytes. "
1557 		"Only pagesize (%ld) or less will currently work.",
1558 				mp->m_sb.sb_blocksize, PAGE_SIZE);
1559 		error = -ENOSYS;
1560 		goto out_free_sb;
1561 	}
1562 
1563 	/* Ensure this filesystem fits in the page cache limits */
1564 	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1565 	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1566 		xfs_warn(mp,
1567 		"file system too large to be mounted on this system.");
1568 		error = -EFBIG;
1569 		goto out_free_sb;
1570 	}
1571 
1572 	/*
1573 	 * XFS block mappings use 54 bits to store the logical block offset.
1574 	 * This should suffice to handle the maximum file size that the VFS
1575 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1576 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1577 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1578 	 * to check this assertion.
1579 	 *
1580 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1581 	 * maximum pagecache offset in units of fs blocks.
1582 	 */
1583 	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1584 		xfs_warn(mp,
1585 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1586 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1587 			 XFS_MAX_FILEOFF);
1588 		error = -EINVAL;
1589 		goto out_free_sb;
1590 	}
1591 
1592 	error = xfs_filestream_mount(mp);
1593 	if (error)
1594 		goto out_free_sb;
1595 
1596 	/*
1597 	 * we must configure the block size in the superblock before we run the
1598 	 * full mount process as the mount process can lookup and cache inodes.
1599 	 */
1600 	sb->s_magic = XFS_SUPER_MAGIC;
1601 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1602 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1603 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1604 	sb->s_max_links = XFS_MAXLINK;
1605 	sb->s_time_gran = 1;
1606 	if (xfs_has_bigtime(mp)) {
1607 		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1608 		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1609 	} else {
1610 		sb->s_time_min = XFS_LEGACY_TIME_MIN;
1611 		sb->s_time_max = XFS_LEGACY_TIME_MAX;
1612 	}
1613 	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1614 	sb->s_iflags |= SB_I_CGROUPWB;
1615 
1616 	set_posix_acl_flag(sb);
1617 
1618 	/* version 5 superblocks support inode version counters. */
1619 	if (xfs_has_crc(mp))
1620 		sb->s_flags |= SB_I_VERSION;
1621 
1622 	if (xfs_has_dax_always(mp)) {
1623 		bool rtdev_is_dax = false, datadev_is_dax;
1624 
1625 		xfs_warn(mp,
1626 		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1627 
1628 		datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
1629 		if (mp->m_rtdev_targp)
1630 			rtdev_is_dax = xfs_buftarg_is_dax(sb,
1631 						mp->m_rtdev_targp);
1632 		if (!rtdev_is_dax && !datadev_is_dax) {
1633 			xfs_alert(mp,
1634 			"DAX unsupported by block device. Turning off DAX.");
1635 			xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1636 		}
1637 		if (xfs_has_reflink(mp)) {
1638 			xfs_alert(mp,
1639 		"DAX and reflink cannot be used together!");
1640 			error = -EINVAL;
1641 			goto out_filestream_unmount;
1642 		}
1643 	}
1644 
1645 	if (xfs_has_discard(mp)) {
1646 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1647 
1648 		if (!blk_queue_discard(q)) {
1649 			xfs_warn(mp, "mounting with \"discard\" option, but "
1650 					"the device does not support discard");
1651 			mp->m_features &= ~XFS_FEAT_DISCARD;
1652 		}
1653 	}
1654 
1655 	if (xfs_has_reflink(mp)) {
1656 		if (mp->m_sb.sb_rblocks) {
1657 			xfs_alert(mp,
1658 	"reflink not compatible with realtime device!");
1659 			error = -EINVAL;
1660 			goto out_filestream_unmount;
1661 		}
1662 
1663 		if (xfs_globals.always_cow) {
1664 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1665 			mp->m_always_cow = true;
1666 		}
1667 	}
1668 
1669 	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
1670 		xfs_alert(mp,
1671 	"reverse mapping btree not compatible with realtime device!");
1672 		error = -EINVAL;
1673 		goto out_filestream_unmount;
1674 	}
1675 
1676 	error = xfs_mountfs(mp);
1677 	if (error)
1678 		goto out_filestream_unmount;
1679 
1680 	root = igrab(VFS_I(mp->m_rootip));
1681 	if (!root) {
1682 		error = -ENOENT;
1683 		goto out_unmount;
1684 	}
1685 	sb->s_root = d_make_root(root);
1686 	if (!sb->s_root) {
1687 		error = -ENOMEM;
1688 		goto out_unmount;
1689 	}
1690 
1691 	return 0;
1692 
1693  out_filestream_unmount:
1694 	xfs_filestream_unmount(mp);
1695  out_free_sb:
1696 	xfs_freesb(mp);
1697  out_free_stats:
1698 	free_percpu(mp->m_stats.xs_stats);
1699  out_destroy_inodegc:
1700 	xfs_mount_list_del(mp);
1701 	xfs_inodegc_free_percpu(mp);
1702  out_destroy_counters:
1703 	xfs_destroy_percpu_counters(mp);
1704  out_destroy_workqueues:
1705 	xfs_destroy_mount_workqueues(mp);
1706  out_close_devices:
1707 	xfs_close_devices(mp);
1708  out_free_names:
1709 	sb->s_fs_info = NULL;
1710 	xfs_mount_free(mp);
1711 	return error;
1712 
1713  out_unmount:
1714 	xfs_filestream_unmount(mp);
1715 	xfs_unmountfs(mp);
1716 	goto out_free_sb;
1717 }
1718 
1719 static int
1720 xfs_fs_get_tree(
1721 	struct fs_context	*fc)
1722 {
1723 	return get_tree_bdev(fc, xfs_fs_fill_super);
1724 }
1725 
1726 static int
1727 xfs_remount_rw(
1728 	struct xfs_mount	*mp)
1729 {
1730 	struct xfs_sb		*sbp = &mp->m_sb;
1731 	int error;
1732 
1733 	if (xfs_has_norecovery(mp)) {
1734 		xfs_warn(mp,
1735 			"ro->rw transition prohibited on norecovery mount");
1736 		return -EINVAL;
1737 	}
1738 
1739 	if (xfs_sb_is_v5(sbp) &&
1740 	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1741 		xfs_warn(mp,
1742 	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1743 			(sbp->sb_features_ro_compat &
1744 				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1745 		return -EINVAL;
1746 	}
1747 
1748 	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1749 
1750 	/*
1751 	 * If this is the first remount to writeable state we might have some
1752 	 * superblock changes to update.
1753 	 */
1754 	if (mp->m_update_sb) {
1755 		error = xfs_sync_sb(mp, false);
1756 		if (error) {
1757 			xfs_warn(mp, "failed to write sb changes");
1758 			return error;
1759 		}
1760 		mp->m_update_sb = false;
1761 	}
1762 
1763 	/*
1764 	 * Fill out the reserve pool if it is empty. Use the stashed value if
1765 	 * it is non-zero, otherwise go with the default.
1766 	 */
1767 	xfs_restore_resvblks(mp);
1768 	xfs_log_work_queue(mp);
1769 	xfs_blockgc_start(mp);
1770 
1771 	/* Create the per-AG metadata reservation pool .*/
1772 	error = xfs_fs_reserve_ag_blocks(mp);
1773 	if (error && error != -ENOSPC)
1774 		return error;
1775 
1776 	/* Re-enable the background inode inactivation worker. */
1777 	xfs_inodegc_start(mp);
1778 
1779 	return 0;
1780 }
1781 
1782 static int
1783 xfs_remount_ro(
1784 	struct xfs_mount	*mp)
1785 {
1786 	struct xfs_icwalk	icw = {
1787 		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
1788 	};
1789 	int			error;
1790 
1791 	/* Flush all the dirty data to disk. */
1792 	error = sync_filesystem(mp->m_super);
1793 	if (error)
1794 		return error;
1795 
1796 	/*
1797 	 * Cancel background eofb scanning so it cannot race with the final
1798 	 * log force+buftarg wait and deadlock the remount.
1799 	 */
1800 	xfs_blockgc_stop(mp);
1801 
1802 	/*
1803 	 * Clear out all remaining COW staging extents and speculative post-EOF
1804 	 * preallocations so that we don't leave inodes requiring inactivation
1805 	 * cleanups during reclaim on a read-only mount.  We must process every
1806 	 * cached inode, so this requires a synchronous cache scan.
1807 	 */
1808 	error = xfs_blockgc_free_space(mp, &icw);
1809 	if (error) {
1810 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1811 		return error;
1812 	}
1813 
1814 	/*
1815 	 * Stop the inodegc background worker.  xfs_fs_reconfigure already
1816 	 * flushed all pending inodegc work when it sync'd the filesystem.
1817 	 * The VFS holds s_umount, so we know that inodes cannot enter
1818 	 * xfs_fs_destroy_inode during a remount operation.  In readonly mode
1819 	 * we send inodes straight to reclaim, so no inodes will be queued.
1820 	 */
1821 	xfs_inodegc_stop(mp);
1822 
1823 	/* Free the per-AG metadata reservation pool. */
1824 	error = xfs_fs_unreserve_ag_blocks(mp);
1825 	if (error) {
1826 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1827 		return error;
1828 	}
1829 
1830 	/*
1831 	 * Before we sync the metadata, we need to free up the reserve block
1832 	 * pool so that the used block count in the superblock on disk is
1833 	 * correct at the end of the remount. Stash the current reserve pool
1834 	 * size so that if we get remounted rw, we can return it to the same
1835 	 * size.
1836 	 */
1837 	xfs_save_resvblks(mp);
1838 
1839 	xfs_log_clean(mp);
1840 	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
1841 
1842 	return 0;
1843 }
1844 
1845 /*
1846  * Logically we would return an error here to prevent users from believing
1847  * they have changed mount options that in fact cannot be changed via remount.
1848  *
1849  * But unfortunately mount(8) sometimes adds all options from mtab and fstab
1850  * to the mount arguments, so we cannot blindly reject options; instead we
1851  * have to check each specified option against the currently set value and
1852  * reject it only if it actually differs.
1853  *
1854  * Until that is implemented we return success for every remount request, and
1855  * silently ignore all options that we can't actually change.
1856  */
1857 static int
1858 xfs_fs_reconfigure(
1859 	struct fs_context *fc)
1860 {
1861 	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
1862 	struct xfs_mount        *new_mp = fc->s_fs_info;
1863 	int			flags = fc->sb_flags;
1864 	int			error;
1865 
1866 	/* version 5 superblocks always support version counters. */
1867 	if (xfs_has_crc(mp))
1868 		fc->sb_flags |= SB_I_VERSION;
1869 
1870 	error = xfs_fs_validate_params(new_mp);
1871 	if (error)
1872 		return error;
1873 
1874 	/* inode32 -> inode64 */
1875 	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
1876 		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
1877 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1878 	}
1879 
1880 	/* inode64 -> inode32 */
1881 	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
1882 		mp->m_features |= XFS_FEAT_SMALL_INUMS;
1883 		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
1884 	}
1885 
1886 	/* ro -> rw */
1887 	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
1888 		error = xfs_remount_rw(mp);
1889 		if (error)
1890 			return error;
1891 	}
1892 
1893 	/* rw -> ro */
1894 	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
1895 		error = xfs_remount_ro(mp);
1896 		if (error)
1897 			return error;
1898 	}
1899 
1900 	return 0;
1901 }
1902 
1903 static void xfs_fs_free(
1904 	struct fs_context	*fc)
1905 {
1906 	struct xfs_mount	*mp = fc->s_fs_info;
1907 
1908 	/*
1909 	 * mp is stored in the fs_context when it is initialized.
1910 	 * mp is transferred to the superblock on a successful mount,
1911 	 * but if an error occurs before the transfer we have to free
1912 	 * it here.
1913 	 */
1914 	if (mp)
1915 		xfs_mount_free(mp);
1916 }
1917 
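/*
 * Operations the VFS invokes on our fs_context: mount option parsing,
 * superblock creation via get_tree, remount handling, and freeing the
 * context itself.
 */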
1918 static const struct fs_context_operations xfs_context_ops = {
1919 	.parse_param = xfs_fs_parse_param,
1920 	.get_tree    = xfs_fs_get_tree,
1921 	.reconfigure = xfs_fs_reconfigure,
1922 	.free        = xfs_fs_free,
1923 };
1924 
1925 /*
1926  * WARNING: do not initialise any parameters in this function that depend on
1927  * mount option parsing having already been performed as this can be called from
1928  * fsopen() before any parameters have been set.
1929  */
1930 static int xfs_init_fs_context(
1931 	struct fs_context	*fc)
1932 {
1933 	struct xfs_mount	*mp;
1934 
1935 	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1936 	if (!mp)
1937 		return -ENOMEM;
1938 
1939 	spin_lock_init(&mp->m_sb_lock);
1940 	spin_lock_init(&mp->m_agirotor_lock);
1941 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1942 	spin_lock_init(&mp->m_perag_lock);
1943 	mutex_init(&mp->m_growlock);
1944 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1945 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1946 	mp->m_kobj.kobject.kset = xfs_kset;
1947 	/*
1948 	 * We don't create the finobt per-ag space reservation until after log
1949 	 * recovery, so we must set this to true so that an ifree transaction
1950 	 * started during log recovery will not depend on space reservations
1951 	 * for finobt expansion.
1952 	 */
1953 	mp->m_finobt_nores = true;
1954 
1955 	/*
1956 	 * These can be overridden by the mount option parsing.
1957 	 */
1958 	mp->m_logbufs = -1;
1959 	mp->m_logbsize = -1;
1960 	mp->m_allocsize_log = 16; /* 64k */
1961 
1962 	fc->s_fs_info = mp;
1963 	fc->ops = &xfs_context_ops;
1964 
1965 	return 0;
1966 }
1967 
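/*
 * Glue XFS into the VFS.  New mounts start with xfs_init_fs_context(),
 * FS_REQUIRES_DEV marks us as needing a backing block device, and the
 * MODULE_ALIAS_FS() below allows the module to be auto-loaded when an
 * "xfs" mount is requested.
 */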
1968 static struct file_system_type xfs_fs_type = {
1969 	.owner			= THIS_MODULE,
1970 	.name			= "xfs",
1971 	.init_fs_context	= xfs_init_fs_context,
1972 	.parameters		= xfs_fs_parameters,
1973 	.kill_sb		= kill_block_super,
1974 	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1975 };
1976 MODULE_ALIAS_FS("xfs");
1977 
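/*
 * Create the slab caches (historically "zones") for the objects XFS
 * allocates most frequently.  On failure, any caches already created are
 * torn down in reverse order before returning -ENOMEM.
 */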
1978 STATIC int __init
1979 xfs_init_zones(void)
1980 {
1981 	xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1982 						sizeof(struct xlog_ticket),
1983 						0, 0, NULL);
1984 	if (!xfs_log_ticket_zone)
1985 		goto out;
1986 
1987 	xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1988 					sizeof(struct xfs_extent_free_item),
1989 					0, 0, NULL);
1990 	if (!xfs_bmap_free_item_zone)
1991 		goto out_destroy_log_ticket_zone;
1992 
1993 	xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1994 					       sizeof(struct xfs_btree_cur),
1995 					       0, 0, NULL);
1996 	if (!xfs_btree_cur_zone)
1997 		goto out_destroy_bmap_free_item_zone;
1998 
1999 	xfs_da_state_zone = kmem_cache_create("xfs_da_state",
2000 					      sizeof(struct xfs_da_state),
2001 					      0, 0, NULL);
2002 	if (!xfs_da_state_zone)
2003 		goto out_destroy_btree_cur_zone;
2004 
2005 	xfs_ifork_zone = kmem_cache_create("xfs_ifork",
2006 					   sizeof(struct xfs_ifork),
2007 					   0, 0, NULL);
2008 	if (!xfs_ifork_zone)
2009 		goto out_destroy_da_state_zone;
2010 
2011 	xfs_trans_zone = kmem_cache_create("xfs_trans",
2012 					   sizeof(struct xfs_trans),
2013 					   0, 0, NULL);
2014 	if (!xfs_trans_zone)
2015 		goto out_destroy_ifork_zone;
2016 
2017 
2018 	/*
2019 	 * The size of the zone allocated buf log item is the maximum
2020 	 * size possible under XFS.  This wastes a little bit of memory,
2021 	 * but it is much faster.
2022 	 */
2023 	xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
2024 					      sizeof(struct xfs_buf_log_item),
2025 					      0, 0, NULL);
2026 	if (!xfs_buf_item_zone)
2027 		goto out_destroy_trans_zone;
2028 
2029 	xfs_efd_zone = kmem_cache_create("xfs_efd_item",
2030 					(sizeof(struct xfs_efd_log_item) +
2031 					(XFS_EFD_MAX_FAST_EXTENTS - 1) *
2032 					sizeof(struct xfs_extent)),
2033 					0, 0, NULL);
2034 	if (!xfs_efd_zone)
2035 		goto out_destroy_buf_item_zone;
2036 
2037 	xfs_efi_zone = kmem_cache_create("xfs_efi_item",
2038 					 (sizeof(struct xfs_efi_log_item) +
2039 					 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
2040 					 sizeof(struct xfs_extent)),
2041 					 0, 0, NULL);
2042 	if (!xfs_efi_zone)
2043 		goto out_destroy_efd_zone;
2044 
2045 	xfs_inode_zone = kmem_cache_create("xfs_inode",
2046 					   sizeof(struct xfs_inode), 0,
2047 					   (SLAB_HWCACHE_ALIGN |
2048 					    SLAB_RECLAIM_ACCOUNT |
2049 					    SLAB_MEM_SPREAD | SLAB_ACCOUNT),
2050 					   xfs_fs_inode_init_once);
2051 	if (!xfs_inode_zone)
2052 		goto out_destroy_efi_zone;
2053 
2054 	xfs_ili_zone = kmem_cache_create("xfs_ili",
2055 					 sizeof(struct xfs_inode_log_item), 0,
2056 					 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
2057 					 NULL);
2058 	if (!xfs_ili_zone)
2059 		goto out_destroy_inode_zone;
2060 
2061 	xfs_icreate_zone = kmem_cache_create("xfs_icr",
2062 					     sizeof(struct xfs_icreate_item),
2063 					     0, 0, NULL);
2064 	if (!xfs_icreate_zone)
2065 		goto out_destroy_ili_zone;
2066 
2067 	xfs_rud_zone = kmem_cache_create("xfs_rud_item",
2068 					 sizeof(struct xfs_rud_log_item),
2069 					 0, 0, NULL);
2070 	if (!xfs_rud_zone)
2071 		goto out_destroy_icreate_zone;
2072 
2073 	xfs_rui_zone = kmem_cache_create("xfs_rui_item",
2074 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
2075 			0, 0, NULL);
2076 	if (!xfs_rui_zone)
2077 		goto out_destroy_rud_zone;
2078 
2079 	xfs_cud_zone = kmem_cache_create("xfs_cud_item",
2080 					 sizeof(struct xfs_cud_log_item),
2081 					 0, 0, NULL);
2082 	if (!xfs_cud_zone)
2083 		goto out_destroy_rui_zone;
2084 
2085 	xfs_cui_zone = kmem_cache_create("xfs_cui_item",
2086 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2087 			0, 0, NULL);
2088 	if (!xfs_cui_zone)
2089 		goto out_destroy_cud_zone;
2090 
2091 	xfs_bud_zone = kmem_cache_create("xfs_bud_item",
2092 					 sizeof(struct xfs_bud_log_item),
2093 					 0, 0, NULL);
2094 	if (!xfs_bud_zone)
2095 		goto out_destroy_cui_zone;
2096 
2097 	xfs_bui_zone = kmem_cache_create("xfs_bui_item",
2098 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2099 			0, 0, NULL);
2100 	if (!xfs_bui_zone)
2101 		goto out_destroy_bud_zone;
2102 
2103 	return 0;
2104 
2105  out_destroy_bud_zone:
2106 	kmem_cache_destroy(xfs_bud_zone);
2107  out_destroy_cui_zone:
2108 	kmem_cache_destroy(xfs_cui_zone);
2109  out_destroy_cud_zone:
2110 	kmem_cache_destroy(xfs_cud_zone);
2111  out_destroy_rui_zone:
2112 	kmem_cache_destroy(xfs_rui_zone);
2113  out_destroy_rud_zone:
2114 	kmem_cache_destroy(xfs_rud_zone);
2115  out_destroy_icreate_zone:
2116 	kmem_cache_destroy(xfs_icreate_zone);
2117  out_destroy_ili_zone:
2118 	kmem_cache_destroy(xfs_ili_zone);
2119  out_destroy_inode_zone:
2120 	kmem_cache_destroy(xfs_inode_zone);
2121  out_destroy_efi_zone:
2122 	kmem_cache_destroy(xfs_efi_zone);
2123  out_destroy_efd_zone:
2124 	kmem_cache_destroy(xfs_efd_zone);
2125  out_destroy_buf_item_zone:
2126 	kmem_cache_destroy(xfs_buf_item_zone);
2127  out_destroy_trans_zone:
2128 	kmem_cache_destroy(xfs_trans_zone);
2129  out_destroy_ifork_zone:
2130 	kmem_cache_destroy(xfs_ifork_zone);
2131  out_destroy_da_state_zone:
2132 	kmem_cache_destroy(xfs_da_state_zone);
2133  out_destroy_btree_cur_zone:
2134 	kmem_cache_destroy(xfs_btree_cur_zone);
2135  out_destroy_bmap_free_item_zone:
2136 	kmem_cache_destroy(xfs_bmap_free_item_zone);
2137  out_destroy_log_ticket_zone:
2138 	kmem_cache_destroy(xfs_log_ticket_zone);
2139  out:
2140 	return -ENOMEM;
2141 }
2142 
2143 STATIC void
2144 xfs_destroy_zones(void)
2145 {
2146 	/*
2147 	 * Make sure all delayed rcu free are flushed before we
2148 	 * destroy caches.
2149 	 */
2150 	rcu_barrier();
2151 	kmem_cache_destroy(xfs_bui_zone);
2152 	kmem_cache_destroy(xfs_bud_zone);
2153 	kmem_cache_destroy(xfs_cui_zone);
2154 	kmem_cache_destroy(xfs_cud_zone);
2155 	kmem_cache_destroy(xfs_rui_zone);
2156 	kmem_cache_destroy(xfs_rud_zone);
2157 	kmem_cache_destroy(xfs_icreate_zone);
2158 	kmem_cache_destroy(xfs_ili_zone);
2159 	kmem_cache_destroy(xfs_inode_zone);
2160 	kmem_cache_destroy(xfs_efi_zone);
2161 	kmem_cache_destroy(xfs_efd_zone);
2162 	kmem_cache_destroy(xfs_buf_item_zone);
2163 	kmem_cache_destroy(xfs_trans_zone);
2164 	kmem_cache_destroy(xfs_ifork_zone);
2165 	kmem_cache_destroy(xfs_da_state_zone);
2166 	kmem_cache_destroy(xfs_btree_cur_zone);
2167 	kmem_cache_destroy(xfs_bmap_free_item_zone);
2168 	kmem_cache_destroy(xfs_log_ticket_zone);
2169 }
2170 
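/*
 * Set up the global workqueues shared by all XFS mounts; per-mount
 * workqueues are created separately when a filesystem is mounted.
 */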
2171 STATIC int __init
2172 xfs_init_workqueues(void)
2173 {
2174 	/*
2175 	 * The allocation workqueue can be used in memory reclaim situations
2176 	 * (writepage path), and parallelism is only limited by the number of
2177 	 * AGs in all the filesystems mounted. Hence use the default large
2178 	 * max_active value for this workqueue.
2179 	 */
2180 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2181 			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2182 	if (!xfs_alloc_wq)
2183 		return -ENOMEM;
2184 
2185 	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2186 			0);
2187 	if (!xfs_discard_wq)
2188 		goto out_free_alloc_wq;
2189 
2190 	return 0;
2191 out_free_alloc_wq:
2192 	destroy_workqueue(xfs_alloc_wq);
2193 	return -ENOMEM;
2194 }
2195 
2196 STATIC void
2197 xfs_destroy_workqueues(void)
2198 {
2199 	destroy_workqueue(xfs_discard_wq);
2200 	destroy_workqueue(xfs_alloc_wq);
2201 }
2202 
2203 #ifdef CONFIG_HOTPLUG_CPU
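/*
 * CPU hot-unplug callback: when a CPU goes offline, hand every live mount
 * to xfs_inodegc_cpu_dead() so that inode inactivation work queued on that
 * CPU's per-cpu list is not stranded.
 */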
2204 static int
2205 xfs_cpu_dead(
2206 	unsigned int		cpu)
2207 {
2208 	struct xfs_mount	*mp, *n;
2209 
2210 	spin_lock(&xfs_mount_list_lock);
2211 	list_for_each_entry_safe(mp, n, &xfs_mount_list, m_mount_list) {
2212 		spin_unlock(&xfs_mount_list_lock);
2213 		xfs_inodegc_cpu_dead(mp, cpu);
2214 		spin_lock(&xfs_mount_list_lock);
2215 	}
2216 	spin_unlock(&xfs_mount_list_lock);
2217 	return 0;
2218 }
2219 
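/*
 * Register the CPUHP_XFS_DEAD teardown callback.  The _nocalls variant
 * registers the state without invoking callbacks for CPUs already in it.
 */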
2220 static int __init
2221 xfs_cpu_hotplug_init(void)
2222 {
2223 	int	error;
2224 
2225 	error = cpuhp_setup_state_nocalls(CPUHP_XFS_DEAD, "xfs:dead", NULL,
2226 			xfs_cpu_dead);
2227 	if (error < 0)
2228 		xfs_alert(NULL,
2229 "Failed to initialise CPU hotplug, error %d. XFS is non-functional.",
2230 			error);
2231 	return error;
2232 }
2233 
2234 static void
2235 xfs_cpu_hotplug_destroy(void)
2236 {
2237 	cpuhp_remove_state_nocalls(CPUHP_XFS_DEAD);
2238 }
2239 
2240 #else /* !CONFIG_HOTPLUG_CPU */
2241 static inline int xfs_cpu_hotplug_init(void) { return 0; }
2242 static inline void xfs_cpu_hotplug_destroy(void) {}
2243 #endif
2244 
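/*
 * Module initialisation: verify on-disk structure sizes, then bring up the
 * slab caches, workqueues, MRU cache, buffer cache, procfs/sysctl/sysfs
 * entries and the quota manager before registering the filesystem type.
 * Failures unwind the already-completed steps in reverse order.
 */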
2245 STATIC int __init
2246 init_xfs_fs(void)
2247 {
2248 	int			error;
2249 
2250 	xfs_check_ondisk_structs();
2251 
2252 	printk(KERN_INFO XFS_VERSION_STRING " with "
2253 			 XFS_BUILD_OPTIONS " enabled\n");
2254 
2255 	xfs_dir_startup();
2256 
2257 	error = xfs_cpu_hotplug_init();
2258 	if (error)
2259 		goto out;
2260 
2261 	error = xfs_init_zones();
2262 	if (error)
2263 		goto out_destroy_hp;
2264 
2265 	error = xfs_init_workqueues();
2266 	if (error)
2267 		goto out_destroy_zones;
2268 
2269 	error = xfs_mru_cache_init();
2270 	if (error)
2271 		goto out_destroy_wq;
2272 
2273 	error = xfs_buf_init();
2274 	if (error)
2275 		goto out_mru_cache_uninit;
2276 
2277 	error = xfs_init_procfs();
2278 	if (error)
2279 		goto out_buf_terminate;
2280 
2281 	error = xfs_sysctl_register();
2282 	if (error)
2283 		goto out_cleanup_procfs;
2284 
2285 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2286 	if (!xfs_kset) {
2287 		error = -ENOMEM;
2288 		goto out_sysctl_unregister;
2289 	}
2290 
2291 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2292 
2293 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2294 	if (!xfsstats.xs_stats) {
2295 		error = -ENOMEM;
2296 		goto out_kset_unregister;
2297 	}
2298 
2299 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2300 			       "stats");
2301 	if (error)
2302 		goto out_free_stats;
2303 
2304 #ifdef DEBUG
2305 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2306 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2307 	if (error)
2308 		goto out_remove_stats_kobj;
2309 #endif
2310 
2311 	error = xfs_qm_init();
2312 	if (error)
2313 		goto out_remove_dbg_kobj;
2314 
2315 	error = register_filesystem(&xfs_fs_type);
2316 	if (error)
2317 		goto out_qm_exit;
2318 	return 0;
2319 
2320  out_qm_exit:
2321 	xfs_qm_exit();
2322  out_remove_dbg_kobj:
2323 #ifdef DEBUG
2324 	xfs_sysfs_del(&xfs_dbg_kobj);
2325  out_remove_stats_kobj:
2326 #endif
2327 	xfs_sysfs_del(&xfsstats.xs_kobj);
2328  out_free_stats:
2329 	free_percpu(xfsstats.xs_stats);
2330  out_kset_unregister:
2331 	kset_unregister(xfs_kset);
2332  out_sysctl_unregister:
2333 	xfs_sysctl_unregister();
2334  out_cleanup_procfs:
2335 	xfs_cleanup_procfs();
2336  out_buf_terminate:
2337 	xfs_buf_terminate();
2338  out_mru_cache_uninit:
2339 	xfs_mru_cache_uninit();
2340  out_destroy_wq:
2341 	xfs_destroy_workqueues();
2342  out_destroy_zones:
2343 	xfs_destroy_zones();
2344  out_destroy_hp:
2345 	xfs_cpu_hotplug_destroy();
2346  out:
2347 	return error;
2348 }
2349 
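/*
 * Module teardown: undo everything init_xfs_fs() set up, in roughly the
 * reverse order it was created.
 */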
2350 STATIC void __exit
2351 exit_xfs_fs(void)
2352 {
2353 	xfs_qm_exit();
2354 	unregister_filesystem(&xfs_fs_type);
2355 #ifdef DEBUG
2356 	xfs_sysfs_del(&xfs_dbg_kobj);
2357 #endif
2358 	xfs_sysfs_del(&xfsstats.xs_kobj);
2359 	free_percpu(xfsstats.xs_stats);
2360 	kset_unregister(xfs_kset);
2361 	xfs_sysctl_unregister();
2362 	xfs_cleanup_procfs();
2363 	xfs_buf_terminate();
2364 	xfs_mru_cache_uninit();
2365 	xfs_destroy_workqueues();
2366 	xfs_destroy_zones();
2367 	xfs_uuid_table_free();
2368 	xfs_cpu_hotplug_destroy();
2369 }
2370 
2371 module_init(init_xfs_fs);
2372 module_exit(exit_xfs_fs);
2373 
2374 MODULE_AUTHOR("Silicon Graphics, Inc.");
2375 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2376 MODULE_LICENSE("GPL");
2377 MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
2378