1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 
7 #include "xfs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_btree.h"
16 #include "xfs_bmap.h"
17 #include "xfs_alloc.h"
18 #include "xfs_fsops.h"
19 #include "xfs_trans.h"
20 #include "xfs_buf_item.h"
21 #include "xfs_log.h"
22 #include "xfs_log_priv.h"
23 #include "xfs_dir2.h"
24 #include "xfs_extfree_item.h"
25 #include "xfs_mru_cache.h"
26 #include "xfs_inode_item.h"
27 #include "xfs_icache.h"
28 #include "xfs_trace.h"
29 #include "xfs_icreate_item.h"
30 #include "xfs_filestream.h"
31 #include "xfs_quota.h"
32 #include "xfs_sysfs.h"
33 #include "xfs_ondisk.h"
34 #include "xfs_rmap_item.h"
35 #include "xfs_refcount_item.h"
36 #include "xfs_bmap_item.h"
37 #include "xfs_reflink.h"
38 
39 #include <linux/magic.h>
40 #include <linux/parser.h>
41 
42 static const struct super_operations xfs_super_operations;
43 struct bio_set xfs_ioend_bioset;
44 
45 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
46 #ifdef DEBUG
47 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
48 #endif
49 
50 /*
51  * Table driven mount option parser.
52  */
53 enum {
54 	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
55 	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
56 	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
57 	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
58 	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
59 	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
60 	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
61 	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
62 	Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
63 };
64 
65 static const match_table_t tokens = {
66 	{Opt_logbufs,	"logbufs=%u"},	/* number of XFS log buffers */
67 	{Opt_logbsize,	"logbsize=%s"},	/* size of XFS log buffers */
68 	{Opt_logdev,	"logdev=%s"},	/* log device */
69 	{Opt_rtdev,	"rtdev=%s"},	/* realtime I/O device */
70 	{Opt_biosize,	"biosize=%u"},	/* log2 of preferred buffered io size */
71 	{Opt_wsync,	"wsync"},	/* safe-mode nfs compatible mount */
72 	{Opt_noalign,	"noalign"},	/* turn off stripe alignment */
73 	{Opt_swalloc,	"swalloc"},	/* turn on stripe width allocation */
74 	{Opt_sunit,	"sunit=%u"},	/* data volume stripe unit */
75 	{Opt_swidth,	"swidth=%u"},	/* data volume stripe width */
76 	{Opt_nouuid,	"nouuid"},	/* ignore filesystem UUID */
77 	{Opt_grpid,	"grpid"},	/* group-ID from parent directory */
78 	{Opt_nogrpid,	"nogrpid"},	/* group-ID from current process */
79 	{Opt_bsdgroups,	"bsdgroups"},	/* group-ID from parent directory */
80 	{Opt_sysvgroups,"sysvgroups"},	/* group-ID from current process */
81 	{Opt_allocsize,	"allocsize=%s"},/* preferred allocation size */
82 	{Opt_norecovery,"norecovery"},	/* don't run XFS recovery */
83 	{Opt_inode64,	"inode64"},	/* inodes can be allocated anywhere */
84 	{Opt_inode32,   "inode32"},	/* inode allocation limited to
85 					 * XFS_MAXINUMBER_32 */
86 	{Opt_ikeep,	"ikeep"},	/* do not free empty inode clusters */
87 	{Opt_noikeep,	"noikeep"},	/* free empty inode clusters */
88 	{Opt_largeio,	"largeio"},	/* report large I/O sizes in stat() */
89 	{Opt_nolargeio,	"nolargeio"},	/* do not report large I/O sizes
90 					 * in stat(). */
91 	{Opt_attr2,	"attr2"},	/* do use attr2 attribute format */
92 	{Opt_noattr2,	"noattr2"},	/* do not use attr2 attribute format */
93 	{Opt_filestreams,"filestreams"},/* use filestreams allocator */
94 	{Opt_quota,	"quota"},	/* disk quotas (user) */
95 	{Opt_noquota,	"noquota"},	/* no quotas */
96 	{Opt_usrquota,	"usrquota"},	/* user quota enabled */
97 	{Opt_grpquota,	"grpquota"},	/* group quota enabled */
98 	{Opt_prjquota,	"prjquota"},	/* project quota enabled */
99 	{Opt_uquota,	"uquota"},	/* user quota (IRIX variant) */
100 	{Opt_gquota,	"gquota"},	/* group quota (IRIX variant) */
101 	{Opt_pquota,	"pquota"},	/* project quota (IRIX variant) */
102 	{Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
103 	{Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
104 	{Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
105 	{Opt_qnoenforce, "qnoenforce"},	/* same as uqnoenforce */
106 	{Opt_discard,	"discard"},	/* Discard unused blocks */
107 	{Opt_nodiscard,	"nodiscard"},	/* Do not discard unused blocks */
108 	{Opt_dax,	"dax"},		/* Enable direct access to bdev pages */
109 	{Opt_err,	NULL},
110 };
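/*
 * Illustrative note (added for clarity, not part of the original source):
 * a mount option string such as "logbufs=8,logbsize=32k,logdev=/dev/sdb1"
 * is split on commas by strsep() below and each piece is matched against
 * this table with match_token(); the "%u"/"%s" conversions capture the
 * value part into the args[] substrings, e.g. "8" for Opt_logbufs and
 * "32k" for Opt_logbsize.  The device path is a made-up example.
 */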
111 
112 
113 STATIC int
114 suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
115 {
116 	int	last, shift_left_factor = 0, _res;
117 	char	*value;
118 	int	ret = 0;
119 
120 	value = match_strdup(s);
121 	if (!value)
122 		return -ENOMEM;
123 
124 	last = strlen(value) - 1;
125 	if (value[last] == 'K' || value[last] == 'k') {
126 		shift_left_factor = 10;
127 		value[last] = '\0';
128 	}
129 	if (value[last] == 'M' || value[last] == 'm') {
130 		shift_left_factor = 20;
131 		value[last] = '\0';
132 	}
133 	if (value[last] == 'G' || value[last] == 'g') {
134 		shift_left_factor = 30;
135 		value[last] = '\0';
136 	}
137 
138 	if (kstrtoint(value, base, &_res))
139 		ret = -EINVAL;
140 	kfree(value);
141 	*res = _res << shift_left_factor;
142 	return ret;
143 }
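/*
 * Worked example (added for clarity, not in the original source):
 * suffix_kstrtoint() parses "32k" by stripping the trailing 'k',
 * converting "32" with kstrtoint() and shifting left by 10, giving
 * 32 << 10 = 32768.  Likewise "1g" yields 1 << 30 = 1073741824, and a
 * plain "4096" is returned unshifted.
 */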
144 
145 /*
146  * This function fills in xfs_mount_t fields based on mount args.
147  * Note: the superblock has _not_ yet been read in.
148  *
149  * Note that this function leaks the various device name allocations on
150  * failure.  The caller takes care of them.
151  *
152  * *sb is const because this is also used to test options on the remount
153  * path, and we don't want this to have any side effects at remount time.
154  * Today this function does not change *sb, but just to future-proof...
155  */
156 STATIC int
157 xfs_parseargs(
158 	struct xfs_mount	*mp,
159 	char			*options)
160 {
161 	const struct super_block *sb = mp->m_super;
162 	char			*p;
163 	substring_t		args[MAX_OPT_ARGS];
164 	int			dsunit = 0;
165 	int			dswidth = 0;
166 	int			iosize = 0;
167 	uint8_t			iosizelog = 0;
168 
169 	/*
170 	 * set up the mount name first so all the errors will refer to the
171 	 * correct device.
172 	 */
173 	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
174 	if (!mp->m_fsname)
175 		return -ENOMEM;
176 	mp->m_fsname_len = strlen(mp->m_fsname) + 1;
177 
178 	/*
179 	 * Copy binary VFS mount flags we are interested in.
180 	 */
181 	if (sb_rdonly(sb))
182 		mp->m_flags |= XFS_MOUNT_RDONLY;
183 	if (sb->s_flags & SB_DIRSYNC)
184 		mp->m_flags |= XFS_MOUNT_DIRSYNC;
185 	if (sb->s_flags & SB_SYNCHRONOUS)
186 		mp->m_flags |= XFS_MOUNT_WSYNC;
187 
188 	/*
189 	 * Set some default flags that could be cleared by the mount option
190 	 * parsing.
191 	 */
192 	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
193 
194 	/*
195 	 * These can be overridden by the mount option parsing.
196 	 */
197 	mp->m_logbufs = -1;
198 	mp->m_logbsize = -1;
199 
200 	if (!options)
201 		goto done;
202 
203 	while ((p = strsep(&options, ",")) != NULL) {
204 		int		token;
205 
206 		if (!*p)
207 			continue;
208 
209 		token = match_token(p, tokens, args);
210 		switch (token) {
211 		case Opt_logbufs:
212 			if (match_int(args, &mp->m_logbufs))
213 				return -EINVAL;
214 			break;
215 		case Opt_logbsize:
216 			if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
217 				return -EINVAL;
218 			break;
219 		case Opt_logdev:
220 			kfree(mp->m_logname);
221 			mp->m_logname = match_strdup(args);
222 			if (!mp->m_logname)
223 				return -ENOMEM;
224 			break;
225 		case Opt_rtdev:
226 			kfree(mp->m_rtname);
227 			mp->m_rtname = match_strdup(args);
228 			if (!mp->m_rtname)
229 				return -ENOMEM;
230 			break;
231 		case Opt_allocsize:
232 		case Opt_biosize:
233 			if (suffix_kstrtoint(args, 10, &iosize))
234 				return -EINVAL;
235 			iosizelog = ffs(iosize) - 1;
236 			break;
237 		case Opt_grpid:
238 		case Opt_bsdgroups:
239 			mp->m_flags |= XFS_MOUNT_GRPID;
240 			break;
241 		case Opt_nogrpid:
242 		case Opt_sysvgroups:
243 			mp->m_flags &= ~XFS_MOUNT_GRPID;
244 			break;
245 		case Opt_wsync:
246 			mp->m_flags |= XFS_MOUNT_WSYNC;
247 			break;
248 		case Opt_norecovery:
249 			mp->m_flags |= XFS_MOUNT_NORECOVERY;
250 			break;
251 		case Opt_noalign:
252 			mp->m_flags |= XFS_MOUNT_NOALIGN;
253 			break;
254 		case Opt_swalloc:
255 			mp->m_flags |= XFS_MOUNT_SWALLOC;
256 			break;
257 		case Opt_sunit:
258 			if (match_int(args, &dsunit))
259 				return -EINVAL;
260 			break;
261 		case Opt_swidth:
262 			if (match_int(args, &dswidth))
263 				return -EINVAL;
264 			break;
265 		case Opt_inode32:
266 			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
267 			break;
268 		case Opt_inode64:
269 			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
270 			break;
271 		case Opt_nouuid:
272 			mp->m_flags |= XFS_MOUNT_NOUUID;
273 			break;
274 		case Opt_ikeep:
275 			mp->m_flags |= XFS_MOUNT_IKEEP;
276 			break;
277 		case Opt_noikeep:
278 			mp->m_flags &= ~XFS_MOUNT_IKEEP;
279 			break;
280 		case Opt_largeio:
281 			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
282 			break;
283 		case Opt_nolargeio:
284 			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
285 			break;
286 		case Opt_attr2:
287 			mp->m_flags |= XFS_MOUNT_ATTR2;
288 			break;
289 		case Opt_noattr2:
290 			mp->m_flags &= ~XFS_MOUNT_ATTR2;
291 			mp->m_flags |= XFS_MOUNT_NOATTR2;
292 			break;
293 		case Opt_filestreams:
294 			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
295 			break;
296 		case Opt_noquota:
297 			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
298 			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
299 			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
300 			break;
301 		case Opt_quota:
302 		case Opt_uquota:
303 		case Opt_usrquota:
304 			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
305 					 XFS_UQUOTA_ENFD);
306 			break;
307 		case Opt_qnoenforce:
308 		case Opt_uqnoenforce:
309 			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
310 			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
311 			break;
312 		case Opt_pquota:
313 		case Opt_prjquota:
314 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
315 					 XFS_PQUOTA_ENFD);
316 			break;
317 		case Opt_pqnoenforce:
318 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
319 			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
320 			break;
321 		case Opt_gquota:
322 		case Opt_grpquota:
323 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
324 					 XFS_GQUOTA_ENFD);
325 			break;
326 		case Opt_gqnoenforce:
327 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
328 			mp->m_qflags &= ~XFS_GQUOTA_ENFD;
329 			break;
330 		case Opt_discard:
331 			mp->m_flags |= XFS_MOUNT_DISCARD;
332 			break;
333 		case Opt_nodiscard:
334 			mp->m_flags &= ~XFS_MOUNT_DISCARD;
335 			break;
336 #ifdef CONFIG_FS_DAX
337 		case Opt_dax:
338 			mp->m_flags |= XFS_MOUNT_DAX;
339 			break;
340 #endif
341 		default:
342 			xfs_warn(mp, "unknown mount option [%s].", p);
343 			return -EINVAL;
344 		}
345 	}
346 
347 	/*
348 	 * no recovery flag requires a read-only mount
349 	 */
350 	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
351 	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
352 		xfs_warn(mp, "no-recovery mounts must be read-only.");
353 		return -EINVAL;
354 	}
355 
356 	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
357 		xfs_warn(mp,
358 	"sunit and swidth options incompatible with the noalign option");
359 		return -EINVAL;
360 	}
361 
362 #ifndef CONFIG_XFS_QUOTA
363 	if (XFS_IS_QUOTA_RUNNING(mp)) {
364 		xfs_warn(mp, "quota support not available in this kernel.");
365 		return -EINVAL;
366 	}
367 #endif
368 
369 	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
370 		xfs_warn(mp, "sunit and swidth must be specified together");
371 		return -EINVAL;
372 	}
373 
374 	if (dsunit && (dswidth % dsunit != 0)) {
375 		xfs_warn(mp,
376 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
377 			dswidth, dsunit);
378 		return -EINVAL;
379 	}
380 
381 done:
382 	if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
383 		/*
384 		 * At this point the superblock has not been read
385 		 * in, therefore we do not know the block size.
386 		 * Before the mount call ends we will convert
387 		 * these to FSBs.
388 		 */
389 		mp->m_dalign = dsunit;
390 		mp->m_swidth = dswidth;
391 	}
392 
393 	if (mp->m_logbufs != -1 &&
394 	    mp->m_logbufs != 0 &&
395 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
396 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
397 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
398 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
399 		return -EINVAL;
400 	}
401 	if (mp->m_logbsize != -1 &&
402 	    mp->m_logbsize !=  0 &&
403 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
404 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
405 	     !is_power_of_2(mp->m_logbsize))) {
406 		xfs_warn(mp,
407 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
408 			mp->m_logbsize);
409 		return -EINVAL;
410 	}
411 
412 	if (iosizelog) {
413 		if (iosizelog > XFS_MAX_IO_LOG ||
414 		    iosizelog < XFS_MIN_IO_LOG) {
415 			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
416 				iosizelog, XFS_MIN_IO_LOG,
417 				XFS_MAX_IO_LOG);
418 			return -EINVAL;
419 		}
420 
421 		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
422 		mp->m_readio_log = iosizelog;
423 		mp->m_writeio_log = iosizelog;
424 	}
425 
426 	return 0;
427 }
428 
429 struct proc_xfs_info {
430 	uint64_t	flag;
431 	char		*str;
432 };
433 
434 STATIC void
435 xfs_showargs(
436 	struct xfs_mount	*mp,
437 	struct seq_file		*m)
438 {
439 	static struct proc_xfs_info xfs_info_set[] = {
440 		/* the few simple ones we can get from the mount struct */
441 		{ XFS_MOUNT_IKEEP,		",ikeep" },
442 		{ XFS_MOUNT_WSYNC,		",wsync" },
443 		{ XFS_MOUNT_NOALIGN,		",noalign" },
444 		{ XFS_MOUNT_SWALLOC,		",swalloc" },
445 		{ XFS_MOUNT_NOUUID,		",nouuid" },
446 		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
447 		{ XFS_MOUNT_ATTR2,		",attr2" },
448 		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
449 		{ XFS_MOUNT_GRPID,		",grpid" },
450 		{ XFS_MOUNT_DISCARD,		",discard" },
451 		{ XFS_MOUNT_SMALL_INUMS,	",inode32" },
452 		{ XFS_MOUNT_DAX,		",dax" },
453 		{ 0, NULL }
454 	};
455 	static struct proc_xfs_info xfs_info_unset[] = {
456 		/* the few simple ones we can get from the mount struct */
457 		{ XFS_MOUNT_COMPAT_IOSIZE,	",largeio" },
458 		{ XFS_MOUNT_SMALL_INUMS,	",inode64" },
459 		{ 0, NULL }
460 	};
461 	struct proc_xfs_info	*xfs_infop;
462 
463 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
464 		if (mp->m_flags & xfs_infop->flag)
465 			seq_puts(m, xfs_infop->str);
466 	}
467 	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
468 		if (!(mp->m_flags & xfs_infop->flag))
469 			seq_puts(m, xfs_infop->str);
470 	}
471 
472 	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
473 		seq_printf(m, ",allocsize=%dk",
474 				(int)(1 << mp->m_writeio_log) >> 10);
475 
476 	if (mp->m_logbufs > 0)
477 		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
478 	if (mp->m_logbsize > 0)
479 		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
480 
481 	if (mp->m_logname)
482 		seq_show_option(m, "logdev", mp->m_logname);
483 	if (mp->m_rtname)
484 		seq_show_option(m, "rtdev", mp->m_rtname);
485 
486 	if (mp->m_dalign > 0)
487 		seq_printf(m, ",sunit=%d",
488 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
489 	if (mp->m_swidth > 0)
490 		seq_printf(m, ",swidth=%d",
491 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
492 
493 	if (mp->m_qflags & XFS_UQUOTA_ACCT) {
494 		if (mp->m_qflags & XFS_UQUOTA_ENFD)
495 			seq_puts(m, ",usrquota");
496 		else
497 			seq_puts(m, ",uqnoenforce");
498 	}
499 
500 	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
501 		if (mp->m_qflags & XFS_PQUOTA_ENFD)
502 			seq_puts(m, ",prjquota");
503 		else
504 			seq_puts(m, ",pqnoenforce");
505 	}
506 	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
507 		if (mp->m_qflags & XFS_GQUOTA_ENFD)
508 			seq_puts(m, ",grpquota");
509 		else
510 			seq_puts(m, ",gqnoenforce");
511 	}
512 
513 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
514 		seq_puts(m, ",noquota");
515 }
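/*
 * Example (illustrative, not from the original source): a default mount
 * with attr2 and 64-bit inode allocation would emit ",attr2,inode64,noquota"
 * here after the generic VFS options in /proc/mounts; logbufs=, logbsize=,
 * sunit= and swidth= are printed only when the corresponding in-core
 * values are positive.
 */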
516 
517 /*
518  * Set parameters for inode allocation heuristics, taking into account
519  * filesystem size and inode32/inode64 mount options; i.e. specifically
520  * whether or not XFS_MOUNT_SMALL_INUMS is set.
521  *
522  * Inode allocation patterns are altered only if inode32 is requested
523  * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
524  * If altered, XFS_MOUNT_32BITINODES is set as well.
525  *
526  * An agcount independent of that in the mount structure is provided
527  * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
528  * to the potentially higher ag count.
529  *
530  * Returns the maximum AG index which may contain inodes.
531  */
532 xfs_agnumber_t
533 xfs_set_inode_alloc(
534 	struct xfs_mount *mp,
535 	xfs_agnumber_t	agcount)
536 {
537 	xfs_agnumber_t	index;
538 	xfs_agnumber_t	maxagi = 0;
539 	xfs_sb_t	*sbp = &mp->m_sb;
540 	xfs_agnumber_t	max_metadata;
541 	xfs_agino_t	agino;
542 	xfs_ino_t	ino;
543 
544 	/*
545 	 * Calculate how much should be reserved for inodes to meet
546 	 * the max inode percentage.  Used only for inode32.
547 	 */
548 	if (M_IGEO(mp)->maxicount) {
549 		uint64_t	icount;
550 
551 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
552 		do_div(icount, 100);
553 		icount += sbp->sb_agblocks - 1;
554 		do_div(icount, sbp->sb_agblocks);
555 		max_metadata = icount;
556 	} else {
557 		max_metadata = agcount;
558 	}
559 
560 	/* Get the last possible inode in the filesystem */
561 	agino =	XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
562 	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
563 
564 	/*
565 	 * If user asked for no more than 32-bit inodes, and the fs is
566 	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
567 	 * the allocator to accommodate the request.
568 	 */
569 	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
570 		mp->m_flags |= XFS_MOUNT_32BITINODES;
571 	else
572 		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
573 
574 	for (index = 0; index < agcount; index++) {
575 		struct xfs_perag	*pag;
576 
577 		ino = XFS_AGINO_TO_INO(mp, index, agino);
578 
579 		pag = xfs_perag_get(mp, index);
580 
581 		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
582 			if (ino > XFS_MAXINUMBER_32) {
583 				pag->pagi_inodeok = 0;
584 				pag->pagf_metadata = 0;
585 			} else {
586 				pag->pagi_inodeok = 1;
587 				maxagi++;
588 				if (index < max_metadata)
589 					pag->pagf_metadata = 1;
590 				else
591 					pag->pagf_metadata = 0;
592 			}
593 		} else {
594 			pag->pagi_inodeok = 1;
595 			pag->pagf_metadata = 0;
596 		}
597 
598 		xfs_perag_put(pag);
599 	}
600 
601 	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
602 }
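/*
 * Worked example (added for clarity, not part of the original source):
 * with sb_dblocks = 1,000,000, sb_imax_pct = 25 and sb_agblocks = 100,000,
 * the reservation above is 1,000,000 * 25 / 100 = 250,000 blocks, which
 * rounds up to ceil(250,000 / 100,000) = 3 AGs preferred for metadata
 * under inode32.  XFS_MOUNT_32BITINODES is only set when the highest
 * possible inode number in the filesystem exceeds XFS_MAXINUMBER_32.
 */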
603 
604 STATIC int
605 xfs_blkdev_get(
606 	xfs_mount_t		*mp,
607 	const char		*name,
608 	struct block_device	**bdevp)
609 {
610 	int			error = 0;
611 
612 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
613 				    mp);
614 	if (IS_ERR(*bdevp)) {
615 		error = PTR_ERR(*bdevp);
616 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
617 	}
618 
619 	return error;
620 }
621 
622 STATIC void
623 xfs_blkdev_put(
624 	struct block_device	*bdev)
625 {
626 	if (bdev)
627 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
628 }
629 
630 void
631 xfs_blkdev_issue_flush(
632 	xfs_buftarg_t		*buftarg)
633 {
634 	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
635 }
636 
637 STATIC void
638 xfs_close_devices(
639 	struct xfs_mount	*mp)
640 {
641 	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
642 
643 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
644 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
645 		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
646 
647 		xfs_free_buftarg(mp->m_logdev_targp);
648 		xfs_blkdev_put(logdev);
649 		fs_put_dax(dax_logdev);
650 	}
651 	if (mp->m_rtdev_targp) {
652 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
653 		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
654 
655 		xfs_free_buftarg(mp->m_rtdev_targp);
656 		xfs_blkdev_put(rtdev);
657 		fs_put_dax(dax_rtdev);
658 	}
659 	xfs_free_buftarg(mp->m_ddev_targp);
660 	fs_put_dax(dax_ddev);
661 }
662 
663 /*
664  * The file system configurations are:
665  *	(1) device (partition) with data and internal log
666  *	(2) logical volume with data and log subvolumes.
667  *	(3) logical volume with data, log, and realtime subvolumes.
668  *
669  * We only have to handle opening the log and realtime volumes here if
670  * they are present.  The data subvolume has already been opened by
671  * get_sb_bdev() and is stored in sb->s_bdev.
672  */
673 STATIC int
674 xfs_open_devices(
675 	struct xfs_mount	*mp)
676 {
677 	struct block_device	*ddev = mp->m_super->s_bdev;
678 	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
679 	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
680 	struct block_device	*logdev = NULL, *rtdev = NULL;
681 	int			error;
682 
683 	/*
684 	 * Open real time and log devices - order is important.
685 	 */
686 	if (mp->m_logname) {
687 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
688 		if (error)
689 			goto out;
690 		dax_logdev = fs_dax_get_by_bdev(logdev);
691 	}
692 
693 	if (mp->m_rtname) {
694 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
695 		if (error)
696 			goto out_close_logdev;
697 
698 		if (rtdev == ddev || rtdev == logdev) {
699 			xfs_warn(mp,
700 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
701 			error = -EINVAL;
702 			goto out_close_rtdev;
703 		}
704 		dax_rtdev = fs_dax_get_by_bdev(rtdev);
705 	}
706 
707 	/*
708 	 * Setup xfs_mount buffer target pointers
709 	 */
710 	error = -ENOMEM;
711 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
712 	if (!mp->m_ddev_targp)
713 		goto out_close_rtdev;
714 
715 	if (rtdev) {
716 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
717 		if (!mp->m_rtdev_targp)
718 			goto out_free_ddev_targ;
719 	}
720 
721 	if (logdev && logdev != ddev) {
722 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
723 		if (!mp->m_logdev_targp)
724 			goto out_free_rtdev_targ;
725 	} else {
726 		mp->m_logdev_targp = mp->m_ddev_targp;
727 	}
728 
729 	return 0;
730 
731  out_free_rtdev_targ:
732 	if (mp->m_rtdev_targp)
733 		xfs_free_buftarg(mp->m_rtdev_targp);
734  out_free_ddev_targ:
735 	xfs_free_buftarg(mp->m_ddev_targp);
736  out_close_rtdev:
737 	xfs_blkdev_put(rtdev);
738 	fs_put_dax(dax_rtdev);
739  out_close_logdev:
740 	if (logdev && logdev != ddev) {
741 		xfs_blkdev_put(logdev);
742 		fs_put_dax(dax_logdev);
743 	}
744  out:
745 	fs_put_dax(dax_ddev);
746 	return error;
747 }
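/*
 * Illustrative mount invocations for the three configurations described
 * in the comment above xfs_open_devices() (device paths are made-up
 * examples, not from the original source):
 *
 *   mount /dev/sda1 /mnt                                (internal log)
 *   mount -o logdev=/dev/sdb1 /dev/sda1 /mnt            (external log)
 *   mount -o logdev=/dev/sdb1,rtdev=/dev/sdc1 /dev/sda1 /mnt
 *                                                       (log + realtime)
 */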
748 
749 /*
750  * Setup xfs_mount buffer target pointers based on superblock
751  */
752 STATIC int
753 xfs_setup_devices(
754 	struct xfs_mount	*mp)
755 {
756 	int			error;
757 
758 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
759 	if (error)
760 		return error;
761 
762 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
763 		unsigned int	log_sector_size = BBSIZE;
764 
765 		if (xfs_sb_version_hassector(&mp->m_sb))
766 			log_sector_size = mp->m_sb.sb_logsectsize;
767 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
768 					    log_sector_size);
769 		if (error)
770 			return error;
771 	}
772 	if (mp->m_rtdev_targp) {
773 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
774 					    mp->m_sb.sb_sectsize);
775 		if (error)
776 			return error;
777 	}
778 
779 	return 0;
780 }
781 
782 STATIC int
783 xfs_init_mount_workqueues(
784 	struct xfs_mount	*mp)
785 {
786 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
787 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
788 	if (!mp->m_buf_workqueue)
789 		goto out;
790 
791 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
792 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
793 	if (!mp->m_unwritten_workqueue)
794 		goto out_destroy_buf;
795 
796 	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
797 			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
798 			0, mp->m_fsname);
799 	if (!mp->m_cil_workqueue)
800 		goto out_destroy_unwritten;
801 
802 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
803 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
804 	if (!mp->m_reclaim_workqueue)
805 		goto out_destroy_cil;
806 
807 	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
808 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
809 	if (!mp->m_eofblocks_workqueue)
810 		goto out_destroy_reclaim;
811 
812 	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
813 					       mp->m_fsname);
814 	if (!mp->m_sync_workqueue)
815 		goto out_destroy_eofb;
816 
817 	return 0;
818 
819 out_destroy_eofb:
820 	destroy_workqueue(mp->m_eofblocks_workqueue);
821 out_destroy_reclaim:
822 	destroy_workqueue(mp->m_reclaim_workqueue);
823 out_destroy_cil:
824 	destroy_workqueue(mp->m_cil_workqueue);
825 out_destroy_unwritten:
826 	destroy_workqueue(mp->m_unwritten_workqueue);
827 out_destroy_buf:
828 	destroy_workqueue(mp->m_buf_workqueue);
829 out:
830 	return -ENOMEM;
831 }
832 
833 STATIC void
834 xfs_destroy_mount_workqueues(
835 	struct xfs_mount	*mp)
836 {
837 	destroy_workqueue(mp->m_sync_workqueue);
838 	destroy_workqueue(mp->m_eofblocks_workqueue);
839 	destroy_workqueue(mp->m_reclaim_workqueue);
840 	destroy_workqueue(mp->m_cil_workqueue);
841 	destroy_workqueue(mp->m_unwritten_workqueue);
842 	destroy_workqueue(mp->m_buf_workqueue);
843 }
844 
845 static void
846 xfs_flush_inodes_worker(
847 	struct work_struct	*work)
848 {
849 	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
850 						   m_flush_inodes_work);
851 	struct super_block	*sb = mp->m_super;
852 
853 	if (down_read_trylock(&sb->s_umount)) {
854 		sync_inodes_sb(sb);
855 		up_read(&sb->s_umount);
856 	}
857 }
858 
859 /*
860  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
861  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
862  * for IO to complete so that we effectively throttle multiple callers to the
863  * rate at which IO is completing.
864  */
865 void
866 xfs_flush_inodes(
867 	struct xfs_mount	*mp)
868 {
869 	/*
870 	 * If flush_work() returns true then that means we waited for a flush
871 	 * which was already in progress.  Don't bother running another scan.
872 	 */
873 	if (flush_work(&mp->m_flush_inodes_work))
874 		return;
875 
876 	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
877 	flush_work(&mp->m_flush_inodes_work);
878 }
879 
880 /* Catch misguided souls that try to use this interface on XFS */
881 STATIC struct inode *
882 xfs_fs_alloc_inode(
883 	struct super_block	*sb)
884 {
885 	BUG();
886 	return NULL;
887 }
888 
889 #ifdef DEBUG
890 static void
891 xfs_check_delalloc(
892 	struct xfs_inode	*ip,
893 	int			whichfork)
894 {
895 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
896 	struct xfs_bmbt_irec	got;
897 	struct xfs_iext_cursor	icur;
898 
899 	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
900 		return;
901 	do {
902 		if (isnullstartblock(got.br_startblock)) {
903 			xfs_warn(ip->i_mount,
904 	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
905 				ip->i_ino,
906 				whichfork == XFS_DATA_FORK ? "data" : "cow",
907 				got.br_startoff, got.br_blockcount);
908 		}
909 	} while (xfs_iext_next_extent(ifp, &icur, &got));
910 }
911 #else
912 #define xfs_check_delalloc(ip, whichfork)	do { } while (0)
913 #endif
914 
915 /*
916  * Now that the generic code is guaranteed not to be accessing
917  * the linux inode, we can inactivate and reclaim the inode.
918  */
919 STATIC void
920 xfs_fs_destroy_inode(
921 	struct inode		*inode)
922 {
923 	struct xfs_inode	*ip = XFS_I(inode);
924 
925 	trace_xfs_destroy_inode(ip);
926 
927 	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
928 	XFS_STATS_INC(ip->i_mount, vn_rele);
929 	XFS_STATS_INC(ip->i_mount, vn_remove);
930 
931 	xfs_inactive(ip);
932 
933 	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
934 		xfs_check_delalloc(ip, XFS_DATA_FORK);
935 		xfs_check_delalloc(ip, XFS_COW_FORK);
936 		ASSERT(0);
937 	}
938 
939 	XFS_STATS_INC(ip->i_mount, vn_reclaim);
940 
941 	/*
942 	 * We should never get here with one of the reclaim flags already set.
943 	 */
944 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
945 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
946 
947 	/*
948 	 * We always use background reclaim here because even if the
949 	 * inode is clean, it still may be under IO and hence we have
950 	 * to take the flush lock. The background reclaim path handles
951 	 * this more efficiently than we can here, so simply let background
952 	 * reclaim tear down all inodes.
953 	 */
954 	xfs_inode_set_reclaim_tag(ip);
955 }
956 
957 static void
958 xfs_fs_dirty_inode(
959 	struct inode			*inode,
960 	int				flag)
961 {
962 	struct xfs_inode		*ip = XFS_I(inode);
963 	struct xfs_mount		*mp = ip->i_mount;
964 	struct xfs_trans		*tp;
965 
966 	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
967 		return;
968 	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
969 		return;
970 
971 	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
972 		return;
973 	xfs_ilock(ip, XFS_ILOCK_EXCL);
974 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
975 	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
976 	xfs_trans_commit(tp);
977 }
978 
979 /*
980  * Slab object creation initialisation for the XFS inode.
981  * This covers only the idempotent fields in the XFS inode;
982  * all other fields need to be initialised on allocation
983  * from the slab. This avoids the need to repeatedly initialise
984  * fields in the xfs inode that are left in the initialised state
985  * when freeing the inode.
986  */
987 STATIC void
988 xfs_fs_inode_init_once(
989 	void			*inode)
990 {
991 	struct xfs_inode	*ip = inode;
992 
993 	memset(ip, 0, sizeof(struct xfs_inode));
994 
995 	/* vfs inode */
996 	inode_init_once(VFS_I(ip));
997 
998 	/* xfs inode */
999 	atomic_set(&ip->i_pincount, 0);
1000 	spin_lock_init(&ip->i_flags_lock);
1001 
1002 	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1003 		     "xfsino", ip->i_ino);
1004 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1005 		     "xfsino", ip->i_ino);
1006 }
1007 
1008 /*
1009  * We do an unlocked check for XFS_IDONTCACHE here because we are already
1010  * serialised against cache hits here via the inode->i_lock and igrab() in
1011  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
1012  * racing with us, and it avoids needing to grab a spinlock here for every inode
1013  * we drop the final reference on.
1014  */
1015 STATIC int
1016 xfs_fs_drop_inode(
1017 	struct inode		*inode)
1018 {
1019 	struct xfs_inode	*ip = XFS_I(inode);
1020 
1021 	/*
1022 	 * If this unlinked inode is in the middle of recovery, don't
1023 	 * drop the inode just yet; log recovery will take care of
1024 	 * that.  See the comment for this inode flag.
1025 	 */
1026 	if (ip->i_flags & XFS_IRECOVERY) {
1027 		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
1028 		return 0;
1029 	}
1030 
1031 	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
1032 }
1033 
1034 STATIC void
1035 xfs_free_fsname(
1036 	struct xfs_mount	*mp)
1037 {
1038 	kfree(mp->m_fsname);
1039 	kfree(mp->m_rtname);
1040 	kfree(mp->m_logname);
1041 }
1042 
1043 STATIC int
1044 xfs_fs_sync_fs(
1045 	struct super_block	*sb,
1046 	int			wait)
1047 {
1048 	struct xfs_mount	*mp = XFS_M(sb);
1049 
1050 	/*
1051 	 * Doing anything during the async pass would be counterproductive.
1052 	 */
1053 	if (!wait)
1054 		return 0;
1055 
1056 	xfs_log_force(mp, XFS_LOG_SYNC);
1057 	if (laptop_mode) {
1058 		/*
1059 		 * The disk must be active because we're syncing.
1060 		 * We schedule log work now (now that the disk is
1061 		 * active) instead of later (when it might not be).
1062 		 */
1063 		flush_delayed_work(&mp->m_log->l_work);
1064 	}
1065 
1066 	return 0;
1067 }
1068 
1069 STATIC int
1070 xfs_fs_statfs(
1071 	struct dentry		*dentry,
1072 	struct kstatfs		*statp)
1073 {
1074 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
1075 	xfs_sb_t		*sbp = &mp->m_sb;
1076 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
1077 	uint64_t		fakeinos, id;
1078 	uint64_t		icount;
1079 	uint64_t		ifree;
1080 	uint64_t		fdblocks;
1081 	xfs_extlen_t		lsize;
1082 	int64_t			ffree;
1083 
1084 	statp->f_type = XFS_SUPER_MAGIC;
1085 	statp->f_namelen = MAXNAMELEN - 1;
1086 
1087 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1088 	statp->f_fsid.val[0] = (u32)id;
1089 	statp->f_fsid.val[1] = (u32)(id >> 32);
1090 
1091 	icount = percpu_counter_sum(&mp->m_icount);
1092 	ifree = percpu_counter_sum(&mp->m_ifree);
1093 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
1094 
1095 	spin_lock(&mp->m_sb_lock);
1096 	statp->f_bsize = sbp->sb_blocksize;
1097 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1098 	statp->f_blocks = sbp->sb_dblocks - lsize;
1099 	spin_unlock(&mp->m_sb_lock);
1100 
1101 	statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
1102 	statp->f_bavail = statp->f_bfree;
1103 
1104 	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
1105 	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
1106 	if (M_IGEO(mp)->maxicount)
1107 		statp->f_files = min_t(typeof(statp->f_files),
1108 					statp->f_files,
1109 					M_IGEO(mp)->maxicount);
1110 
1111 	/* If sb_icount overshot maxicount, report actual allocation */
1112 	statp->f_files = max_t(typeof(statp->f_files),
1113 					statp->f_files,
1114 					sbp->sb_icount);
1115 
1116 	/* make sure statp->f_ffree does not underflow */
1117 	ffree = statp->f_files - (icount - ifree);
1118 	statp->f_ffree = max_t(int64_t, ffree, 0);
1119 
1120 
1121 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1122 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
1123 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
1124 		xfs_qm_statvfs(ip, statp);
1125 
1126 	if (XFS_IS_REALTIME_MOUNT(mp) &&
1127 	    (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
1128 		statp->f_blocks = sbp->sb_rblocks;
1129 		statp->f_bavail = statp->f_bfree =
1130 			sbp->sb_frextents * sbp->sb_rextsize;
1131 	}
1132 
1133 	return 0;
1134 }
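/*
 * Worked example (added for clarity, not in the original source): if
 * 1,000,000 inodes are allocated (icount), 100,000 of those are free
 * (ifree), and the free space could hold many more inodes but maxicount
 * caps f_files at 10,000,000, then f_ffree = 10,000,000 - (1,000,000 -
 * 100,000) = 9,100,000; i.e. statfs reports the inodes that could still
 * be created plus those already allocated but unused.
 */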
1135 
1136 STATIC void
1137 xfs_save_resvblks(struct xfs_mount *mp)
1138 {
1139 	uint64_t resblks = 0;
1140 
1141 	mp->m_resblks_save = mp->m_resblks;
1142 	xfs_reserve_blocks(mp, &resblks, NULL);
1143 }
1144 
1145 STATIC void
1146 xfs_restore_resvblks(struct xfs_mount *mp)
1147 {
1148 	uint64_t resblks;
1149 
1150 	if (mp->m_resblks_save) {
1151 		resblks = mp->m_resblks_save;
1152 		mp->m_resblks_save = 0;
1153 	} else
1154 		resblks = xfs_default_resblks(mp);
1155 
1156 	xfs_reserve_blocks(mp, &resblks, NULL);
1157 }
1158 
1159 /*
1160  * Trigger writeback of all the dirty metadata in the file system.
1161  *
1162  * This ensures that the metadata is written to their location on disk rather
1163  * than just existing in transactions in the log. This means after a quiesce
1164  * there is no log replay required to write the inodes to disk - this is the
1165  * primary difference between a sync and a quiesce.
1166  *
1167  * Note: xfs_log_quiesce() stops background log work - the callers must ensure
1168  * it is started again when appropriate.
1169  */
1170 void
1171 xfs_quiesce_attr(
1172 	struct xfs_mount	*mp)
1173 {
1174 	int	error = 0;
1175 
1176 	/* wait for all modifications to complete */
1177 	while (atomic_read(&mp->m_active_trans) > 0)
1178 		delay(100);
1179 
1180 	/* force the log to unpin objects from the now complete transactions */
1181 	xfs_log_force(mp, XFS_LOG_SYNC);
1182 
1183 	/* reclaim inodes to do any IO before the freeze completes */
1184 	xfs_reclaim_inodes(mp, 0);
1185 	xfs_reclaim_inodes(mp, SYNC_WAIT);
1186 
1187 	/* Push the superblock and write an unmount record */
1188 	error = xfs_log_sbcount(mp);
1189 	if (error)
1190 		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
1191 				"Frozen image may not be consistent.");
1192 	/*
1193 	 * Just warn here till VFS can correctly support
1194 	 * read-only remount without racing.
1195 	 */
1196 	WARN_ON(atomic_read(&mp->m_active_trans) != 0);
1197 
1198 	xfs_log_quiesce(mp);
1199 }
1200 
1201 STATIC int
1202 xfs_test_remount_options(
1203 	struct super_block	*sb,
1204 	char			*options)
1205 {
1206 	int			error = 0;
1207 	struct xfs_mount	*tmp_mp;
1208 
1209 	tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
1210 	if (!tmp_mp)
1211 		return -ENOMEM;
1212 
1213 	tmp_mp->m_super = sb;
1214 	error = xfs_parseargs(tmp_mp, options);
1215 	xfs_free_fsname(tmp_mp);
1216 	kmem_free(tmp_mp);
1217 
1218 	return error;
1219 }
1220 
1221 STATIC int
1222 xfs_fs_remount(
1223 	struct super_block	*sb,
1224 	int			*flags,
1225 	char			*options)
1226 {
1227 	struct xfs_mount	*mp = XFS_M(sb);
1228 	xfs_sb_t		*sbp = &mp->m_sb;
1229 	substring_t		args[MAX_OPT_ARGS];
1230 	char			*p;
1231 	int			error;
1232 
1233 	/* version 5 superblocks always support version counters. */
1234 	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1235 		*flags |= SB_I_VERSION;
1236 
1237 	/* First, check for complete junk; i.e. invalid options */
1238 	error = xfs_test_remount_options(sb, options);
1239 	if (error)
1240 		return error;
1241 
1242 	sync_filesystem(sb);
1243 	while ((p = strsep(&options, ",")) != NULL) {
1244 		int token;
1245 
1246 		if (!*p)
1247 			continue;
1248 
1249 		token = match_token(p, tokens, args);
1250 		switch (token) {
1251 		case Opt_inode64:
1252 			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1253 			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1254 			break;
1255 		case Opt_inode32:
1256 			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1257 			mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1258 			break;
1259 		default:
1260 			/*
1261 			 * Logically we would return an error here to prevent
1262 			 * users from believing they might have changed
1263 			 * mount options using remount which can't be changed.
1264 			 *
1265 			 * But unfortunately mount(8) adds all options from
1266 			 * mtab and fstab to the mount arguments in some cases
1267 			 * so we can't blindly reject options, but have to
1268 			 * check for each specified option if it actually
1269 			 * differs from the currently set option and only
1270 			 * reject it if that's the case.
1271 			 *
1272 			 * Until that is implemented we return success for
1273 			 * every remount request, and silently ignore all
1274 			 * options that we can't actually change.
1275 			 */
1276 #if 0
1277 			xfs_info(mp,
1278 		"mount option \"%s\" not supported for remount", p);
1279 			return -EINVAL;
1280 #else
1281 			break;
1282 #endif
1283 		}
1284 	}
1285 
1286 	/* ro -> rw */
1287 	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
1288 		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1289 			xfs_warn(mp,
1290 		"ro->rw transition prohibited on norecovery mount");
1291 			return -EINVAL;
1292 		}
1293 
1294 		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1295 		    xfs_sb_has_ro_compat_feature(sbp,
1296 					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1297 			xfs_warn(mp,
1298 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1299 				(sbp->sb_features_ro_compat &
1300 					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1301 			return -EINVAL;
1302 		}
1303 
1304 		mp->m_flags &= ~XFS_MOUNT_RDONLY;
1305 
1306 		/*
1307 		 * If this is the first remount to writeable state we
1308 		 * might have some superblock changes to update.
1309 		 */
1310 		if (mp->m_update_sb) {
1311 			error = xfs_sync_sb(mp, false);
1312 			if (error) {
1313 				xfs_warn(mp, "failed to write sb changes");
1314 				return error;
1315 			}
1316 			mp->m_update_sb = false;
1317 		}
1318 
1319 		/*
1320 		 * Fill out the reserve pool if it is empty. Use the stashed
1321 		 * value if it is non-zero, otherwise go with the default.
1322 		 */
1323 		xfs_restore_resvblks(mp);
1324 		xfs_log_work_queue(mp);
1325 
1326 		/* Recover any CoW blocks that never got remapped. */
1327 		error = xfs_reflink_recover_cow(mp);
1328 		if (error) {
1329 			xfs_err(mp,
1330 	"Error %d recovering leftover CoW allocations.", error);
1331 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1332 			return error;
1333 		}
1334 		xfs_start_block_reaping(mp);
1335 
1336 		/* Create the per-AG metadata reservation pool. */
1337 		error = xfs_fs_reserve_ag_blocks(mp);
1338 		if (error && error != -ENOSPC)
1339 			return error;
1340 	}
1341 
1342 	/* rw -> ro */
1343 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
1344 		/*
1345 		 * Cancel background eofb scanning so it cannot race with the
1346 		 * final log force+buftarg wait and deadlock the remount.
1347 		 */
1348 		xfs_stop_block_reaping(mp);
1349 
1350 		/* Get rid of any leftover CoW reservations... */
1351 		error = xfs_icache_free_cowblocks(mp, NULL);
1352 		if (error) {
1353 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1354 			return error;
1355 		}
1356 
1357 		/* Free the per-AG metadata reservation pool. */
1358 		error = xfs_fs_unreserve_ag_blocks(mp);
1359 		if (error) {
1360 			xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1361 			return error;
1362 		}
1363 
1364 		/*
1365 		 * Before we sync the metadata, we need to free up the reserve
1366 		 * block pool so that the used block count in the superblock on
1367 		 * disk is correct at the end of the remount. Stash the current
1368 		 * reserve pool size so that if we get remounted rw, we can
1369 		 * return it to the same size.
1370 		 */
1371 		xfs_save_resvblks(mp);
1372 
1373 		xfs_quiesce_attr(mp);
1374 		mp->m_flags |= XFS_MOUNT_RDONLY;
1375 	}
1376 
1377 	return 0;
1378 }
1379 
1380 /*
1381  * Second stage of a freeze. The data is already frozen so we only
1382  * need to take care of the metadata. Once that's done sync the superblock
1383  * to the log to dirty it in case of a crash while frozen. This ensures that we
1384  * will recover the unlinked inode lists on the next mount.
1385  */
1386 STATIC int
1387 xfs_fs_freeze(
1388 	struct super_block	*sb)
1389 {
1390 	struct xfs_mount	*mp = XFS_M(sb);
1391 
1392 	xfs_stop_block_reaping(mp);
1393 	xfs_save_resvblks(mp);
1394 	xfs_quiesce_attr(mp);
1395 	return xfs_sync_sb(mp, true);
1396 }
1397 
1398 STATIC int
1399 xfs_fs_unfreeze(
1400 	struct super_block	*sb)
1401 {
1402 	struct xfs_mount	*mp = XFS_M(sb);
1403 
1404 	xfs_restore_resvblks(mp);
1405 	xfs_log_work_queue(mp);
1406 	xfs_start_block_reaping(mp);
1407 	return 0;
1408 }
1409 
1410 STATIC int
1411 xfs_fs_show_options(
1412 	struct seq_file		*m,
1413 	struct dentry		*root)
1414 {
1415 	xfs_showargs(XFS_M(root->d_sb), m);
1416 	return 0;
1417 }
1418 
1419 /*
1420  * This function fills in xfs_mount_t fields based on mount args.
1421  * Note: the superblock _has_ now been read in.
1422  */
1423 STATIC int
1424 xfs_finish_flags(
1425 	struct xfs_mount	*mp)
1426 {
1427 	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1428 
1429 	/* Fail a mount where the logbuf is smaller than the log stripe */
1430 	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1431 		if (mp->m_logbsize <= 0 &&
1432 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1433 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1434 		} else if (mp->m_logbsize > 0 &&
1435 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1436 			xfs_warn(mp,
1437 		"logbuf size must be greater than or equal to log stripe size");
1438 			return -EINVAL;
1439 		}
1440 	} else {
1441 		/* Fail a mount if the logbuf is larger than 32K */
1442 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1443 			xfs_warn(mp,
1444 		"logbuf size for version 1 logs must be 16K or 32K");
1445 			return -EINVAL;
1446 		}
1447 	}
1448 
1449 	/*
1450 	 * V5 filesystems always use attr2 format for attributes.
1451 	 */
1452 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
1453 	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1454 		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1455 			     "attr2 is always enabled for V5 filesystems.");
1456 		return -EINVAL;
1457 	}
1458 
1459 	/*
1460 	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1461 	 * told by noattr2 to turn it off
1462 	 */
1463 	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1464 	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
1465 		mp->m_flags |= XFS_MOUNT_ATTR2;
1466 
1467 	/*
1468 	 * prohibit r/w mounts of read-only filesystems
1469 	 */
1470 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1471 		xfs_warn(mp,
1472 			"cannot mount a read-only filesystem as read-write");
1473 		return -EROFS;
1474 	}
1475 
1476 	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
1477 	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
1478 	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
1479 		xfs_warn(mp,
1480 		  "Super block does not support project and group quota together");
1481 		return -EINVAL;
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static int
1488 xfs_init_percpu_counters(
1489 	struct xfs_mount	*mp)
1490 {
1491 	int		error;
1492 
1493 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1494 	if (error)
1495 		return -ENOMEM;
1496 
1497 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1498 	if (error)
1499 		goto free_icount;
1500 
1501 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1502 	if (error)
1503 		goto free_ifree;
1504 
1505 	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1506 	if (error)
1507 		goto free_fdblocks;
1508 
1509 	return 0;
1510 
1511 free_fdblocks:
1512 	percpu_counter_destroy(&mp->m_fdblocks);
1513 free_ifree:
1514 	percpu_counter_destroy(&mp->m_ifree);
1515 free_icount:
1516 	percpu_counter_destroy(&mp->m_icount);
1517 	return -ENOMEM;
1518 }
1519 
1520 void
1521 xfs_reinit_percpu_counters(
1522 	struct xfs_mount	*mp)
1523 {
1524 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1525 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1526 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1527 }
1528 
1529 static void
1530 xfs_destroy_percpu_counters(
1531 	struct xfs_mount	*mp)
1532 {
1533 	percpu_counter_destroy(&mp->m_icount);
1534 	percpu_counter_destroy(&mp->m_ifree);
1535 	percpu_counter_destroy(&mp->m_fdblocks);
1536 	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1537 	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1538 	percpu_counter_destroy(&mp->m_delalloc_blks);
1539 }
1540 
1541 static struct xfs_mount *
1542 xfs_mount_alloc(
1543 	struct super_block	*sb)
1544 {
1545 	struct xfs_mount	*mp;
1546 
1547 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1548 	if (!mp)
1549 		return NULL;
1550 
1551 	mp->m_super = sb;
1552 	spin_lock_init(&mp->m_sb_lock);
1553 	spin_lock_init(&mp->m_agirotor_lock);
1554 	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1555 	spin_lock_init(&mp->m_perag_lock);
1556 	mutex_init(&mp->m_growlock);
1557 	atomic_set(&mp->m_active_trans, 0);
1558 	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1559 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1560 	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1561 	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1562 	mp->m_kobj.kobject.kset = xfs_kset;
1563 	/*
1564 	 * We don't create the finobt per-ag space reservation until after log
1565 	 * recovery, so we must set this to true so that an ifree transaction
1566 	 * started during log recovery will not depend on space reservations
1567 	 * for finobt expansion.
1568 	 */
1569 	mp->m_finobt_nores = true;
1570 	return mp;
1571 }
1572 
1573 
1574 STATIC int
1575 xfs_fs_fill_super(
1576 	struct super_block	*sb,
1577 	void			*data,
1578 	int			silent)
1579 {
1580 	struct inode		*root;
1581 	struct xfs_mount	*mp = NULL;
1582 	int			flags = 0, error = -ENOMEM;
1583 
1584 	/*
1585 	 * allocate mp and do all low-level struct initializations before we
1586 	 * attach it to the super
1587 	 */
1588 	mp = xfs_mount_alloc(sb);
1589 	if (!mp)
1590 		goto out;
1591 	sb->s_fs_info = mp;
1592 
1593 	error = xfs_parseargs(mp, (char *)data);
1594 	if (error)
1595 		goto out_free_fsname;
1596 
1597 	sb_min_blocksize(sb, BBSIZE);
1598 	sb->s_xattr = xfs_xattr_handlers;
1599 	sb->s_export_op = &xfs_export_operations;
1600 #ifdef CONFIG_XFS_QUOTA
1601 	sb->s_qcop = &xfs_quotactl_operations;
1602 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1603 #endif
1604 	sb->s_op = &xfs_super_operations;
1605 
1606 	/*
1607 	 * Delay mount work if the debug hook is set. This is debug
1608 	 * instrumentation to coordinate simulation of xfs mount failures with
1609 	 * VFS superblock operations.
1610 	 */
1611 	if (xfs_globals.mount_delay) {
1612 		xfs_notice(mp, "Delaying mount for %d seconds.",
1613 			xfs_globals.mount_delay);
1614 		msleep(xfs_globals.mount_delay * 1000);
1615 	}
1616 
1617 	if (silent)
1618 		flags |= XFS_MFSI_QUIET;
1619 
1620 	error = xfs_open_devices(mp);
1621 	if (error)
1622 		goto out_free_fsname;
1623 
1624 	error = xfs_init_mount_workqueues(mp);
1625 	if (error)
1626 		goto out_close_devices;
1627 
1628 	error = xfs_init_percpu_counters(mp);
1629 	if (error)
1630 		goto out_destroy_workqueues;
1631 
1632 	/* Allocate stats memory before we do operations that might use it */
1633 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1634 	if (!mp->m_stats.xs_stats) {
1635 		error = -ENOMEM;
1636 		goto out_destroy_counters;
1637 	}
1638 
1639 	error = xfs_readsb(mp, flags);
1640 	if (error)
1641 		goto out_free_stats;
1642 
1643 	error = xfs_finish_flags(mp);
1644 	if (error)
1645 		goto out_free_sb;
1646 
1647 	error = xfs_setup_devices(mp);
1648 	if (error)
1649 		goto out_free_sb;
1650 
1651 	/*
1652 	 * XFS block mappings use 54 bits to store the logical block offset.
1653 	 * This should suffice to handle the maximum file size that the VFS
1654 	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1655 	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1656 	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1657 	 * to check this assertion.
1658 	 *
1659 	 * Avoid integer overflow by comparing the maximum bmbt offset to the
1660 	 * maximum pagecache offset in units of fs blocks.
1661 	 */
1662 	if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
1663 		xfs_warn(mp,
1664 "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1665 			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1666 			 XFS_MAX_FILEOFF);
1667 		error = -EINVAL;
1668 		goto out_free_sb;
1669 	}
1670 
1671 	error = xfs_filestream_mount(mp);
1672 	if (error)
1673 		goto out_free_sb;
1674 
1675 	/*
1676 	 * we must configure the block size in the superblock before we run the
1677 	 * full mount process as the mount process can lookup and cache inodes.
1678 	 */
1679 	sb->s_magic = XFS_SUPER_MAGIC;
1680 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1681 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1682 	sb->s_maxbytes = MAX_LFS_FILESIZE;
1683 	sb->s_max_links = XFS_MAXLINK;
1684 	sb->s_time_gran = 1;
1685 	sb->s_time_min = S32_MIN;
1686 	sb->s_time_max = S32_MAX;
1687 	sb->s_iflags |= SB_I_CGROUPWB;
1688 
1689 	set_posix_acl_flag(sb);
1690 
1691 	/* version 5 superblocks support inode version counters. */
1692 	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1693 		sb->s_flags |= SB_I_VERSION;
1694 
1695 	if (mp->m_flags & XFS_MOUNT_DAX) {
1696 		bool rtdev_is_dax = false, datadev_is_dax;
1697 
1698 		xfs_warn(mp,
1699 		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1700 
1701 		datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1702 			sb->s_blocksize);
1703 		if (mp->m_rtdev_targp)
1704 			rtdev_is_dax = bdev_dax_supported(
1705 				mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1706 		if (!rtdev_is_dax && !datadev_is_dax) {
1707 			xfs_alert(mp,
1708 			"DAX unsupported by block device. Turning off DAX.");
1709 			mp->m_flags &= ~XFS_MOUNT_DAX;
1710 		}
1711 		if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1712 			xfs_alert(mp,
1713 		"DAX and reflink cannot be used together!");
1714 			error = -EINVAL;
1715 			goto out_filestream_unmount;
1716 		}
1717 	}
1718 
1719 	if (mp->m_flags & XFS_MOUNT_DISCARD) {
1720 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
1721 
1722 		if (!blk_queue_discard(q)) {
1723 			xfs_warn(mp, "mounting with \"discard\" option, but "
1724 					"the device does not support discard");
1725 			mp->m_flags &= ~XFS_MOUNT_DISCARD;
1726 		}
1727 	}
1728 
1729 	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1730 		if (mp->m_sb.sb_rblocks) {
1731 			xfs_alert(mp,
1732 	"reflink not compatible with realtime device!");
1733 			error = -EINVAL;
1734 			goto out_filestream_unmount;
1735 		}
1736 
1737 		if (xfs_globals.always_cow) {
1738 			xfs_info(mp, "using DEBUG-only always_cow mode.");
1739 			mp->m_always_cow = true;
1740 		}
1741 	}
1742 
1743 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1744 		xfs_alert(mp,
1745 	"reverse mapping btree not compatible with realtime device!");
1746 		error = -EINVAL;
1747 		goto out_filestream_unmount;
1748 	}
1749 
1750 	error = xfs_mountfs(mp);
1751 	if (error)
1752 		goto out_filestream_unmount;
1753 
1754 	root = igrab(VFS_I(mp->m_rootip));
1755 	if (!root) {
1756 		error = -ENOENT;
1757 		goto out_unmount;
1758 	}
1759 	sb->s_root = d_make_root(root);
1760 	if (!sb->s_root) {
1761 		error = -ENOMEM;
1762 		goto out_unmount;
1763 	}
1764 
1765 	return 0;
1766 
1767  out_filestream_unmount:
1768 	xfs_filestream_unmount(mp);
1769  out_free_sb:
1770 	xfs_freesb(mp);
1771  out_free_stats:
1772 	free_percpu(mp->m_stats.xs_stats);
1773  out_destroy_counters:
1774 	xfs_destroy_percpu_counters(mp);
1775  out_destroy_workqueues:
1776 	xfs_destroy_mount_workqueues(mp);
1777  out_close_devices:
1778 	xfs_close_devices(mp);
1779  out_free_fsname:
1780 	sb->s_fs_info = NULL;
1781 	xfs_free_fsname(mp);
1782 	kfree(mp);
1783  out:
1784 	return error;
1785 
1786  out_unmount:
1787 	xfs_filestream_unmount(mp);
1788 	xfs_unmountfs(mp);
1789 	goto out_free_sb;
1790 }
1791 
1792 STATIC void
1793 xfs_fs_put_super(
1794 	struct super_block	*sb)
1795 {
1796 	struct xfs_mount	*mp = XFS_M(sb);
1797 
1798 	/* if ->fill_super failed, we have no mount to tear down */
1799 	if (!sb->s_fs_info)
1800 		return;
1801 
1802 	xfs_notice(mp, "Unmounting Filesystem");
1803 	xfs_filestream_unmount(mp);
1804 	xfs_unmountfs(mp);
1805 
1806 	xfs_freesb(mp);
1807 	free_percpu(mp->m_stats.xs_stats);
1808 	xfs_destroy_percpu_counters(mp);
1809 	xfs_destroy_mount_workqueues(mp);
1810 	xfs_close_devices(mp);
1811 
1812 	sb->s_fs_info = NULL;
1813 	xfs_free_fsname(mp);
1814 	kfree(mp);
1815 }
1816 
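/*
 * .mount entry point: mount_bdev() opens the block device named by
 * dev_name, allocates (or finds) the superblock for it, and calls
 * xfs_fs_fill_super() above to do the XFS-specific setup.
 */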
1817 STATIC struct dentry *
1818 xfs_fs_mount(
1819 	struct file_system_type	*fs_type,
1820 	int			flags,
1821 	const char		*dev_name,
1822 	void			*data)
1823 {
1824 	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1825 }
1826 
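/*
 * Superblock shrinker callbacks: report how many XFS inodes are
 * reclaimable, and reclaim up to sc->nr_to_scan of them under memory
 * pressure.
 */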
1827 static long
1828 xfs_fs_nr_cached_objects(
1829 	struct super_block	*sb,
1830 	struct shrink_control	*sc)
1831 {
1832 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1833 	if (WARN_ON_ONCE(!sb->s_fs_info))
1834 		return 0;
1835 	return xfs_reclaim_inodes_count(XFS_M(sb));
1836 }
1837 
1838 static long
1839 xfs_fs_free_cached_objects(
1840 	struct super_block	*sb,
1841 	struct shrink_control	*sc)
1842 {
1843 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1844 }
1845 
1846 static const struct super_operations xfs_super_operations = {
1847 	.alloc_inode		= xfs_fs_alloc_inode,
1848 	.destroy_inode		= xfs_fs_destroy_inode,
1849 	.dirty_inode		= xfs_fs_dirty_inode,
1850 	.drop_inode		= xfs_fs_drop_inode,
1851 	.put_super		= xfs_fs_put_super,
1852 	.sync_fs		= xfs_fs_sync_fs,
1853 	.freeze_fs		= xfs_fs_freeze,
1854 	.unfreeze_fs		= xfs_fs_unfreeze,
1855 	.statfs			= xfs_fs_statfs,
1856 	.remount_fs		= xfs_fs_remount,
1857 	.show_options		= xfs_fs_show_options,
1858 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1859 	.free_cached_objects	= xfs_fs_free_cached_objects,
1860 };
1861 
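/*
 * Filesystem type registration.  kill_block_super() handles the generic
 * block-device teardown on unmount, and MODULE_ALIAS_FS("xfs") lets the
 * kernel autoload this module when an xfs mount is requested.
 */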
1862 static struct file_system_type xfs_fs_type = {
1863 	.owner			= THIS_MODULE,
1864 	.name			= "xfs",
1865 	.mount			= xfs_fs_mount,
1866 	.kill_sb		= kill_block_super,
1867 	.fs_flags		= FS_REQUIRES_DEV,
1868 };
1869 MODULE_ALIAS_FS("xfs");
1870 
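/*
 * Create the slab caches (kmem zones) and the ioend bioset used by XFS.
 * Any failure unwinds the zones already created and returns -ENOMEM.
 */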
1871 STATIC int __init
1872 xfs_init_zones(void)
1873 {
1874 	if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1875 			offsetof(struct xfs_ioend, io_inline_bio),
1876 			BIOSET_NEED_BVECS))
1877 		goto out;
1878 
1879 	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1880 						"xfs_log_ticket");
1881 	if (!xfs_log_ticket_zone)
1882 		goto out_free_ioend_bioset;
1883 
1884 	xfs_bmap_free_item_zone = kmem_zone_init(
1885 			sizeof(struct xfs_extent_free_item),
1886 			"xfs_bmap_free_item");
1887 	if (!xfs_bmap_free_item_zone)
1888 		goto out_destroy_log_ticket_zone;
1889 
1890 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1891 						"xfs_btree_cur");
1892 	if (!xfs_btree_cur_zone)
1893 		goto out_destroy_bmap_free_item_zone;
1894 
1895 	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1896 						"xfs_da_state");
1897 	if (!xfs_da_state_zone)
1898 		goto out_destroy_btree_cur_zone;
1899 
1900 	xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
1901 	if (!xfs_ifork_zone)
1902 		goto out_destroy_da_state_zone;
1903 
1904 	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1905 	if (!xfs_trans_zone)
1906 		goto out_destroy_ifork_zone;
1907 
1908 
1909 	/*
1910 	 * The size of the zone-allocated buf log item is the maximum
1911 	 * size possible under XFS.  This wastes a little bit of memory,
1912 	 * but it is much faster.
1913 	 */
1914 	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
1915 					   "xfs_buf_item");
1916 	if (!xfs_buf_item_zone)
1917 		goto out_destroy_trans_zone;
1918 
1919 	xfs_efd_zone = kmem_zone_init((sizeof(struct xfs_efd_log_item) +
1920 			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1921 				 sizeof(xfs_extent_t))), "xfs_efd_item");
1922 	if (!xfs_efd_zone)
1923 		goto out_destroy_buf_item_zone;
1924 
1925 	xfs_efi_zone = kmem_zone_init((sizeof(struct xfs_efi_log_item) +
1926 			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1927 				sizeof(xfs_extent_t))), "xfs_efi_item");
1928 	if (!xfs_efi_zone)
1929 		goto out_destroy_efd_zone;
1930 
1931 	xfs_inode_zone =
1932 		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1933 			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
1934 			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
1935 	if (!xfs_inode_zone)
1936 		goto out_destroy_efi_zone;
1937 
1938 	xfs_ili_zone =
1939 		kmem_zone_init_flags(sizeof(struct xfs_inode_log_item),
1940 					"xfs_ili", KM_ZONE_SPREAD, NULL);
1941 	if (!xfs_ili_zone)
1942 		goto out_destroy_inode_zone;
1943 	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
1944 					"xfs_icr");
1945 	if (!xfs_icreate_zone)
1946 		goto out_destroy_ili_zone;
1947 
1948 	xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
1949 			"xfs_rud_item");
1950 	if (!xfs_rud_zone)
1951 		goto out_destroy_icreate_zone;
1952 
1953 	xfs_rui_zone = kmem_zone_init(
1954 			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1955 			"xfs_rui_item");
1956 	if (!xfs_rui_zone)
1957 		goto out_destroy_rud_zone;
1958 
1959 	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
1960 			"xfs_cud_item");
1961 	if (!xfs_cud_zone)
1962 		goto out_destroy_rui_zone;
1963 
1964 	xfs_cui_zone = kmem_zone_init(
1965 			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1966 			"xfs_cui_item");
1967 	if (!xfs_cui_zone)
1968 		goto out_destroy_cud_zone;
1969 
1970 	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
1971 			"xfs_bud_item");
1972 	if (!xfs_bud_zone)
1973 		goto out_destroy_cui_zone;
1974 
1975 	xfs_bui_zone = kmem_zone_init(
1976 			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1977 			"xfs_bui_item");
1978 	if (!xfs_bui_zone)
1979 		goto out_destroy_bud_zone;
1980 
1981 	return 0;
1982 
1983  out_destroy_bud_zone:
1984 	kmem_zone_destroy(xfs_bud_zone);
1985  out_destroy_cui_zone:
1986 	kmem_zone_destroy(xfs_cui_zone);
1987  out_destroy_cud_zone:
1988 	kmem_zone_destroy(xfs_cud_zone);
1989  out_destroy_rui_zone:
1990 	kmem_zone_destroy(xfs_rui_zone);
1991  out_destroy_rud_zone:
1992 	kmem_zone_destroy(xfs_rud_zone);
1993  out_destroy_icreate_zone:
1994 	kmem_zone_destroy(xfs_icreate_zone);
1995  out_destroy_ili_zone:
1996 	kmem_zone_destroy(xfs_ili_zone);
1997  out_destroy_inode_zone:
1998 	kmem_zone_destroy(xfs_inode_zone);
1999  out_destroy_efi_zone:
2000 	kmem_zone_destroy(xfs_efi_zone);
2001  out_destroy_efd_zone:
2002 	kmem_zone_destroy(xfs_efd_zone);
2003  out_destroy_buf_item_zone:
2004 	kmem_zone_destroy(xfs_buf_item_zone);
2005  out_destroy_trans_zone:
2006 	kmem_zone_destroy(xfs_trans_zone);
2007  out_destroy_ifork_zone:
2008 	kmem_zone_destroy(xfs_ifork_zone);
2009  out_destroy_da_state_zone:
2010 	kmem_zone_destroy(xfs_da_state_zone);
2011  out_destroy_btree_cur_zone:
2012 	kmem_zone_destroy(xfs_btree_cur_zone);
2013  out_destroy_bmap_free_item_zone:
2014 	kmem_zone_destroy(xfs_bmap_free_item_zone);
2015  out_destroy_log_ticket_zone:
2016 	kmem_zone_destroy(xfs_log_ticket_zone);
2017  out_free_ioend_bioset:
2018 	bioset_exit(&xfs_ioend_bioset);
2019  out:
2020 	return -ENOMEM;
2021 }
2022 
2023 STATIC void
2024 xfs_destroy_zones(void)
2025 {
2026 	/*
2027 	 * Make sure all delayed RCU frees are flushed before we
2028 	 * destroy the caches.
2029 	 */
2030 	rcu_barrier();
2031 	kmem_zone_destroy(xfs_bui_zone);
2032 	kmem_zone_destroy(xfs_bud_zone);
2033 	kmem_zone_destroy(xfs_cui_zone);
2034 	kmem_zone_destroy(xfs_cud_zone);
2035 	kmem_zone_destroy(xfs_rui_zone);
2036 	kmem_zone_destroy(xfs_rud_zone);
2037 	kmem_zone_destroy(xfs_icreate_zone);
2038 	kmem_zone_destroy(xfs_ili_zone);
2039 	kmem_zone_destroy(xfs_inode_zone);
2040 	kmem_zone_destroy(xfs_efi_zone);
2041 	kmem_zone_destroy(xfs_efd_zone);
2042 	kmem_zone_destroy(xfs_buf_item_zone);
2043 	kmem_zone_destroy(xfs_trans_zone);
2044 	kmem_zone_destroy(xfs_ifork_zone);
2045 	kmem_zone_destroy(xfs_da_state_zone);
2046 	kmem_zone_destroy(xfs_btree_cur_zone);
2047 	kmem_zone_destroy(xfs_bmap_free_item_zone);
2048 	kmem_zone_destroy(xfs_log_ticket_zone);
2049 	bioset_exit(&xfs_ioend_bioset);
2050 }
2051 
2052 STATIC int __init
2053 xfs_init_workqueues(void)
2054 {
2055 	/*
2056 	 * The allocation workqueue can be used in memory reclaim situations
2057 	 * (writepage path), and parallelism is only limited by the number of
2058 	 * AGs in all the filesystems mounted. Hence use the default large
2059 	 * max_active value for this workqueue.
2060 	 */
2061 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
2062 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2063 	if (!xfs_alloc_wq)
2064 		return -ENOMEM;
2065 
2066 	xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2067 	if (!xfs_discard_wq)
2068 		goto out_free_alloc_wq;
2069 
2070 	return 0;
2071 out_free_alloc_wq:
2072 	destroy_workqueue(xfs_alloc_wq);
2073 	return -ENOMEM;
2074 }
2075 
2076 STATIC void
2077 xfs_destroy_workqueues(void)
2078 {
2079 	destroy_workqueue(xfs_discard_wq);
2080 	destroy_workqueue(xfs_alloc_wq);
2081 }
2082 
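/*
 * Module initialisation: sanity-check the on-disk structure sizes, then
 * set up zones, workqueues, the MRU cache, the buffer cache,
 * procfs/sysctl/sysfs entries and the quota manager before registering
 * the filesystem type.  Each failure unwinds the earlier steps via the
 * goto chain below.
 */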
2083 STATIC int __init
2084 init_xfs_fs(void)
2085 {
2086 	int			error;
2087 
2088 	xfs_check_ondisk_structs();
2089 
2090 	printk(KERN_INFO XFS_VERSION_STRING " with "
2091 			 XFS_BUILD_OPTIONS " enabled\n");
2092 
2093 	xfs_dir_startup();
2094 
2095 	error = xfs_init_zones();
2096 	if (error)
2097 		goto out;
2098 
2099 	error = xfs_init_workqueues();
2100 	if (error)
2101 		goto out_destroy_zones;
2102 
2103 	error = xfs_mru_cache_init();
2104 	if (error)
2105 		goto out_destroy_wq;
2106 
2107 	error = xfs_buf_init();
2108 	if (error)
2109 		goto out_mru_cache_uninit;
2110 
2111 	error = xfs_init_procfs();
2112 	if (error)
2113 		goto out_buf_terminate;
2114 
2115 	error = xfs_sysctl_register();
2116 	if (error)
2117 		goto out_cleanup_procfs;
2118 
2119 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2120 	if (!xfs_kset) {
2121 		error = -ENOMEM;
2122 		goto out_sysctl_unregister;
2123 	}
2124 
2125 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
2126 
2127 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2128 	if (!xfsstats.xs_stats) {
2129 		error = -ENOMEM;
2130 		goto out_kset_unregister;
2131 	}
2132 
2133 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2134 			       "stats");
2135 	if (error)
2136 		goto out_free_stats;
2137 
2138 #ifdef DEBUG
2139 	xfs_dbg_kobj.kobject.kset = xfs_kset;
2140 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2141 	if (error)
2142 		goto out_remove_stats_kobj;
2143 #endif
2144 
2145 	error = xfs_qm_init();
2146 	if (error)
2147 		goto out_remove_dbg_kobj;
2148 
2149 	error = register_filesystem(&xfs_fs_type);
2150 	if (error)
2151 		goto out_qm_exit;
2152 	return 0;
2153 
2154  out_qm_exit:
2155 	xfs_qm_exit();
2156  out_remove_dbg_kobj:
2157 #ifdef DEBUG
2158 	xfs_sysfs_del(&xfs_dbg_kobj);
2159  out_remove_stats_kobj:
2160 #endif
2161 	xfs_sysfs_del(&xfsstats.xs_kobj);
2162  out_free_stats:
2163 	free_percpu(xfsstats.xs_stats);
2164  out_kset_unregister:
2165 	kset_unregister(xfs_kset);
2166  out_sysctl_unregister:
2167 	xfs_sysctl_unregister();
2168  out_cleanup_procfs:
2169 	xfs_cleanup_procfs();
2170  out_buf_terminate:
2171 	xfs_buf_terminate();
2172  out_mru_cache_uninit:
2173 	xfs_mru_cache_uninit();
2174  out_destroy_wq:
2175 	xfs_destroy_workqueues();
2176  out_destroy_zones:
2177 	xfs_destroy_zones();
2178  out:
2179 	return error;
2180 }
2181 
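/*
 * Module exit: undo everything init_xfs_fs() set up, largely in reverse
 * order, and free the filesystem UUID table.
 */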
2182 STATIC void __exit
2183 exit_xfs_fs(void)
2184 {
2185 	xfs_qm_exit();
2186 	unregister_filesystem(&xfs_fs_type);
2187 #ifdef DEBUG
2188 	xfs_sysfs_del(&xfs_dbg_kobj);
2189 #endif
2190 	xfs_sysfs_del(&xfsstats.xs_kobj);
2191 	free_percpu(xfsstats.xs_stats);
2192 	kset_unregister(xfs_kset);
2193 	xfs_sysctl_unregister();
2194 	xfs_cleanup_procfs();
2195 	xfs_buf_terminate();
2196 	xfs_mru_cache_uninit();
2197 	xfs_destroy_workqueues();
2198 	xfs_destroy_zones();
2199 	xfs_uuid_table_free();
2200 }
2201 
2202 module_init(init_xfs_fs);
2203 module_exit(exit_xfs_fs);
2204 
2205 MODULE_AUTHOR("Silicon Graphics, Inc.");
2206 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2207 MODULE_LICENSE("GPL");
2208