1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 
19 #include "xfs.h"
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
24 #include "xfs_sb.h"
25 #include "xfs_mount.h"
26 #include "xfs_da_format.h"
27 #include "xfs_inode.h"
28 #include "xfs_btree.h"
29 #include "xfs_bmap.h"
30 #include "xfs_alloc.h"
31 #include "xfs_error.h"
32 #include "xfs_fsops.h"
33 #include "xfs_trans.h"
34 #include "xfs_buf_item.h"
35 #include "xfs_log.h"
36 #include "xfs_log_priv.h"
37 #include "xfs_da_btree.h"
38 #include "xfs_dir2.h"
39 #include "xfs_extfree_item.h"
40 #include "xfs_mru_cache.h"
41 #include "xfs_inode_item.h"
42 #include "xfs_icache.h"
43 #include "xfs_trace.h"
44 #include "xfs_icreate_item.h"
45 #include "xfs_filestream.h"
46 #include "xfs_quota.h"
47 #include "xfs_sysfs.h"
48 
49 #include <linux/namei.h>
50 #include <linux/init.h>
51 #include <linux/slab.h>
52 #include <linux/mount.h>
53 #include <linux/mempool.h>
54 #include <linux/writeback.h>
55 #include <linux/kthread.h>
56 #include <linux/freezer.h>
57 #include <linux/parser.h>
58 
59 static const struct super_operations xfs_super_operations;
60 static kmem_zone_t *xfs_ioend_zone;
61 mempool_t *xfs_ioend_pool;
62 
63 static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
64 #ifdef DEBUG
65 static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
66 #endif
67 
68 #define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
69 #define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
70 #define MNTOPT_LOGDEV	"logdev"	/* log device */
71 #define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
72 #define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
73 #define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
74 #define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
75 #define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
76 #define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
77 #define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
78 #define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
79 #define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
80 #define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
81 #define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
82 #define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
83 #define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
84 #define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
85 #define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
86 #define MNTOPT_BARRIER	"barrier"	/* use writer barriers for log write and
87 					 * unwritten extent conversion */
88 #define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
89 #define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
90 #define MNTOPT_32BITINODE   "inode32"	/* inode allocation limited to
91 					 * XFS_MAXINUMBER_32 */
92 #define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
93 #define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
94 #define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
95 #define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
96 					 * in stat(). */
97 #define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
98 #define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
99 #define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
100 #define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
101 #define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
102 #define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
103 #define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
104 #define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
105 #define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
106 #define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
107 #define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
108 #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota limit enforcement */
109 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
110 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
111 #define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
112 #define MNTOPT_DISCARD	   "discard"	/* Discard unused blocks */
113 #define MNTOPT_NODISCARD   "nodiscard"	/* Do not discard unused blocks */
114 
115 #define MNTOPT_DAX	"dax"		/* Enable direct access to bdev pages */
116 
117 /*
118  * Table driven mount option parser.
119  *
120  * Currently only used for remount, but it will be used for mount
121  * in the future, too.
122  */
123 enum {
124 	Opt_barrier,
125 	Opt_nobarrier,
126 	Opt_inode64,
127 	Opt_inode32,
128 	Opt_err
129 };
130 
131 static const match_table_t tokens = {
132 	{Opt_barrier, "barrier"},
133 	{Opt_nobarrier, "nobarrier"},
134 	{Opt_inode64, "inode64"},
135 	{Opt_inode32, "inode32"},
136 	{Opt_err, NULL}
137 };
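
/*
 * Example: match_token() from <linux/parser.h> walks this table, so
 * match_token("inode64", tokens, args) returns Opt_inode64, while any
 * unrecognised string falls through to the {Opt_err, NULL} catch-all.
 */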
138 
139 
140 STATIC unsigned long
141 suffix_kstrtoint(char *s, unsigned int base, int *res)
142 {
143 	int	last, shift_left_factor = 0, _res;
144 	char	*value = s;
145 
146 	last = strlen(value) - 1;
147 	if (value[last] == 'K' || value[last] == 'k') {
148 		shift_left_factor = 10;
149 		value[last] = '\0';
150 	}
151 	if (value[last] == 'M' || value[last] == 'm') {
152 		shift_left_factor = 20;
153 		value[last] = '\0';
154 	}
155 	if (value[last] == 'G' || value[last] == 'g') {
156 		shift_left_factor = 30;
157 		value[last] = '\0';
158 	}
159 
160 	if (kstrtoint(s, base, &_res))
161 		return -EINVAL;
162 	*res = _res << shift_left_factor;
163 	return 0;
164 }
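
/*
 * Example: with the shifts above, suffix_kstrtoint("64k", 10, &v)
 * stores v = 64 << 10 = 65536 and "8m" stores 8 << 20 = 8388608; a
 * plain "32768" is parsed unshifted. The suffix is stripped by
 * overwriting it with '\0' before kstrtoint() parses the digits.
 */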
165 
166 /*
167  * This function fills in xfs_mount_t fields based on mount args.
168  * Note: the superblock has _not_ yet been read in.
169  *
170  * Note that this function leaks the various device name allocations on
171  * failure.  The caller takes care of them.
172  */
173 STATIC int
174 xfs_parseargs(
175 	struct xfs_mount	*mp,
176 	char			*options)
177 {
178 	struct super_block	*sb = mp->m_super;
179 	char			*this_char, *value;
180 	int			dsunit = 0;
181 	int			dswidth = 0;
182 	int			iosize = 0;
183 	__uint8_t		iosizelog = 0;
184 
185 	/*
186 	 * set up the mount name first so all the errors will refer to the
187 	 * correct device.
188 	 */
189 	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
190 	if (!mp->m_fsname)
191 		return -ENOMEM;
192 	mp->m_fsname_len = strlen(mp->m_fsname) + 1;
193 
194 	/*
195 	 * Copy binary VFS mount flags we are interested in.
196 	 */
197 	if (sb->s_flags & MS_RDONLY)
198 		mp->m_flags |= XFS_MOUNT_RDONLY;
199 	if (sb->s_flags & MS_DIRSYNC)
200 		mp->m_flags |= XFS_MOUNT_DIRSYNC;
201 	if (sb->s_flags & MS_SYNCHRONOUS)
202 		mp->m_flags |= XFS_MOUNT_WSYNC;
203 
204 	/*
205 	 * Set some default flags that could be cleared by the mount option
206 	 * parsing.
207 	 */
208 	mp->m_flags |= XFS_MOUNT_BARRIER;
209 	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
210 
211 	/*
212 	 * These can be overridden by the mount option parsing.
213 	 */
214 	mp->m_logbufs = -1;
215 	mp->m_logbsize = -1;
216 
217 	if (!options)
218 		goto done;
219 
220 	while ((this_char = strsep(&options, ",")) != NULL) {
221 		if (!*this_char)
222 			continue;
223 		if ((value = strchr(this_char, '=')) != NULL)
224 			*value++ = 0;
225 
226 		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
227 			if (!value || !*value) {
228 				xfs_warn(mp, "%s option requires an argument",
229 					this_char);
230 				return -EINVAL;
231 			}
232 			if (kstrtoint(value, 10, &mp->m_logbufs))
233 				return -EINVAL;
234 		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
235 			if (!value || !*value) {
236 				xfs_warn(mp, "%s option requires an argument",
237 					this_char);
238 				return -EINVAL;
239 			}
240 			if (suffix_kstrtoint(value, 10, &mp->m_logbsize))
241 				return -EINVAL;
242 		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
243 			if (!value || !*value) {
244 				xfs_warn(mp, "%s option requires an argument",
245 					this_char);
246 				return -EINVAL;
247 			}
248 			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
249 			if (!mp->m_logname)
250 				return -ENOMEM;
251 		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
252 			xfs_warn(mp, "%s option not allowed on this system",
253 				this_char);
254 			return -EINVAL;
255 		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
256 			if (!value || !*value) {
257 				xfs_warn(mp, "%s option requires an argument",
258 					this_char);
259 				return -EINVAL;
260 			}
261 			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
262 			if (!mp->m_rtname)
263 				return -ENOMEM;
264 		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE) ||
265 			   !strcmp(this_char, MNTOPT_BIOSIZE)) {
266 			if (!value || !*value) {
267 				xfs_warn(mp, "%s option requires an argument",
268 					this_char);
269 				return -EINVAL;
270 			}
271 			if (suffix_kstrtoint(value, 10, &iosize))
272 				return -EINVAL;
273 			iosizelog = ffs(iosize) - 1;
274 		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
275 			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
276 			mp->m_flags |= XFS_MOUNT_GRPID;
277 		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
278 			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
279 			mp->m_flags &= ~XFS_MOUNT_GRPID;
280 		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
281 			mp->m_flags |= XFS_MOUNT_WSYNC;
282 		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
283 			mp->m_flags |= XFS_MOUNT_NORECOVERY;
284 		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
285 			mp->m_flags |= XFS_MOUNT_NOALIGN;
286 		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
287 			mp->m_flags |= XFS_MOUNT_SWALLOC;
288 		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
289 			if (!value || !*value) {
290 				xfs_warn(mp, "%s option requires an argument",
291 					this_char);
292 				return -EINVAL;
293 			}
294 			if (kstrtoint(value, 10, &dsunit))
295 				return -EINVAL;
296 		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
297 			if (!value || !*value) {
298 				xfs_warn(mp, "%s option requires an argument",
299 					this_char);
300 				return -EINVAL;
301 			}
302 			if (kstrtoint(value, 10, &dswidth))
303 				return -EINVAL;
304 		} else if (!strcmp(this_char, MNTOPT_32BITINODE)) {
305 			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
306 		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
307 			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
308 		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
309 			mp->m_flags |= XFS_MOUNT_NOUUID;
310 		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
311 			mp->m_flags |= XFS_MOUNT_BARRIER;
312 		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
313 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
314 		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
315 			mp->m_flags |= XFS_MOUNT_IKEEP;
316 		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
317 			mp->m_flags &= ~XFS_MOUNT_IKEEP;
318 		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
319 			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
320 		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
321 			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
322 		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
323 			mp->m_flags |= XFS_MOUNT_ATTR2;
324 		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
325 			mp->m_flags &= ~XFS_MOUNT_ATTR2;
326 			mp->m_flags |= XFS_MOUNT_NOATTR2;
327 		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
328 			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
329 		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
330 			mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
331 			mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
332 			mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
333 		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
334 			   !strcmp(this_char, MNTOPT_UQUOTA) ||
335 			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
336 			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
337 					 XFS_UQUOTA_ENFD);
338 		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
339 			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
340 			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
341 			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
342 		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
343 			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
344 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
345 					 XFS_PQUOTA_ENFD);
346 		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
347 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
348 			mp->m_qflags &= ~XFS_PQUOTA_ENFD;
349 		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
350 			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
351 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
352 					 XFS_GQUOTA_ENFD);
353 		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
354 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
355 			mp->m_qflags &= ~XFS_GQUOTA_ENFD;
356 		} else if (!strcmp(this_char, MNTOPT_DISCARD)) {
357 			mp->m_flags |= XFS_MOUNT_DISCARD;
358 		} else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
359 			mp->m_flags &= ~XFS_MOUNT_DISCARD;
360 #ifdef CONFIG_FS_DAX
361 		} else if (!strcmp(this_char, MNTOPT_DAX)) {
362 			mp->m_flags |= XFS_MOUNT_DAX;
363 #endif
364 		} else {
365 			xfs_warn(mp, "unknown mount option [%s].", this_char);
366 			return -EINVAL;
367 		}
368 	}
369 
370 	/*
371 	 * no recovery flag requires a read-only mount
372 	 */
373 	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
374 	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
375 		xfs_warn(mp, "no-recovery mounts must be read-only.");
376 		return -EINVAL;
377 	}
378 
379 	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
380 		xfs_warn(mp,
381 	"sunit and swidth options incompatible with the noalign option");
382 		return -EINVAL;
383 	}
384 
385 #ifndef CONFIG_XFS_QUOTA
386 	if (XFS_IS_QUOTA_RUNNING(mp)) {
387 		xfs_warn(mp, "quota support not available in this kernel.");
388 		return -EINVAL;
389 	}
390 #endif
391 
392 	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
393 		xfs_warn(mp, "sunit and swidth must be specified together");
394 		return -EINVAL;
395 	}
396 
397 	if (dsunit && (dswidth % dsunit != 0)) {
398 		xfs_warn(mp,
399 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
400 			dswidth, dsunit);
401 		return -EINVAL;
402 	}
403 
404 done:
405 	if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
406 		/*
407 		 * At this point the superblock has not been read
408 		 * in, therefore we do not know the block size.
409 		 * Before the mount call ends we will convert
410 		 * these to FSBs.
411 		 */
412 		mp->m_dalign = dsunit;
413 		mp->m_swidth = dswidth;
414 	}
415 
416 	if (mp->m_logbufs != -1 &&
417 	    mp->m_logbufs != 0 &&
418 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
419 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
420 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
421 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
422 		return -EINVAL;
423 	}
424 	if (mp->m_logbsize != -1 &&
425 	    mp->m_logbsize !=  0 &&
426 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
427 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
428 	     !is_power_of_2(mp->m_logbsize))) {
429 		xfs_warn(mp,
430 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
431 			mp->m_logbsize);
432 		return -EINVAL;
433 	}
434 
435 	if (iosizelog) {
436 		if (iosizelog > XFS_MAX_IO_LOG ||
437 		    iosizelog < XFS_MIN_IO_LOG) {
438 			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
439 				iosizelog, XFS_MIN_IO_LOG,
440 				XFS_MAX_IO_LOG);
441 			return -EINVAL;
442 		}
443 
444 		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
445 		mp->m_readio_log = iosizelog;
446 		mp->m_writeio_log = iosizelog;
447 	}
448 
449 	return 0;
450 }
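
/*
 * Example: mounting with "logbufs=8,logbsize=64k,noalign" leaves
 * mp->m_logbufs = 8 and mp->m_logbsize = 65536 (a power of two inside
 * the valid record size range) and sets XFS_MOUNT_NOALIGN in m_flags.
 */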
451 
452 struct proc_xfs_info {
453 	uint64_t	flag;
454 	char		*str;
455 };
456 
457 STATIC int
458 xfs_showargs(
459 	struct xfs_mount	*mp,
460 	struct seq_file		*m)
461 {
462 	static struct proc_xfs_info xfs_info_set[] = {
463 		/* the few simple ones we can get from the mount struct */
464 		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
465 		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
466 		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
467 		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
468 		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
469 		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
470 		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
471 		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
472 		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
473 		{ XFS_MOUNT_DISCARD,		"," MNTOPT_DISCARD },
474 		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_32BITINODE },
475 		{ XFS_MOUNT_DAX,		"," MNTOPT_DAX },
476 		{ 0, NULL }
477 	};
478 	static struct proc_xfs_info xfs_info_unset[] = {
479 		/* the few simple ones we can get from the mount struct */
480 		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
481 		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
482 		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
483 		{ 0, NULL }
484 	};
485 	struct proc_xfs_info	*xfs_infop;
486 
487 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
488 		if (mp->m_flags & xfs_infop->flag)
489 			seq_puts(m, xfs_infop->str);
490 	}
491 	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
492 		if (!(mp->m_flags & xfs_infop->flag))
493 			seq_puts(m, xfs_infop->str);
494 	}
495 
496 	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
497 		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
498 				(int)(1 << mp->m_writeio_log) >> 10);
499 
500 	if (mp->m_logbufs > 0)
501 		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
502 	if (mp->m_logbsize > 0)
503 		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
504 
505 	if (mp->m_logname)
506 		seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname);
507 	if (mp->m_rtname)
508 		seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname);
509 
510 	if (mp->m_dalign > 0)
511 		seq_printf(m, "," MNTOPT_SUNIT "=%d",
512 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
513 	if (mp->m_swidth > 0)
514 		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
515 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
516 
517 	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
518 		seq_puts(m, "," MNTOPT_USRQUOTA);
519 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
520 		seq_puts(m, "," MNTOPT_UQUOTANOENF);
521 
522 	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
523 		if (mp->m_qflags & XFS_PQUOTA_ENFD)
524 			seq_puts(m, "," MNTOPT_PRJQUOTA);
525 		else
526 			seq_puts(m, "," MNTOPT_PQUOTANOENF);
527 	}
528 	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
529 		if (mp->m_qflags & XFS_GQUOTA_ENFD)
530 			seq_puts(m, "," MNTOPT_GRPQUOTA);
531 		else
532 			seq_puts(m, "," MNTOPT_GQUOTANOENF);
533 	}
534 
535 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
536 		seq_puts(m, "," MNTOPT_NOQUOTA);
537 
538 	return 0;
539 }
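
/*
 * Example: for a mount left at the defaults set in xfs_parseargs()
 * (barriers on, compat iosize on, 64-bit inodes, no quota), the
 * xfs_info_set loop emits nothing, the xfs_info_unset loop contributes
 * ",inode64", and the final quota check appends ",noquota".
 */
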
540 __uint64_t
541 xfs_max_file_offset(
542 	unsigned int		blockshift)
543 {
544 	unsigned int		pagefactor = 1;
545 	unsigned int		bitshift = BITS_PER_LONG - 1;
546 
547 	/* Figure out maximum filesize, on Linux this can depend on
548 	 * the filesystem blocksize (on 32 bit platforms).
549 	 * __block_write_begin does this in an [unsigned] long...
550 	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
551 	 * So, for page sized blocks (4K on 32 bit platforms),
552 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
553 	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
554 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
555 	 * Note1: get_block_t takes a long (implicit cast from above)
556 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
557 	 * can optionally convert the [unsigned] long from above into
558 	 * an [unsigned] long long.
559 	 */
560 
561 #if BITS_PER_LONG == 32
562 # if defined(CONFIG_LBDAF)
563 	ASSERT(sizeof(sector_t) == 8);
564 	pagefactor = PAGE_CACHE_SIZE;
565 	bitshift = BITS_PER_LONG;
566 # else
567 	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
568 # endif
569 #endif
570 
571 	return (((__uint64_t)pagefactor) << bitshift) - 1;
572 }
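
/*
 * Worked example: on 64-bit kernels pagefactor stays 1 and bitshift 63,
 * giving 2^63 - 1. On 32-bit with CONFIG_LBDAF and 4k pages the limit
 * is (4096 << 32) - 1 (16TB - 1); without CONFIG_LBDAF and 4k blocks it
 * drops to (4096 << 31) - 1, the ~8TB wrap the comment above describes.
 */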
573 
574 /*
575  * xfs_set_inode32() and xfs_set_inode64() are passed an agcount
576  * because in the growfs case, mp->m_sb.sb_agcount is not updated
577  * yet to the potentially higher ag count.
578  */
579 xfs_agnumber_t
580 xfs_set_inode32(struct xfs_mount *mp, xfs_agnumber_t agcount)
581 {
582 	xfs_agnumber_t	index = 0;
583 	xfs_agnumber_t	maxagi = 0;
584 	xfs_sb_t	*sbp = &mp->m_sb;
585 	xfs_agnumber_t	max_metadata;
586 	xfs_agino_t	agino;
587 	xfs_ino_t	ino;
588 	xfs_perag_t	*pag;
589 
590 	/* Calculate how much should be reserved for inodes to meet
591 	 * the max inode percentage.
592 	 */
593 	if (mp->m_maxicount) {
594 		__uint64_t	icount;
595 
596 		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
597 		do_div(icount, 100);
598 		icount += sbp->sb_agblocks - 1;
599 		do_div(icount, sbp->sb_agblocks);
600 		max_metadata = icount;
601 	} else {
602 		max_metadata = agcount;
603 	}
604 
605 	agino =	XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
606 
607 	for (index = 0; index < agcount; index++) {
608 		ino = XFS_AGINO_TO_INO(mp, index, agino);
609 
610 		if (ino > XFS_MAXINUMBER_32) {
611 			pag = xfs_perag_get(mp, index);
612 			pag->pagi_inodeok = 0;
613 			pag->pagf_metadata = 0;
614 			xfs_perag_put(pag);
615 			continue;
616 		}
617 
618 		pag = xfs_perag_get(mp, index);
619 		pag->pagi_inodeok = 1;
620 		maxagi++;
621 		if (index < max_metadata)
622 			pag->pagf_metadata = 1;
623 		xfs_perag_put(pag);
624 	}
625 	mp->m_flags |= (XFS_MOUNT_32BITINODES |
626 			XFS_MOUNT_SMALL_INUMS);
627 
628 	return maxagi;
629 }
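
/*
 * Example: the max_metadata calculation above is a ceiling divide,
 * ceil(dblocks * imax_pct / 100 / agblocks). With sb_dblocks = 1000000,
 * sb_imax_pct = 25 and sb_agblocks = 250000 it yields 1, so only AG 0
 * gets pagf_metadata = 1; every AG whose highest inode number still
 * fits in 32 bits keeps pagi_inodeok = 1.
 */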
630 
631 xfs_agnumber_t
632 xfs_set_inode64(struct xfs_mount *mp, xfs_agnumber_t agcount)
633 {
634 	xfs_agnumber_t index = 0;
635 
636 	for (index = 0; index < agcount; index++) {
637 		struct xfs_perag	*pag;
638 
639 		pag = xfs_perag_get(mp, index);
640 		pag->pagi_inodeok = 1;
641 		pag->pagf_metadata = 0;
642 		xfs_perag_put(pag);
643 	}
644 
645 	/* There is no need for lock protection on m_flags,
646 	 * the rw_semaphore of the VFS superblock is locked
647 	 * during mount/umount/remount operations, so this is
648 	 * enough to avoid concurrency on the m_flags field
649 	 */
650 	mp->m_flags &= ~(XFS_MOUNT_32BITINODES |
651 			 XFS_MOUNT_SMALL_INUMS);
652 	return index;
653 }
654 
655 STATIC int
656 xfs_blkdev_get(
657 	xfs_mount_t		*mp,
658 	const char		*name,
659 	struct block_device	**bdevp)
660 {
661 	int			error = 0;
662 
663 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
664 				    mp);
665 	if (IS_ERR(*bdevp)) {
666 		error = PTR_ERR(*bdevp);
667 		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
668 	}
669 
670 	return error;
671 }
672 
673 STATIC void
674 xfs_blkdev_put(
675 	struct block_device	*bdev)
676 {
677 	if (bdev)
678 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
679 }
680 
681 void
682 xfs_blkdev_issue_flush(
683 	xfs_buftarg_t		*buftarg)
684 {
685 	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
686 }
687 
688 STATIC void
689 xfs_close_devices(
690 	struct xfs_mount	*mp)
691 {
692 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
693 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
694 		xfs_free_buftarg(mp, mp->m_logdev_targp);
695 		xfs_blkdev_put(logdev);
696 	}
697 	if (mp->m_rtdev_targp) {
698 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
699 		xfs_free_buftarg(mp, mp->m_rtdev_targp);
700 		xfs_blkdev_put(rtdev);
701 	}
702 	xfs_free_buftarg(mp, mp->m_ddev_targp);
703 }
704 
705 /*
706  * The file system configurations are:
707  *	(1) device (partition) with data and internal log
708  *	(2) logical volume with data and log subvolumes.
709  *	(3) logical volume with data, log, and realtime subvolumes.
710  *
711  * We only have to handle opening the log and realtime volumes here if
712  * they are present.  The data subvolume has already been opened by
713  * get_sb_bdev() and is stored in sb->s_bdev.
714  */
715 STATIC int
716 xfs_open_devices(
717 	struct xfs_mount	*mp)
718 {
719 	struct block_device	*ddev = mp->m_super->s_bdev;
720 	struct block_device	*logdev = NULL, *rtdev = NULL;
721 	int			error;
722 
723 	/*
724 	 * Open real time and log devices - order is important.
725 	 */
726 	if (mp->m_logname) {
727 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
728 		if (error)
729 			goto out;
730 	}
731 
732 	if (mp->m_rtname) {
733 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
734 		if (error)
735 			goto out_close_logdev;
736 
737 		if (rtdev == ddev || rtdev == logdev) {
738 			xfs_warn(mp,
739 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
740 			error = -EINVAL;
741 			goto out_close_rtdev;
742 		}
743 	}
744 
745 	/*
746 	 * Setup xfs_mount buffer target pointers
747 	 */
748 	error = -ENOMEM;
749 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
750 	if (!mp->m_ddev_targp)
751 		goto out_close_rtdev;
752 
753 	if (rtdev) {
754 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
755 		if (!mp->m_rtdev_targp)
756 			goto out_free_ddev_targ;
757 	}
758 
759 	if (logdev && logdev != ddev) {
760 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
761 		if (!mp->m_logdev_targp)
762 			goto out_free_rtdev_targ;
763 	} else {
764 		mp->m_logdev_targp = mp->m_ddev_targp;
765 	}
766 
767 	return 0;
768 
769  out_free_rtdev_targ:
770 	if (mp->m_rtdev_targp)
771 		xfs_free_buftarg(mp, mp->m_rtdev_targp);
772  out_free_ddev_targ:
773 	xfs_free_buftarg(mp, mp->m_ddev_targp);
774  out_close_rtdev:
775 	xfs_blkdev_put(rtdev);
776  out_close_logdev:
777 	if (logdev && logdev != ddev)
778 		xfs_blkdev_put(logdev);
779  out:
780 	return error;
781 }
782 
783 /*
784  * Setup xfs_mount buffer target pointers based on superblock
785  */
786 STATIC int
787 xfs_setup_devices(
788 	struct xfs_mount	*mp)
789 {
790 	int			error;
791 
792 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
793 	if (error)
794 		return error;
795 
796 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
797 		unsigned int	log_sector_size = BBSIZE;
798 
799 		if (xfs_sb_version_hassector(&mp->m_sb))
800 			log_sector_size = mp->m_sb.sb_logsectsize;
801 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
802 					    log_sector_size);
803 		if (error)
804 			return error;
805 	}
806 	if (mp->m_rtdev_targp) {
807 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
808 					    mp->m_sb.sb_sectsize);
809 		if (error)
810 			return error;
811 	}
812 
813 	return 0;
814 }
815 
816 STATIC int
817 xfs_init_mount_workqueues(
818 	struct xfs_mount	*mp)
819 {
820 	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
821 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
822 	if (!mp->m_buf_workqueue)
823 		goto out;
824 
825 	mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
826 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
827 	if (!mp->m_data_workqueue)
828 		goto out_destroy_buf;
829 
830 	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
831 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
832 	if (!mp->m_unwritten_workqueue)
833 		goto out_destroy_data_iodone_queue;
834 
835 	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
836 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
837 	if (!mp->m_cil_workqueue)
838 		goto out_destroy_unwritten;
839 
840 	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
841 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
842 	if (!mp->m_reclaim_workqueue)
843 		goto out_destroy_cil;
844 
845 	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
846 			WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
847 			mp->m_fsname);
848 	if (!mp->m_log_workqueue)
849 		goto out_destroy_reclaim;
850 
851 	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
852 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
853 	if (!mp->m_eofblocks_workqueue)
854 		goto out_destroy_log;
855 
856 	return 0;
857 
858 out_destroy_log:
859 	destroy_workqueue(mp->m_log_workqueue);
860 out_destroy_reclaim:
861 	destroy_workqueue(mp->m_reclaim_workqueue);
862 out_destroy_cil:
863 	destroy_workqueue(mp->m_cil_workqueue);
864 out_destroy_unwritten:
865 	destroy_workqueue(mp->m_unwritten_workqueue);
866 out_destroy_data_iodone_queue:
867 	destroy_workqueue(mp->m_data_workqueue);
868 out_destroy_buf:
869 	destroy_workqueue(mp->m_buf_workqueue);
870 out:
871 	return -ENOMEM;
872 }
873 
874 STATIC void
875 xfs_destroy_mount_workqueues(
876 	struct xfs_mount	*mp)
877 {
878 	destroy_workqueue(mp->m_eofblocks_workqueue);
879 	destroy_workqueue(mp->m_log_workqueue);
880 	destroy_workqueue(mp->m_reclaim_workqueue);
881 	destroy_workqueue(mp->m_cil_workqueue);
882 	destroy_workqueue(mp->m_data_workqueue);
883 	destroy_workqueue(mp->m_unwritten_workqueue);
884 	destroy_workqueue(mp->m_buf_workqueue);
885 }
886 
887 /*
888  * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
889  * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
890  * for IO to complete so that we effectively throttle multiple callers to the
891  * rate at which IO is completing.
892  */
893 void
894 xfs_flush_inodes(
895 	struct xfs_mount	*mp)
896 {
897 	struct super_block	*sb = mp->m_super;
898 
899 	if (down_read_trylock(&sb->s_umount)) {
900 		sync_inodes_sb(sb);
901 		up_read(&sb->s_umount);
902 	}
903 }
904 
905 /* Catch misguided souls that try to use this interface on XFS */
906 STATIC struct inode *
907 xfs_fs_alloc_inode(
908 	struct super_block	*sb)
909 {
910 	BUG();
911 	return NULL;
912 }
913 
914 /*
915  * Now that the generic code is guaranteed not to be accessing
916  * the linux inode, we can reclaim the inode.
917  */
918 STATIC void
919 xfs_fs_destroy_inode(
920 	struct inode		*inode)
921 {
922 	struct xfs_inode	*ip = XFS_I(inode);
923 
924 	trace_xfs_destroy_inode(ip);
925 
926 	XFS_STATS_INC(ip->i_mount, vn_reclaim);
927 
928 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
929 
930 	/*
931 	 * We should never get here with one of the reclaim flags already set.
932 	 */
933 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
934 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
935 
936 	/*
937 	 * We always use background reclaim here because even if the
938 	 * inode is clean, it still may be under IO and hence we have
939 	 * to take the flush lock. The background reclaim path handles
940 	 * this more efficiently than we can here, so simply let background
941 	 * reclaim tear down all inodes.
942 	 */
943 	xfs_inode_set_reclaim_tag(ip);
944 }
945 
946 /*
947  * Slab object creation initialisation for the XFS inode.
948  * This covers only the idempotent fields in the XFS inode;
949  * all other fields need to be initialised on allocation
950  * from the slab. This avoids the need to repeatedly initialise
951  * fields in the xfs inode that are left in the initialised state
952  * when freeing the inode.
953  */
954 STATIC void
955 xfs_fs_inode_init_once(
956 	void			*inode)
957 {
958 	struct xfs_inode	*ip = inode;
959 
960 	memset(ip, 0, sizeof(struct xfs_inode));
961 
962 	/* vfs inode */
963 	inode_init_once(VFS_I(ip));
964 
965 	/* xfs inode */
966 	atomic_set(&ip->i_pincount, 0);
967 	spin_lock_init(&ip->i_flags_lock);
968 
969 	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
970 		     "xfsino", ip->i_ino);
971 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
972 		     "xfsino", ip->i_ino);
973 }
974 
975 STATIC void
976 xfs_fs_evict_inode(
977 	struct inode		*inode)
978 {
979 	xfs_inode_t		*ip = XFS_I(inode);
980 
981 	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
982 
983 	trace_xfs_evict_inode(ip);
984 
985 	truncate_inode_pages_final(&inode->i_data);
986 	clear_inode(inode);
987 	XFS_STATS_INC(ip->i_mount, vn_rele);
988 	XFS_STATS_INC(ip->i_mount, vn_remove);
989 
990 	xfs_inactive(ip);
991 }
992 
993 /*
994  * We do an unlocked check for XFS_IDONTCACHE here because we are already
995  * serialised against cache hits here via the inode->i_lock and igrab() in
996  * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
997  * racing with us, and it avoids needing to grab a spinlock here for every inode
998  * we drop the final reference on.
999  */
1000 STATIC int
1001 xfs_fs_drop_inode(
1002 	struct inode		*inode)
1003 {
1004 	struct xfs_inode	*ip = XFS_I(inode);
1005 
1006 	return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
1007 }
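
/*
 * A non-zero return from ->drop_inode tells iput_final() to evict the
 * inode instead of keeping it cached, so inodes flagged XFS_IDONTCACHE
 * (e.g. from bulkstat-style lookups) leave the VFS cache as soon as the
 * last reference is dropped.
 */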
1008 
1009 STATIC void
1010 xfs_free_fsname(
1011 	struct xfs_mount	*mp)
1012 {
1013 	kfree(mp->m_fsname);
1014 	kfree(mp->m_rtname);
1015 	kfree(mp->m_logname);
1016 }
1017 
1018 STATIC int
1019 xfs_fs_sync_fs(
1020 	struct super_block	*sb,
1021 	int			wait)
1022 {
1023 	struct xfs_mount	*mp = XFS_M(sb);
1024 
1025 	/*
1026 	 * Doing anything during the async pass would be counterproductive.
1027 	 */
1028 	if (!wait)
1029 		return 0;
1030 
1031 	xfs_log_force(mp, XFS_LOG_SYNC);
1032 	if (laptop_mode) {
1033 		/*
1034 		 * The disk must be active because we're syncing.
1035 		 * We schedule log work now (now that the disk is
1036 		 * active) instead of later (when it might not be).
1037 		 */
1038 		flush_delayed_work(&mp->m_log->l_work);
1039 	}
1040 
1041 	return 0;
1042 }
1043 
1044 STATIC int
1045 xfs_fs_statfs(
1046 	struct dentry		*dentry,
1047 	struct kstatfs		*statp)
1048 {
1049 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
1050 	xfs_sb_t		*sbp = &mp->m_sb;
1051 	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
1052 	__uint64_t		fakeinos, id;
1053 	__uint64_t		icount;
1054 	__uint64_t		ifree;
1055 	__uint64_t		fdblocks;
1056 	xfs_extlen_t		lsize;
1057 	__int64_t		ffree;
1058 
1059 	statp->f_type = XFS_SB_MAGIC;
1060 	statp->f_namelen = MAXNAMELEN - 1;
1061 
1062 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1063 	statp->f_fsid.val[0] = (u32)id;
1064 	statp->f_fsid.val[1] = (u32)(id >> 32);
1065 
1066 	icount = percpu_counter_sum(&mp->m_icount);
1067 	ifree = percpu_counter_sum(&mp->m_ifree);
1068 	fdblocks = percpu_counter_sum(&mp->m_fdblocks);
1069 
1070 	spin_lock(&mp->m_sb_lock);
1071 	statp->f_bsize = sbp->sb_blocksize;
1072 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1073 	statp->f_blocks = sbp->sb_dblocks - lsize;
1074 	spin_unlock(&mp->m_sb_lock);
1075 
1076 	statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1077 	statp->f_bavail = statp->f_bfree;
1078 
1079 	fakeinos = statp->f_bfree << sbp->sb_inopblog;
1080 	statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1081 	if (mp->m_maxicount)
1082 		statp->f_files = min_t(typeof(statp->f_files),
1083 					statp->f_files,
1084 					mp->m_maxicount);
1085 
1086 	/* If sb_icount overshot maxicount, report actual allocation */
1087 	statp->f_files = max_t(typeof(statp->f_files),
1088 					statp->f_files,
1089 					sbp->sb_icount);
1090 
1091 	/* make sure statp->f_ffree does not underflow */
1092 	ffree = statp->f_files - (icount - ifree);
1093 	statp->f_ffree = max_t(__int64_t, ffree, 0);
1094 
1095 
1096 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1097 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
1098 			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
1099 		xfs_qm_statvfs(ip, statp);
1100 	return 0;
1101 }
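
/*
 * Example: fakeinos treats every free block as potential inodes - with
 * 4k blocks and 512-byte inodes sb_inopblog is 3, so 1000 free blocks
 * contribute 8000 "fake" inodes to f_files before the result is capped
 * by m_maxicount and floored at the already-allocated sb_icount.
 */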
1102 
1103 STATIC void
1104 xfs_save_resvblks(struct xfs_mount *mp)
1105 {
1106 	__uint64_t resblks = 0;
1107 
1108 	mp->m_resblks_save = mp->m_resblks;
1109 	xfs_reserve_blocks(mp, &resblks, NULL);
1110 }
1111 
1112 STATIC void
1113 xfs_restore_resvblks(struct xfs_mount *mp)
1114 {
1115 	__uint64_t resblks;
1116 
1117 	if (mp->m_resblks_save) {
1118 		resblks = mp->m_resblks_save;
1119 		mp->m_resblks_save = 0;
1120 	} else
1121 		resblks = xfs_default_resblks(mp);
1122 
1123 	xfs_reserve_blocks(mp, &resblks, NULL);
1124 }
1125 
1126 /*
1127  * Trigger writeback of all the dirty metadata in the file system.
1128  *
1129  * This ensures that the metadata is written to their location on disk rather
1130  * than just existing in transactions in the log. This means after a quiesce
1131  * there is no log replay required to write the inodes to disk - this is the
1132  * primary difference between a sync and a quiesce.
1133  *
1134  * Note: xfs_log_quiesce() stops background log work - the callers must ensure
1135  * it is started again when appropriate.
1136  */
1137 static void
1138 xfs_quiesce_attr(
1139 	struct xfs_mount	*mp)
1140 {
1141 	int	error = 0;
1142 
1143 	/* wait for all modifications to complete */
1144 	while (atomic_read(&mp->m_active_trans) > 0)
1145 		delay(100);
1146 
1147 	/* force the log to unpin objects from the now complete transactions */
1148 	xfs_log_force(mp, XFS_LOG_SYNC);
1149 
1150 	/* reclaim inodes to do any IO before the freeze completes */
1151 	xfs_reclaim_inodes(mp, 0);
1152 	xfs_reclaim_inodes(mp, SYNC_WAIT);
1153 
1154 	/* Push the superblock and write an unmount record */
1155 	error = xfs_log_sbcount(mp);
1156 	if (error)
1157 		xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
1158 				"Frozen image may not be consistent.");
1159 	/*
1160 	 * Just warn here till VFS can correctly support
1161 	 * read-only remount without racing.
1162 	 */
1163 	WARN_ON(atomic_read(&mp->m_active_trans) != 0);
1164 
1165 	xfs_log_quiesce(mp);
1166 }
1167 
1168 STATIC int
1169 xfs_fs_remount(
1170 	struct super_block	*sb,
1171 	int			*flags,
1172 	char			*options)
1173 {
1174 	struct xfs_mount	*mp = XFS_M(sb);
1175 	xfs_sb_t		*sbp = &mp->m_sb;
1176 	substring_t		args[MAX_OPT_ARGS];
1177 	char			*p;
1178 	int			error;
1179 
1180 	sync_filesystem(sb);
1181 	while ((p = strsep(&options, ",")) != NULL) {
1182 		int token;
1183 
1184 		if (!*p)
1185 			continue;
1186 
1187 		token = match_token(p, tokens, args);
1188 		switch (token) {
1189 		case Opt_barrier:
1190 			mp->m_flags |= XFS_MOUNT_BARRIER;
1191 			break;
1192 		case Opt_nobarrier:
1193 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
1194 			break;
1195 		case Opt_inode64:
1196 			mp->m_maxagi = xfs_set_inode64(mp, sbp->sb_agcount);
1197 			break;
1198 		case Opt_inode32:
1199 			mp->m_maxagi = xfs_set_inode32(mp, sbp->sb_agcount);
1200 			break;
1201 		default:
1202 			/*
1203 			 * Logically we would return an error here to prevent
1204 			 * users from believing they might have changed
1205 			 * mount options using remount which can't be changed.
1206 			 *
1207 			 * But unfortunately mount(8) adds all options from
1208 			 * mtab and fstab to the mount arguments in some cases
1209 			 * so we can't blindly reject options, but have to
1210 			 * check for each specified option if it actually
1211 			 * differs from the currently set option and only
1212 			 * reject it if that's the case.
1213 			 *
1214 			 * Until that is implemented we return success for
1215 			 * every remount request, and silently ignore all
1216 			 * options that we can't actually change.
1217 			 */
1218 #if 0
1219 			xfs_info(mp,
1220 		"mount option \"%s\" not supported for remount", p);
1221 			return -EINVAL;
1222 #else
1223 			break;
1224 #endif
1225 		}
1226 	}
1227 
1228 	/* ro -> rw */
1229 	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1230 		if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1231 			xfs_warn(mp,
1232 		"ro->rw transition prohibited on norecovery mount");
1233 			return -EINVAL;
1234 		}
1235 
1236 		if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1237 		    xfs_sb_has_ro_compat_feature(sbp,
1238 					XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1239 			xfs_warn(mp,
1240 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1241 				(sbp->sb_features_ro_compat &
1242 					XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1243 			return -EINVAL;
1244 		}
1245 
1246 		mp->m_flags &= ~XFS_MOUNT_RDONLY;
1247 
1248 		/*
1249 		 * If this is the first remount to writeable state we
1250 		 * might have some superblock changes to update.
1251 		 */
1252 		if (mp->m_update_sb) {
1253 			error = xfs_sync_sb(mp, false);
1254 			if (error) {
1255 				xfs_warn(mp, "failed to write sb changes");
1256 				return error;
1257 			}
1258 			mp->m_update_sb = false;
1259 		}
1260 
1261 		/*
1262 		 * Fill out the reserve pool if it is empty. Use the stashed
1263 		 * value if it is non-zero, otherwise go with the default.
1264 		 */
1265 		xfs_restore_resvblks(mp);
1266 		xfs_log_work_queue(mp);
1267 	}
1268 
1269 	/* rw -> ro */
1270 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
1271 		/*
1272 		 * Before we sync the metadata, we need to free up the reserve
1273 		 * block pool so that the used block count in the superblock on
1274 		 * disk is correct at the end of the remount. Stash the current
1275 		 * reserve pool size so that if we get remounted rw, we can
1276 		 * return it to the same size.
1277 		 */
1278 		xfs_save_resvblks(mp);
1279 		xfs_quiesce_attr(mp);
1280 		mp->m_flags |= XFS_MOUNT_RDONLY;
1281 	}
1282 
1283 	return 0;
1284 }
1285 
1286 /*
1287  * Second stage of a freeze. The data is already frozen so we only
1288  * need to take care of the metadata. Once that's done sync the superblock
1289  * to the log to dirty it in case of a crash while frozen. This ensures that we
1290  * will recover the unlinked inode lists on the next mount.
1291  */
1292 STATIC int
1293 xfs_fs_freeze(
1294 	struct super_block	*sb)
1295 {
1296 	struct xfs_mount	*mp = XFS_M(sb);
1297 
1298 	xfs_save_resvblks(mp);
1299 	xfs_quiesce_attr(mp);
1300 	return xfs_sync_sb(mp, true);
1301 }
1302 
1303 STATIC int
1304 xfs_fs_unfreeze(
1305 	struct super_block	*sb)
1306 {
1307 	struct xfs_mount	*mp = XFS_M(sb);
1308 
1309 	xfs_restore_resvblks(mp);
1310 	xfs_log_work_queue(mp);
1311 	return 0;
1312 }
1313 
1314 STATIC int
1315 xfs_fs_show_options(
1316 	struct seq_file		*m,
1317 	struct dentry		*root)
1318 {
1319 	return xfs_showargs(XFS_M(root->d_sb), m);
1320 }
1321 
1322 /*
1323  * This function fills in xfs_mount_t fields based on mount args.
1324  * Note: the superblock _has_ now been read in.
1325  */
1326 STATIC int
1327 xfs_finish_flags(
1328 	struct xfs_mount	*mp)
1329 {
1330 	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1331 
1332 	/* Fail a mount where the logbuf is smaller than the log stripe */
1333 	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1334 		if (mp->m_logbsize <= 0 &&
1335 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1336 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1337 		} else if (mp->m_logbsize > 0 &&
1338 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1339 			xfs_warn(mp,
1340 		"logbuf size must be greater than or equal to log stripe size");
1341 			return -EINVAL;
1342 		}
1343 	} else {
1344 		/* Fail a mount if the logbuf is larger than 32K */
1345 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1346 			xfs_warn(mp,
1347 		"logbuf size for version 1 logs must be 16K or 32K");
1348 			return -EINVAL;
1349 		}
1350 	}
1351 
1352 	/*
1353 	 * V5 filesystems always use attr2 format for attributes.
1354 	 */
1355 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
1356 	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1357 		xfs_warn(mp,
1358 "Cannot mount a V5 filesystem as %s. %s is always enabled for V5 filesystems.",
1359 			MNTOPT_NOATTR2, MNTOPT_ATTR2);
1360 		return -EINVAL;
1361 	}
1362 
1363 	/*
1364 	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1365 	 * told by noattr2 to turn it off
1366 	 */
1367 	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1368 	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
1369 		mp->m_flags |= XFS_MOUNT_ATTR2;
1370 
1371 	/*
1372 	 * prohibit r/w mounts of read-only filesystems
1373 	 */
1374 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1375 		xfs_warn(mp,
1376 			"cannot mount a read-only filesystem as read-write");
1377 		return -EROFS;
1378 	}
1379 
1380 	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
1381 	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
1382 	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
1383 		xfs_warn(mp,
1384 		  "Super block does not support project and group quota together");
1385 		return -EINVAL;
1386 	}
1387 
1388 	return 0;
1389 }
1390 
1391 static int
1392 xfs_init_percpu_counters(
1393 	struct xfs_mount	*mp)
1394 {
1395 	int		error;
1396 
1397 	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1398 	if (error)
1399 		return -ENOMEM;
1400 
1401 	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1402 	if (error)
1403 		goto free_icount;
1404 
1405 	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1406 	if (error)
1407 		goto free_ifree;
1408 
1409 	return 0;
1410 
1411 free_ifree:
1412 	percpu_counter_destroy(&mp->m_ifree);
1413 free_icount:
1414 	percpu_counter_destroy(&mp->m_icount);
1415 	return -ENOMEM;
1416 }
1417 
1418 void
1419 xfs_reinit_percpu_counters(
1420 	struct xfs_mount	*mp)
1421 {
1422 	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1423 	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1424 	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1425 }
1426 
1427 static void
1428 xfs_destroy_percpu_counters(
1429 	struct xfs_mount	*mp)
1430 {
1431 	percpu_counter_destroy(&mp->m_icount);
1432 	percpu_counter_destroy(&mp->m_ifree);
1433 	percpu_counter_destroy(&mp->m_fdblocks);
1434 }
1435 
1436 STATIC int
1437 xfs_fs_fill_super(
1438 	struct super_block	*sb,
1439 	void			*data,
1440 	int			silent)
1441 {
1442 	struct inode		*root;
1443 	struct xfs_mount	*mp = NULL;
1444 	int			flags = 0, error = -ENOMEM;
1445 
1446 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1447 	if (!mp)
1448 		goto out;
1449 
1450 	spin_lock_init(&mp->m_sb_lock);
1451 	mutex_init(&mp->m_growlock);
1452 	atomic_set(&mp->m_active_trans, 0);
1453 	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1454 	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1455 	mp->m_kobj.kobject.kset = xfs_kset;
1456 
1457 	mp->m_super = sb;
1458 	sb->s_fs_info = mp;
1459 
1460 	error = xfs_parseargs(mp, (char *)data);
1461 	if (error)
1462 		goto out_free_fsname;
1463 
1464 	sb_min_blocksize(sb, BBSIZE);
1465 	sb->s_xattr = xfs_xattr_handlers;
1466 	sb->s_export_op = &xfs_export_operations;
1467 #ifdef CONFIG_XFS_QUOTA
1468 	sb->s_qcop = &xfs_quotactl_operations;
1469 	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1470 #endif
1471 	sb->s_op = &xfs_super_operations;
1472 
1473 	if (silent)
1474 		flags |= XFS_MFSI_QUIET;
1475 
1476 	error = xfs_open_devices(mp);
1477 	if (error)
1478 		goto out_free_fsname;
1479 
1480 	error = xfs_init_mount_workqueues(mp);
1481 	if (error)
1482 		goto out_close_devices;
1483 
1484 	error = xfs_init_percpu_counters(mp);
1485 	if (error)
1486 		goto out_destroy_workqueues;
1487 
1488 	/* Allocate stats memory before we do operations that might use it */
1489 	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1490 	if (!mp->m_stats.xs_stats) {
1491 		error = -ENOMEM;
1492 		goto out_destroy_counters;
1493 	}
1494 
1495 	error = xfs_readsb(mp, flags);
1496 	if (error)
1497 		goto out_free_stats;
1498 
1499 	error = xfs_finish_flags(mp);
1500 	if (error)
1501 		goto out_free_sb;
1502 
1503 	error = xfs_setup_devices(mp);
1504 	if (error)
1505 		goto out_free_sb;
1506 
1507 	error = xfs_filestream_mount(mp);
1508 	if (error)
1509 		goto out_free_sb;
1510 
1511 	/*
1512 	 * we must configure the block size in the superblock before we run the
1513 	 * full mount process as the mount process can lookup and cache inodes.
1514 	 */
1515 	sb->s_magic = XFS_SB_MAGIC;
1516 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1517 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1518 	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1519 	sb->s_max_links = XFS_MAXLINK;
1520 	sb->s_time_gran = 1;
1521 	set_posix_acl_flag(sb);
1522 
1523 	/* version 5 superblocks support inode version counters. */
1524 	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1525 		sb->s_flags |= MS_I_VERSION;
1526 
1527 	if (mp->m_flags & XFS_MOUNT_DAX) {
1528 		xfs_warn(mp,
1529 	"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1530 		if (sb->s_blocksize != PAGE_SIZE) {
1531 			xfs_alert(mp,
1532 		"Filesystem block size invalid for DAX Turning DAX off.");
1533 			mp->m_flags &= ~XFS_MOUNT_DAX;
1534 		} else if (!sb->s_bdev->bd_disk->fops->direct_access) {
1535 			xfs_alert(mp,
1536 		"Block device does not support DAX Turning DAX off.");
1537 			mp->m_flags &= ~XFS_MOUNT_DAX;
1538 		}
1539 	}
1540 
1541 	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
1542 		xfs_alert(mp,
1543 	"EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
1544 
1545 	error = xfs_mountfs(mp);
1546 	if (error)
1547 		goto out_filestream_unmount;
1548 
1549 	root = igrab(VFS_I(mp->m_rootip));
1550 	if (!root) {
1551 		error = -ENOENT;
1552 		goto out_unmount;
1553 	}
1554 	sb->s_root = d_make_root(root);
1555 	if (!sb->s_root) {
1556 		error = -ENOMEM;
1557 		goto out_unmount;
1558 	}
1559 
1560 	return 0;
1561 
1562  out_filestream_unmount:
1563 	xfs_filestream_unmount(mp);
1564  out_free_sb:
1565 	xfs_freesb(mp);
1566  out_free_stats:
1567 	free_percpu(mp->m_stats.xs_stats);
1568  out_destroy_counters:
1569 	xfs_destroy_percpu_counters(mp);
1570  out_destroy_workqueues:
1571 	xfs_destroy_mount_workqueues(mp);
1572  out_close_devices:
1573 	xfs_close_devices(mp);
1574  out_free_fsname:
1575 	sb->s_fs_info = NULL;
1576 	xfs_free_fsname(mp);
1577 	kfree(mp);
1578  out:
1579 	return error;
1580 
1581  out_unmount:
1582 	xfs_filestream_unmount(mp);
1583 	xfs_unmountfs(mp);
1584 	goto out_free_sb;
1585 }
1586 
1587 STATIC void
1588 xfs_fs_put_super(
1589 	struct super_block	*sb)
1590 {
1591 	struct xfs_mount	*mp = XFS_M(sb);
1592 
1593 	/* if ->fill_super failed, we have no mount to tear down */
1594 	if (!sb->s_fs_info)
1595 		return;
1596 
1597 	xfs_notice(mp, "Unmounting Filesystem");
1598 	xfs_filestream_unmount(mp);
1599 	xfs_unmountfs(mp);
1600 
1601 	xfs_freesb(mp);
1602 	free_percpu(mp->m_stats.xs_stats);
1603 	xfs_destroy_percpu_counters(mp);
1604 	xfs_destroy_mount_workqueues(mp);
1605 	xfs_close_devices(mp);
1606 
1607 	sb->s_fs_info = NULL;
1608 	xfs_free_fsname(mp);
1609 	kfree(mp);
1610 }
1611 
1612 STATIC struct dentry *
1613 xfs_fs_mount(
1614 	struct file_system_type	*fs_type,
1615 	int			flags,
1616 	const char		*dev_name,
1617 	void			*data)
1618 {
1619 	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1620 }
1621 
1622 static long
1623 xfs_fs_nr_cached_objects(
1624 	struct super_block	*sb,
1625 	struct shrink_control	*sc)
1626 {
1627 	/* Paranoia: catch incorrect calls during mount setup or teardown */
1628 	if (WARN_ON_ONCE(!sb->s_fs_info))
1629 		return 0;
1630 	return xfs_reclaim_inodes_count(XFS_M(sb));
1631 }
1632 
1633 static long
1634 xfs_fs_free_cached_objects(
1635 	struct super_block	*sb,
1636 	struct shrink_control	*sc)
1637 {
1638 	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1639 }
1640 
1641 static const struct super_operations xfs_super_operations = {
1642 	.alloc_inode		= xfs_fs_alloc_inode,
1643 	.destroy_inode		= xfs_fs_destroy_inode,
1644 	.evict_inode		= xfs_fs_evict_inode,
1645 	.drop_inode		= xfs_fs_drop_inode,
1646 	.put_super		= xfs_fs_put_super,
1647 	.sync_fs		= xfs_fs_sync_fs,
1648 	.freeze_fs		= xfs_fs_freeze,
1649 	.unfreeze_fs		= xfs_fs_unfreeze,
1650 	.statfs			= xfs_fs_statfs,
1651 	.remount_fs		= xfs_fs_remount,
1652 	.show_options		= xfs_fs_show_options,
1653 	.nr_cached_objects	= xfs_fs_nr_cached_objects,
1654 	.free_cached_objects	= xfs_fs_free_cached_objects,
1655 };
1656 
1657 static struct file_system_type xfs_fs_type = {
1658 	.owner			= THIS_MODULE,
1659 	.name			= "xfs",
1660 	.mount			= xfs_fs_mount,
1661 	.kill_sb		= kill_block_super,
1662 	.fs_flags		= FS_REQUIRES_DEV,
1663 };
1664 MODULE_ALIAS_FS("xfs");
1665 
1666 STATIC int __init
1667 xfs_init_zones(void)
1668 {
1669 
1670 	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
1671 	if (!xfs_ioend_zone)
1672 		goto out;
1673 
1674 	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
1675 						  xfs_ioend_zone);
1676 	if (!xfs_ioend_pool)
1677 		goto out_destroy_ioend_zone;
1678 
1679 	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1680 						"xfs_log_ticket");
1681 	if (!xfs_log_ticket_zone)
1682 		goto out_destroy_ioend_pool;
1683 
1684 	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
1685 						"xfs_bmap_free_item");
1686 	if (!xfs_bmap_free_item_zone)
1687 		goto out_destroy_log_ticket_zone;
1688 
1689 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1690 						"xfs_btree_cur");
1691 	if (!xfs_btree_cur_zone)
1692 		goto out_destroy_bmap_free_item_zone;
1693 
1694 	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1695 						"xfs_da_state");
1696 	if (!xfs_da_state_zone)
1697 		goto out_destroy_btree_cur_zone;
1698 
1699 	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1700 	if (!xfs_ifork_zone)
1701 		goto out_destroy_da_state_zone;
1702 
1703 	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1704 	if (!xfs_trans_zone)
1705 		goto out_destroy_ifork_zone;
1706 
1707 	xfs_log_item_desc_zone =
1708 		kmem_zone_init(sizeof(struct xfs_log_item_desc),
1709 			       "xfs_log_item_desc");
1710 	if (!xfs_log_item_desc_zone)
1711 		goto out_destroy_trans_zone;
1712 
1713 	/*
1714 	 * The size of the zone allocated buf log item is the maximum
1715 	 * size possible under XFS.  This wastes a little bit of memory,
1716 	 * but it is much faster.
1717 	 */
1718 	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
1719 					   "xfs_buf_item");
1720 	if (!xfs_buf_item_zone)
1721 		goto out_destroy_log_item_desc_zone;
1722 
1723 	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1724 			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1725 				 sizeof(xfs_extent_t))), "xfs_efd_item");
1726 	if (!xfs_efd_zone)
1727 		goto out_destroy_buf_item_zone;
1728 
1729 	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1730 			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1731 				sizeof(xfs_extent_t))), "xfs_efi_item");
1732 	if (!xfs_efi_zone)
1733 		goto out_destroy_efd_zone;
1734 
1735 	xfs_inode_zone =
1736 		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1737 			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
1738 			xfs_fs_inode_init_once);
1739 	if (!xfs_inode_zone)
1740 		goto out_destroy_efi_zone;
1741 
1742 	xfs_ili_zone =
1743 		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1744 					KM_ZONE_SPREAD, NULL);
1745 	if (!xfs_ili_zone)
1746 		goto out_destroy_inode_zone;
1747 	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
1748 					"xfs_icr");
1749 	if (!xfs_icreate_zone)
1750 		goto out_destroy_ili_zone;
1751 
1752 	return 0;
1753 
1754  out_destroy_ili_zone:
1755 	kmem_zone_destroy(xfs_ili_zone);
1756  out_destroy_inode_zone:
1757 	kmem_zone_destroy(xfs_inode_zone);
1758  out_destroy_efi_zone:
1759 	kmem_zone_destroy(xfs_efi_zone);
1760  out_destroy_efd_zone:
1761 	kmem_zone_destroy(xfs_efd_zone);
1762  out_destroy_buf_item_zone:
1763 	kmem_zone_destroy(xfs_buf_item_zone);
1764  out_destroy_log_item_desc_zone:
1765 	kmem_zone_destroy(xfs_log_item_desc_zone);
1766  out_destroy_trans_zone:
1767 	kmem_zone_destroy(xfs_trans_zone);
1768  out_destroy_ifork_zone:
1769 	kmem_zone_destroy(xfs_ifork_zone);
1770  out_destroy_da_state_zone:
1771 	kmem_zone_destroy(xfs_da_state_zone);
1772  out_destroy_btree_cur_zone:
1773 	kmem_zone_destroy(xfs_btree_cur_zone);
1774  out_destroy_bmap_free_item_zone:
1775 	kmem_zone_destroy(xfs_bmap_free_item_zone);
1776  out_destroy_log_ticket_zone:
1777 	kmem_zone_destroy(xfs_log_ticket_zone);
1778  out_destroy_ioend_pool:
1779 	mempool_destroy(xfs_ioend_pool);
1780  out_destroy_ioend_zone:
1781 	kmem_zone_destroy(xfs_ioend_zone);
1782  out:
1783 	return -ENOMEM;
1784 }
1785 
1786 STATIC void
1787 xfs_destroy_zones(void)
1788 {
1789 	/*
1790 	 * Make sure all delayed rcu free are flushed before we
1791 	 * destroy caches.
1792 	 */
1793 	rcu_barrier();
1794 	kmem_zone_destroy(xfs_icreate_zone);
1795 	kmem_zone_destroy(xfs_ili_zone);
1796 	kmem_zone_destroy(xfs_inode_zone);
1797 	kmem_zone_destroy(xfs_efi_zone);
1798 	kmem_zone_destroy(xfs_efd_zone);
1799 	kmem_zone_destroy(xfs_buf_item_zone);
1800 	kmem_zone_destroy(xfs_log_item_desc_zone);
1801 	kmem_zone_destroy(xfs_trans_zone);
1802 	kmem_zone_destroy(xfs_ifork_zone);
1803 	kmem_zone_destroy(xfs_da_state_zone);
1804 	kmem_zone_destroy(xfs_btree_cur_zone);
1805 	kmem_zone_destroy(xfs_bmap_free_item_zone);
1806 	kmem_zone_destroy(xfs_log_ticket_zone);
1807 	mempool_destroy(xfs_ioend_pool);
1808 	kmem_zone_destroy(xfs_ioend_zone);
1809 
1810 }
1811 
1812 STATIC int __init
1813 xfs_init_workqueues(void)
1814 {
1815 	/*
1816 	 * The allocation workqueue can be used in memory reclaim situations
1817 	 * (writepage path), and parallelism is only limited by the number of
1818 	 * AGs in all the filesystems mounted. Hence use the default large
1819 	 * max_active value for this workqueue.
1820 	 */
1821 	xfs_alloc_wq = alloc_workqueue("xfsalloc",
1822 			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
1823 	if (!xfs_alloc_wq)
1824 		return -ENOMEM;
1825 
1826 	return 0;
1827 }
1828 
1829 STATIC void
1830 xfs_destroy_workqueues(void)
1831 {
1832 	destroy_workqueue(xfs_alloc_wq);
1833 }
1834 
1835 STATIC int __init
1836 init_xfs_fs(void)
1837 {
1838 	int			error;
1839 
1840 	printk(KERN_INFO XFS_VERSION_STRING " with "
1841 			 XFS_BUILD_OPTIONS " enabled\n");
1842 
1843 	xfs_dir_startup();
1844 
1845 	error = xfs_init_zones();
1846 	if (error)
1847 		goto out;
1848 
1849 	error = xfs_init_workqueues();
1850 	if (error)
1851 		goto out_destroy_zones;
1852 
1853 	error = xfs_mru_cache_init();
1854 	if (error)
1855 		goto out_destroy_wq;
1856 
1857 	error = xfs_buf_init();
1858 	if (error)
1859 		goto out_mru_cache_uninit;
1860 
1861 	error = xfs_init_procfs();
1862 	if (error)
1863 		goto out_buf_terminate;
1864 
1865 	error = xfs_sysctl_register();
1866 	if (error)
1867 		goto out_cleanup_procfs;
1868 
1869 	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
1870 	if (!xfs_kset) {
1871 		error = -ENOMEM;
1872 		goto out_sysctl_unregister;
1873 	}
1874 
1875 	xfsstats.xs_kobj.kobject.kset = xfs_kset;
1876 
1877 	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
1878 	if (!xfsstats.xs_stats) {
1879 		error = -ENOMEM;
1880 		goto out_kset_unregister;
1881 	}
1882 
1883 	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
1884 			       "stats");
1885 	if (error)
1886 		goto out_free_stats;
1887 
1888 #ifdef DEBUG
1889 	xfs_dbg_kobj.kobject.kset = xfs_kset;
1890 	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
1891 	if (error)
1892 		goto out_remove_stats_kobj;
1893 #endif
1894 
1895 	error = xfs_qm_init();
1896 	if (error)
1897 		goto out_remove_dbg_kobj;
1898 
1899 	error = register_filesystem(&xfs_fs_type);
1900 	if (error)
1901 		goto out_qm_exit;
1902 	return 0;
1903 
1904  out_qm_exit:
1905 	xfs_qm_exit();
1906  out_remove_dbg_kobj:
1907 #ifdef DEBUG
1908 	xfs_sysfs_del(&xfs_dbg_kobj);
1909  out_remove_stats_kobj:
1910 #endif
1911 	xfs_sysfs_del(&xfsstats.xs_kobj);
1912  out_free_stats:
1913 	free_percpu(xfsstats.xs_stats);
1914  out_kset_unregister:
1915 	kset_unregister(xfs_kset);
1916  out_sysctl_unregister:
1917 	xfs_sysctl_unregister();
1918  out_cleanup_procfs:
1919 	xfs_cleanup_procfs();
1920  out_buf_terminate:
1921 	xfs_buf_terminate();
1922  out_mru_cache_uninit:
1923 	xfs_mru_cache_uninit();
1924  out_destroy_wq:
1925 	xfs_destroy_workqueues();
1926  out_destroy_zones:
1927 	xfs_destroy_zones();
1928  out:
1929 	return error;
1930 }
1931 
1932 STATIC void __exit
1933 exit_xfs_fs(void)
1934 {
1935 	xfs_qm_exit();
1936 	unregister_filesystem(&xfs_fs_type);
1937 #ifdef DEBUG
1938 	xfs_sysfs_del(&xfs_dbg_kobj);
1939 #endif
1940 	xfs_sysfs_del(&xfsstats.xs_kobj);
1941 	free_percpu(xfsstats.xs_stats);
1942 	kset_unregister(xfs_kset);
1943 	xfs_sysctl_unregister();
1944 	xfs_cleanup_procfs();
1945 	xfs_buf_terminate();
1946 	xfs_mru_cache_uninit();
1947 	xfs_destroy_workqueues();
1948 	xfs_destroy_zones();
1949 	xfs_uuid_table_free();
1950 }
1951 
1952 module_init(init_xfs_fs);
1953 module_exit(exit_xfs_fs);
1954 
1955 MODULE_AUTHOR("Silicon Graphics, Inc.");
1956 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
1957 MODULE_LICENSE("GPL");
1958