/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_log.h"

/*
 * Physical superblock buffer manipulations. Shared with libxfs in userspace.
 */

/*
 * Reference counting access wrappers to the perag structures.  Because we
 * never free per-ag structures, the only thing we have to protect against
 * is the radix tree structure itself changing.
 */
struct xfs_perag *
xfs_perag_get(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno)
{
        struct xfs_perag        *pag;
        int                     ref = 0;

        rcu_read_lock();
        pag = radix_tree_lookup(&mp->m_perag_tree, agno);
        if (pag) {
                ASSERT(atomic_read(&pag->pag_ref) >= 0);
                ref = atomic_inc_return(&pag->pag_ref);
        }
        rcu_read_unlock();
        trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
        return pag;
}

/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
        struct xfs_mount        *mp,
        xfs_agnumber_t          first,
        int                     tag)
{
        struct xfs_perag        *pag;
        int                     found;
        int                     ref;

        rcu_read_lock();
        found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
                                        (void **)&pag, first, 1, tag);
        if (found <= 0) {
                rcu_read_unlock();
                return NULL;
        }
        ref = atomic_inc_return(&pag->pag_ref);
        rcu_read_unlock();
        trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
        return pag;
}
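
/*
 * Example (illustrative sketch, not part of this file): callers typically
 * walk all perags carrying a given radix tree tag by restarting the lookup
 * after the AG that was just returned, and dropping the reference when done.
 * XFS_ICI_RECLAIM_TAG below is just one example of a tag a caller might use.
 *
 *	xfs_agnumber_t agno = 0;
 *	struct xfs_perag *pag;
 *
 *	while ((pag = xfs_perag_get_tag(mp, agno, XFS_ICI_RECLAIM_TAG))) {
 *		agno = pag->pag_agno + 1;
 *		... operate on the tagged AG ...
 *		xfs_perag_put(pag);
 *	}
 */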

void
xfs_perag_put(
        struct xfs_perag        *pag)
{
        int     ref;

        ASSERT(atomic_read(&pag->pag_ref) > 0);
        ref = atomic_dec_return(&pag->pag_ref);
        trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}
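
/*
 * Example (illustrative sketch, not part of this file): every xfs_perag_get()
 * must be balanced by an xfs_perag_put() once the caller is done with the
 * structure, as in the per-AG walk below (xfs_initialize_perag_data() later
 * in this file follows the same pattern).
 *
 *	xfs_agnumber_t agno;
 *
 *	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
 *		struct xfs_perag *pag = xfs_perag_get(mp, agno);
 *
 *		... read pag->pagi_count, pag->pagf_freeblks, etc ...
 *		xfs_perag_put(pag);
 *	}
 */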

/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
        xfs_mount_t     *mp,
        xfs_sb_t        *sbp,
        bool            check_inprogress,
        bool            check_version)
{
        if (sbp->sb_magicnum != XFS_SB_MAGIC) {
                xfs_warn(mp, "bad magic number");
                return -EWRONGFS;
        }

        if (!xfs_sb_good_version(sbp)) {
                xfs_warn(mp, "bad version");
                return -EWRONGFS;
        }

        /*
         * Version 5 superblock feature mask validation. Reject combinations
         * the kernel cannot support up front before checking anything else.
         * For write validation, we don't need to check feature masks.
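         *
         * For example: an unknown compat bit only draws a warning below, an
         * unknown ro-compat bit refuses a read-write mount (read-only mounts
         * still proceed), and an unknown incompat bit refuses the mount
         * entirely.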
         */
        if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
                if (xfs_sb_has_compat_feature(sbp,
                                        XFS_SB_FEAT_COMPAT_UNKNOWN)) {
                        xfs_warn(mp,
"Superblock has unknown compatible features (0x%x) enabled.",
                                (sbp->sb_features_compat &
                                                XFS_SB_FEAT_COMPAT_UNKNOWN));
                        xfs_warn(mp,
"Using a more recent kernel is recommended.");
                }

                if (xfs_sb_has_ro_compat_feature(sbp,
                                        XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
                        xfs_alert(mp,
"Superblock has unknown read-only compatible features (0x%x) enabled.",
                                (sbp->sb_features_ro_compat &
                                                XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
                        if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
                                xfs_warn(mp,
"Attempted to mount read-only compatible filesystem read-write.");
                                xfs_warn(mp,
"Filesystem can only be safely mounted read only.");

                                return -EINVAL;
                        }
                }
                if (xfs_sb_has_incompat_feature(sbp,
                                        XFS_SB_FEAT_INCOMPAT_UNKNOWN)) {
                        xfs_warn(mp,
"Superblock has unknown incompatible features (0x%x) enabled.",
                                (sbp->sb_features_incompat &
                                                XFS_SB_FEAT_INCOMPAT_UNKNOWN));
                        xfs_warn(mp,
"Filesystem can not be safely mounted by this kernel.");
                        return -EINVAL;
                }
        } else if (xfs_sb_version_hascrc(sbp)) {
                /*
                 * We can't verify the sb LSN on read because the read
                 * verifier is called before the log is allocated and
                 * processed. We know the log is set up before the write
                 * verifier (!check_version) calls, so just check it here.
                 */
                if (!xfs_log_check_lsn(mp, sbp->sb_lsn))
                        return -EFSCORRUPTED;
        }

        if (xfs_sb_version_has_pquotino(sbp)) {
                if (sbp->sb_qflags & (XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD)) {
                        xfs_notice(mp,
                           "Version 5 of Super block has XFS_OQUOTA bits.");
                        return -EFSCORRUPTED;
                }
        } else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
                                XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
                        xfs_notice(mp,
"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
                        return -EFSCORRUPTED;
        }

        /*
         * Full inode chunks must be aligned to inode chunk size when
         * sparse inodes are enabled to support the sparse chunk
         * allocation algorithm and prevent overlapping inode records.
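         *
         * For example (illustrative numbers): with 512 byte inodes and
         * 4096 byte blocks, a 64-inode chunk covers (64 * 512) >> 12 = 8
         * filesystem blocks, so sb_inoalignmt must also be 8 for this
         * check to pass.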
         */
        if (xfs_sb_version_hassparseinodes(sbp)) {
                uint32_t        align;

                align = XFS_INODES_PER_CHUNK * sbp->sb_inodesize
                                >> sbp->sb_blocklog;
                if (sbp->sb_inoalignmt != align) {
                        xfs_warn(mp,
"Inode block alignment (%u) must match chunk size (%u) for sparse inodes.",
                                 sbp->sb_inoalignmt, align);
                        return -EINVAL;
                }
        }

        if (unlikely(
            sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
                xfs_warn(mp,
                "filesystem is marked as having an external log; "
                "specify logdev on the mount command line.");
                return -EINVAL;
        }

        if (unlikely(
            sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
                xfs_warn(mp,
                "filesystem is marked as having an internal log; "
                "do not specify logdev on the mount command line.");
                return -EINVAL;
        }

        /*
         * More sanity checking.  Most of these were stolen directly from
         * xfs_repair.
         */
        if (unlikely(
            sbp->sb_agcount <= 0 ||
            sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
            sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
            sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
            sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
            sbp->sb_sectsize != (1 << sbp->sb_sectlog) ||
            sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
            sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
            sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
            sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
            sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
            sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
            sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
            sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
            sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
            sbp->sb_inodelog > XFS_DINODE_MAX_LOG ||
            sbp->sb_inodesize != (1 << sbp->sb_inodelog) ||
            sbp->sb_logsunit > XLOG_MAX_RECORD_BSIZE ||
            sbp->sb_inopblock != howmany(sbp->sb_blocksize, sbp->sb_inodesize) ||
            (sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
            (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
            (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
            (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */) ||
            sbp->sb_dblocks == 0 ||
            sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
            sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp) ||
            sbp->sb_shared_vn != 0)) {
                xfs_notice(mp, "SB sanity check failed");
                return -EFSCORRUPTED;
        }

        /*
         * Until this is fixed, only page-sized or smaller data blocks work.
         */
        if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
                xfs_warn(mp,
                "File system with blocksize %d bytes. "
                "Only pagesize (%ld) or less will currently work.",
                                sbp->sb_blocksize, PAGE_SIZE);
                return -ENOSYS;
        }

        /*
         * Currently only a small number of inode sizes are supported.
         */
        switch (sbp->sb_inodesize) {
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
        default:
                xfs_warn(mp, "inode size of %d bytes not supported",
                                sbp->sb_inodesize);
                return -ENOSYS;
        }

        if (xfs_sb_validate_fsb_count(sbp, sbp->sb_dblocks) ||
            xfs_sb_validate_fsb_count(sbp, sbp->sb_rblocks)) {
                xfs_warn(mp,
                "file system too large to be mounted on this system.");
                return -EFBIG;
        }

        if (check_inprogress && sbp->sb_inprogress) {
                xfs_warn(mp, "Offline file system operation in progress!");
                return -EFSCORRUPTED;
        }
        return 0;
}

void
xfs_sb_quota_from_disk(struct xfs_sb *sbp)
{
        /*
         * Older versions of mkfs don't initialize the quota inodes to
         * NULLFSINO, which leaves the in-core superblock with two different
         * representations of an invalid quota inode: 0 and NULLFSINO.
         * Normalize them to the single value NULLFSINO here.
         *
         * Note that this change affects only the in-core values. These
         * values are not written back to disk unless quota information is
         * written to the disk. Even in that case, the sb_pquotino field is
         * not written to disk unless the superblock supports pquotino.
         */
        if (sbp->sb_uquotino == 0)
                sbp->sb_uquotino = NULLFSINO;
        if (sbp->sb_gquotino == 0)
                sbp->sb_gquotino = NULLFSINO;
        if (sbp->sb_pquotino == 0)
                sbp->sb_pquotino = NULLFSINO;

        /*
         * We only need the remaining manipulations if we are working with an
         * older version of the on-disk superblock.
         */
        if (xfs_sb_version_has_pquotino(sbp))
                return;

        if (sbp->sb_qflags & XFS_OQUOTA_ENFD)
                sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
                                        XFS_PQUOTA_ENFD : XFS_GQUOTA_ENFD;
        if (sbp->sb_qflags & XFS_OQUOTA_CHKD)
                sbp->sb_qflags |= (sbp->sb_qflags & XFS_PQUOTA_ACCT) ?
                                        XFS_PQUOTA_CHKD : XFS_GQUOTA_CHKD;
        sbp->sb_qflags &= ~(XFS_OQUOTA_ENFD | XFS_OQUOTA_CHKD);

        if (sbp->sb_qflags & XFS_PQUOTA_ACCT) {
                /*
                 * In older versions of the superblock, the on-disk superblock
                 * only has sb_gquotino, while the in-core superblock has both
                 * sb_gquotino and sb_pquotino; only one of them can be in use
                 * at any point in time. So if PQUOTA is set in the on-disk
                 * superblock, copy sb_gquotino over to sb_pquotino.
                 */
                sbp->sb_pquotino = sbp->sb_gquotino;
                sbp->sb_gquotino = NULLFSINO;
        }
}
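
/*
 * Worked example (illustrative, not part of this file): a pre-pquotino
 * on-disk superblock carrying
 *
 *	sb_qflags = XFS_UQUOTA_ACCT | XFS_PQUOTA_ACCT | XFS_OQUOTA_ENFD
 *
 * comes out of xfs_sb_quota_from_disk() with XFS_OQUOTA_ENFD replaced by
 * XFS_PQUOTA_ENFD (because XFS_PQUOTA_ACCT is set), and with the on-disk
 * sb_gquotino value moved into the in-core sb_pquotino while sb_gquotino
 * becomes NULLFSINO.
 */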

static void
__xfs_sb_from_disk(
        struct xfs_sb   *to,
        xfs_dsb_t       *from,
        bool            convert_xquota)
{
        to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
        to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
        to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
        to->sb_rblocks = be64_to_cpu(from->sb_rblocks);
        to->sb_rextents = be64_to_cpu(from->sb_rextents);
        memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
        to->sb_logstart = be64_to_cpu(from->sb_logstart);
        to->sb_rootino = be64_to_cpu(from->sb_rootino);
        to->sb_rbmino = be64_to_cpu(from->sb_rbmino);
        to->sb_rsumino = be64_to_cpu(from->sb_rsumino);
        to->sb_rextsize = be32_to_cpu(from->sb_rextsize);
        to->sb_agblocks = be32_to_cpu(from->sb_agblocks);
        to->sb_agcount = be32_to_cpu(from->sb_agcount);
        to->sb_rbmblocks = be32_to_cpu(from->sb_rbmblocks);
        to->sb_logblocks = be32_to_cpu(from->sb_logblocks);
        to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
        to->sb_sectsize = be16_to_cpu(from->sb_sectsize);
        to->sb_inodesize = be16_to_cpu(from->sb_inodesize);
        to->sb_inopblock = be16_to_cpu(from->sb_inopblock);
        memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
        to->sb_blocklog = from->sb_blocklog;
        to->sb_sectlog = from->sb_sectlog;
        to->sb_inodelog = from->sb_inodelog;
        to->sb_inopblog = from->sb_inopblog;
        to->sb_agblklog = from->sb_agblklog;
        to->sb_rextslog = from->sb_rextslog;
        to->sb_inprogress = from->sb_inprogress;
        to->sb_imax_pct = from->sb_imax_pct;
        to->sb_icount = be64_to_cpu(from->sb_icount);
        to->sb_ifree = be64_to_cpu(from->sb_ifree);
        to->sb_fdblocks = be64_to_cpu(from->sb_fdblocks);
        to->sb_frextents = be64_to_cpu(from->sb_frextents);
        to->sb_uquotino = be64_to_cpu(from->sb_uquotino);
        to->sb_gquotino = be64_to_cpu(from->sb_gquotino);
        to->sb_qflags = be16_to_cpu(from->sb_qflags);
        to->sb_flags = from->sb_flags;
        to->sb_shared_vn = from->sb_shared_vn;
        to->sb_inoalignmt = be32_to_cpu(from->sb_inoalignmt);
        to->sb_unit = be32_to_cpu(from->sb_unit);
        to->sb_width = be32_to_cpu(from->sb_width);
        to->sb_dirblklog = from->sb_dirblklog;
        to->sb_logsectlog = from->sb_logsectlog;
        to->sb_logsectsize = be16_to_cpu(from->sb_logsectsize);
        to->sb_logsunit = be32_to_cpu(from->sb_logsunit);
        to->sb_features2 = be32_to_cpu(from->sb_features2);
        to->sb_bad_features2 = be32_to_cpu(from->sb_bad_features2);
        to->sb_features_compat = be32_to_cpu(from->sb_features_compat);
        to->sb_features_ro_compat = be32_to_cpu(from->sb_features_ro_compat);
        to->sb_features_incompat = be32_to_cpu(from->sb_features_incompat);
        to->sb_features_log_incompat =
                                be32_to_cpu(from->sb_features_log_incompat);
        /* crc is only used on disk, not in memory; just init to 0 here. */
        to->sb_crc = 0;
        to->sb_spino_align = be32_to_cpu(from->sb_spino_align);
        to->sb_pquotino = be64_to_cpu(from->sb_pquotino);
        to->sb_lsn = be64_to_cpu(from->sb_lsn);
        /*
         * sb_meta_uuid is only on disk if it differs from sb_uuid and the
         * feature flag is set; if not set we keep it only in memory.
         */
        if (xfs_sb_version_hasmetauuid(to))
                uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid);
        else
                uuid_copy(&to->sb_meta_uuid, &from->sb_uuid);
        /* Convert on-disk flags to in-memory flags? */
        if (convert_xquota)
                xfs_sb_quota_from_disk(to);
}

void
xfs_sb_from_disk(
        struct xfs_sb   *to,
        xfs_dsb_t       *from)
{
        __xfs_sb_from_disk(to, from, true);
}
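
/*
 * Example usage (illustrative sketch; the mount code that actually does this
 * lives outside this file): after reading the primary superblock buffer, the
 * caller converts it into the in-core mount superblock in one call:
 *
 *	xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
 *
 * The quota flags are converted along the way because this wrapper passes
 * convert_xquota == true.
 */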

static void
xfs_sb_quota_to_disk(
        struct xfs_dsb  *to,
        struct xfs_sb   *from)
{
        __uint16_t      qflags = from->sb_qflags;

        to->sb_uquotino = cpu_to_be64(from->sb_uquotino);
        if (xfs_sb_version_has_pquotino(from)) {
                to->sb_qflags = cpu_to_be16(from->sb_qflags);
                to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
                to->sb_pquotino = cpu_to_be64(from->sb_pquotino);
                return;
        }

        /*
         * The in-core version of sb_qflags does not have the XFS_OQUOTA_*
         * flags, whereas the on-disk version does. So convert the in-core
         * XFS_{P,G}QUOTA_* flags to the on-disk XFS_OQUOTA_* flags.
         */
        qflags &= ~(XFS_PQUOTA_ENFD | XFS_PQUOTA_CHKD |
                        XFS_GQUOTA_ENFD | XFS_GQUOTA_CHKD);

        if (from->sb_qflags &
                        (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD))
                qflags |= XFS_OQUOTA_ENFD;
        if (from->sb_qflags &
                        (XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD))
                qflags |= XFS_OQUOTA_CHKD;
        to->sb_qflags = cpu_to_be16(qflags);

        /*
         * GQUOTINO and PQUOTINO cannot be used together in versions of the
         * superblock that do not have pquotino. from->sb_qflags tells us
         * which quota is active and should be copied to disk. If neither
         * is active, we should NULL the inode.
         *
         * In all cases, the separate pquotino must remain 0 because it is
         * beyond the "end" of the valid non-pquotino superblock.
         */
        if (from->sb_qflags & XFS_GQUOTA_ACCT)
                to->sb_gquotino = cpu_to_be64(from->sb_gquotino);
        else if (from->sb_qflags & XFS_PQUOTA_ACCT)
                to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
        else {
                /*
                 * We can't rely on just the fields being logged to tell us
                 * that it is safe to write NULLFSINO - we should only do that
                 * if quotas are not actually enabled. Hence only write
                 * NULLFSINO if both in-core quota inodes are NULL.
                 */
                if (from->sb_gquotino == NULLFSINO &&
                    from->sb_pquotino == NULLFSINO)
                        to->sb_gquotino = cpu_to_be64(NULLFSINO);
        }

        to->sb_pquotino = 0;
}

void
xfs_sb_to_disk(
        struct xfs_dsb  *to,
        struct xfs_sb   *from)
{
        xfs_sb_quota_to_disk(to, from);

        to->sb_magicnum = cpu_to_be32(from->sb_magicnum);
        to->sb_blocksize = cpu_to_be32(from->sb_blocksize);
        to->sb_dblocks = cpu_to_be64(from->sb_dblocks);
        to->sb_rblocks = cpu_to_be64(from->sb_rblocks);
        to->sb_rextents = cpu_to_be64(from->sb_rextents);
        memcpy(&to->sb_uuid, &from->sb_uuid, sizeof(to->sb_uuid));
        to->sb_logstart = cpu_to_be64(from->sb_logstart);
        to->sb_rootino = cpu_to_be64(from->sb_rootino);
        to->sb_rbmino = cpu_to_be64(from->sb_rbmino);
        to->sb_rsumino = cpu_to_be64(from->sb_rsumino);
        to->sb_rextsize = cpu_to_be32(from->sb_rextsize);
        to->sb_agblocks = cpu_to_be32(from->sb_agblocks);
        to->sb_agcount = cpu_to_be32(from->sb_agcount);
        to->sb_rbmblocks = cpu_to_be32(from->sb_rbmblocks);
        to->sb_logblocks = cpu_to_be32(from->sb_logblocks);
        to->sb_versionnum = cpu_to_be16(from->sb_versionnum);
        to->sb_sectsize = cpu_to_be16(from->sb_sectsize);
        to->sb_inodesize = cpu_to_be16(from->sb_inodesize);
        to->sb_inopblock = cpu_to_be16(from->sb_inopblock);
        memcpy(&to->sb_fname, &from->sb_fname, sizeof(to->sb_fname));
        to->sb_blocklog = from->sb_blocklog;
        to->sb_sectlog = from->sb_sectlog;
        to->sb_inodelog = from->sb_inodelog;
        to->sb_inopblog = from->sb_inopblog;
        to->sb_agblklog = from->sb_agblklog;
        to->sb_rextslog = from->sb_rextslog;
        to->sb_inprogress = from->sb_inprogress;
        to->sb_imax_pct = from->sb_imax_pct;
        to->sb_icount = cpu_to_be64(from->sb_icount);
        to->sb_ifree = cpu_to_be64(from->sb_ifree);
        to->sb_fdblocks = cpu_to_be64(from->sb_fdblocks);
        to->sb_frextents = cpu_to_be64(from->sb_frextents);

        to->sb_flags = from->sb_flags;
        to->sb_shared_vn = from->sb_shared_vn;
        to->sb_inoalignmt = cpu_to_be32(from->sb_inoalignmt);
        to->sb_unit = cpu_to_be32(from->sb_unit);
        to->sb_width = cpu_to_be32(from->sb_width);
        to->sb_dirblklog = from->sb_dirblklog;
        to->sb_logsectlog = from->sb_logsectlog;
        to->sb_logsectsize = cpu_to_be16(from->sb_logsectsize);
        to->sb_logsunit = cpu_to_be32(from->sb_logsunit);

        /*
         * We need to ensure that bad_features2 always matches features2.
         * Hence we enforce that here rather than having to remember to do it
         * everywhere else that updates features2.
         */
        from->sb_bad_features2 = from->sb_features2;
        to->sb_features2 = cpu_to_be32(from->sb_features2);
        to->sb_bad_features2 = cpu_to_be32(from->sb_bad_features2);

        if (xfs_sb_version_hascrc(from)) {
                to->sb_features_compat = cpu_to_be32(from->sb_features_compat);
                to->sb_features_ro_compat =
                                cpu_to_be32(from->sb_features_ro_compat);
                to->sb_features_incompat =
                                cpu_to_be32(from->sb_features_incompat);
                to->sb_features_log_incompat =
                                cpu_to_be32(from->sb_features_log_incompat);
                to->sb_spino_align = cpu_to_be32(from->sb_spino_align);
                to->sb_lsn = cpu_to_be64(from->sb_lsn);
                if (xfs_sb_version_hasmetauuid(from))
                        uuid_copy(&to->sb_meta_uuid, &from->sb_meta_uuid);
        }
}

static int
xfs_sb_verify(
        struct xfs_buf  *bp,
        bool            check_version)
{
        struct xfs_mount *mp = bp->b_target->bt_mount;
        struct xfs_sb   sb;

        /*
         * Use the variant that does not convert quota flags from disk
         * format, because xfs_mount_validate_sb checks the on-disk flags.
         */
        __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false);

        /*
         * Only check the in-progress field for the primary superblock as
         * mkfs.xfs doesn't clear it from secondary superblocks.
         */
        return xfs_mount_validate_sb(mp, &sb,
                                     bp->b_maps[0].bm_bn == XFS_SB_DADDR,
                                     check_version);
}

/*
 * If the superblock has the CRC feature bit set or the CRC field is non-zero,
 * check that the CRC is valid. We check the CRC field is non-zero because a
 * single bit error could clear the feature bit and unused parts of the
 * superblock are supposed to be zero. Hence a non-zero CRC field indicates
 * that we've potentially lost a feature bit and we should check it anyway.
 *
 * However, past bugs (i.e. in growfs) left non-zeroed regions beyond the
 * last field in V4 secondary superblocks. So for secondary superblocks,
 * we are more forgiving, and ignore CRC failures if the primary doesn't
 * indicate that the fs version is V5.
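 *
 * For example: a secondary superblock (bp->b_bn != XFS_SB_DADDR) with a bad
 * CRC is tolerated here as long as the primary superblock does not claim V5,
 * whereas the same buffer on a known V5 filesystem fails with -EFSBADCRC.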
 */
static void
xfs_sb_read_verify(
        struct xfs_buf  *bp)
{
        struct xfs_mount *mp = bp->b_target->bt_mount;
        struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);
        int             error;

        /*
         * open code the version check to avoid needing to convert the entire
         * superblock from disk order just to check the version number
         */
        if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC) &&
            (((be16_to_cpu(dsb->sb_versionnum) & XFS_SB_VERSION_NUMBITS) ==
                                                XFS_SB_VERSION_5) ||
             dsb->sb_crc != 0)) {

                if (!xfs_buf_verify_cksum(bp, XFS_SB_CRC_OFF)) {
                        /* Only fail bad secondaries on a known V5 filesystem */
                        if (bp->b_bn == XFS_SB_DADDR ||
                            xfs_sb_version_hascrc(&mp->m_sb)) {
                                error = -EFSBADCRC;
                                goto out_error;
                        }
                }
        }
        error = xfs_sb_verify(bp, true);

out_error:
        if (error) {
                xfs_buf_ioerror(bp, error);
                if (error == -EFSCORRUPTED || error == -EFSBADCRC)
                        xfs_verifier_error(bp);
        }
}

/*
 * We may be probed for a filesystem match, so we may not want to emit
 * messages when the superblock buffer is not actually an XFS superblock.
 * If we find an XFS superblock, then run a normal, noisy mount because we are
 * really going to mount it and want to know about errors.
 */
static void
xfs_sb_quiet_read_verify(
        struct xfs_buf  *bp)
{
        struct xfs_dsb  *dsb = XFS_BUF_TO_SBP(bp);

        if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
                /* XFS filesystem, verify noisily! */
                xfs_sb_read_verify(bp);
                return;
        }
        /* quietly fail */
        xfs_buf_ioerror(bp, -EWRONGFS);
}

static void
xfs_sb_write_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_target->bt_mount;
        struct xfs_buf_log_item *bip = bp->b_fspriv;
        int                     error;

        error = xfs_sb_verify(bp, false);
        if (error) {
                xfs_buf_ioerror(bp, error);
                xfs_verifier_error(bp);
                return;
        }

        if (!xfs_sb_version_hascrc(&mp->m_sb))
                return;

        if (bip)
                XFS_BUF_TO_SBP(bp)->sb_lsn = cpu_to_be64(bip->bli_item.li_lsn);

        xfs_buf_update_cksum(bp, XFS_SB_CRC_OFF);
}

const struct xfs_buf_ops xfs_sb_buf_ops = {
        .name = "xfs_sb",
        .verify_read = xfs_sb_read_verify,
        .verify_write = xfs_sb_write_verify,
};

const struct xfs_buf_ops xfs_sb_quiet_buf_ops = {
        .name = "xfs_sb_quiet",
        .verify_read = xfs_sb_quiet_read_verify,
        .verify_write = xfs_sb_write_verify,
};
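
/*
 * Example (illustrative sketch, not part of this file): superblock buffer I/O
 * picks up these verifiers through the buffer's b_ops pointer, so that
 * .verify_read runs when a read completes and .verify_write runs before the
 * buffer is written to disk, e.g.:
 *
 *	bp->b_ops = &xfs_sb_buf_ops;
 *
 * Mount-time probing uses xfs_sb_quiet_buf_ops instead, so that scanning a
 * non-XFS device does not produce noisy error messages.
 */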

/*
 * xfs_sb_mount_common
 *
 * Mount initialization code that establishes various mount fields from the
 * superblock associated with the given mount structure.
 */
void
xfs_sb_mount_common(
        struct xfs_mount        *mp,
        struct xfs_sb           *sbp)
{
        mp->m_agfrotor = mp->m_agirotor = 0;
        spin_lock_init(&mp->m_agirotor_lock);
        mp->m_maxagi = mp->m_sb.sb_agcount;
        mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
        mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
        mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
        mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
        mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
        mp->m_blockmask = sbp->sb_blocksize - 1;
        mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
        mp->m_blockwmask = mp->m_blockwsize - 1;
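
        /*
         * Worked example (illustrative numbers): for a filesystem with
         * 4096 byte blocks and 512 byte sectors, sb_blocklog = 12 and
         * sb_sectlog = 9, so m_blkbit_log = 12 + 3 = 15 (bits per block),
         * m_blkbb_log = 12 - 9 = 3 (basic blocks per block, as a shift),
         * m_sectbb_log = 0, m_blockmask = 0xfff and
         * m_blockwsize = 4096 >> 2 = 1024 words.
         */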

        mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
        mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
        mp->m_alloc_mnr[0] = mp->m_alloc_mxr[0] / 2;
        mp->m_alloc_mnr[1] = mp->m_alloc_mxr[1] / 2;

        mp->m_inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
        mp->m_inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
        mp->m_inobt_mnr[0] = mp->m_inobt_mxr[0] / 2;
        mp->m_inobt_mnr[1] = mp->m_inobt_mxr[1] / 2;

        mp->m_bmap_dmxr[0] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 1);
        mp->m_bmap_dmxr[1] = xfs_bmbt_maxrecs(mp, sbp->sb_blocksize, 0);
        mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
        mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;

        mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
        mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
                                        sbp->sb_inopblock);
        mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;

        if (sbp->sb_spino_align)
                mp->m_ialloc_min_blks = sbp->sb_spino_align;
        else
                mp->m_ialloc_min_blks = mp->m_ialloc_blks;
}

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agcount)
{
        xfs_agnumber_t  index;
        xfs_perag_t     *pag;
        xfs_sb_t        *sbp = &mp->m_sb;
        uint64_t        ifree = 0;
        uint64_t        ialloc = 0;
        uint64_t        bfree = 0;
        uint64_t        bfreelst = 0;
        uint64_t        btree = 0;
        int             error;

        for (index = 0; index < agcount; index++) {
                /*
                 * Read the AGF, then the AGI. This gets us all the
                 * information we need and populates the per-ag
                 * structures for us.
                 */
                error = xfs_alloc_pagf_init(mp, NULL, index, 0);
                if (error)
                        return error;

                error = xfs_ialloc_pagi_init(mp, NULL, index);
                if (error)
                        return error;
                pag = xfs_perag_get(mp, index);
                ifree += pag->pagi_freecount;
                ialloc += pag->pagi_count;
                bfree += pag->pagf_freeblks;
                bfreelst += pag->pagf_flcount;
                btree += pag->pagf_btreeblks;
                xfs_perag_put(pag);
        }

        /* Overwrite incore superblock counters with just-read data */
        spin_lock(&mp->m_sb_lock);
        sbp->sb_ifree = ifree;
        sbp->sb_icount = ialloc;
        sbp->sb_fdblocks = bfree + bfreelst + btree;
        spin_unlock(&mp->m_sb_lock);

        xfs_reinit_percpu_counters(mp);

        return 0;
}

/*
 * xfs_log_sb() can be used to copy arbitrary changes to the in-core superblock
 * into the superblock buffer to be logged. It does not provide the higher
 * level of locking that is needed to protect the in-core superblock from
 * concurrent access.
 */
void
xfs_log_sb(
        struct xfs_trans        *tp)
{
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_buf          *bp = xfs_trans_getsb(tp, mp, 0);

        mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
        mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
        mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);

        xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
        xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
        xfs_trans_log_buf(tp, bp, 0, sizeof(struct xfs_dsb));
}

/*
 * xfs_sync_sb
 *
 * Sync the superblock to disk.
 *
 * Note that the caller is responsible for checking the frozen state of the
 * filesystem. This function uses the non-blocking transaction allocator and
 * thus will allow modifications to a frozen fs. This is required because this
 * code can be called during the process of freezing, where use of the
 * high-level allocator would deadlock.
 */
int
xfs_sync_sb(
        struct xfs_mount        *mp,
        bool                    wait)
{
        struct xfs_trans        *tp;
        int                     error;

        tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
        error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        xfs_log_sb(tp);
        if (wait)
                xfs_trans_set_sync(tp);
        return xfs_trans_commit(tp);
}
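
/*
 * Example usage (illustrative sketch; real callers live in the mount and
 * quota code outside this file): after updating fields in mp->m_sb under the
 * appropriate locking, push the change to disk and wait for it:
 *
 *	error = xfs_sync_sb(mp, true);
 *	if (error)
 *		... handle the error ...
 *
 * Passing wait == false commits the transaction asynchronously instead of
 * forcing the log before returning.
 */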