1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_trans.h"
18 #include "xfs_log.h"
19 #include "xfs_log_priv.h"
20 #include "xfs_log_recover.h"
21 #include "xfs_trans_priv.h"
22 #include "xfs_alloc.h"
23 #include "xfs_ialloc.h"
24 #include "xfs_trace.h"
25 #include "xfs_icache.h"
26 #include "xfs_error.h"
27 #include "xfs_buf_item.h"
28
29 #define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
30
31 STATIC int
32 xlog_find_zeroed(
33 struct xlog *,
34 xfs_daddr_t *);
35 STATIC int
36 xlog_clear_stale_blocks(
37 struct xlog *,
38 xfs_lsn_t);
39 #if defined(DEBUG)
40 STATIC void
41 xlog_recover_check_summary(
42 struct xlog *);
43 #else
44 #define xlog_recover_check_summary(log)
45 #endif
46 STATIC int
47 xlog_do_recovery_pass(
48 struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
49
50 /*
51 * Sector aligned buffer routines for buffer create/read/write/access
52 */
53
54 /*
55 * Verify the log-relative block number and length in basic blocks are valid for
56 * an operation involving the given XFS log buffer. Returns true if the fields
57 * are valid, false otherwise.
58 */
59 static inline bool
xlog_verify_bno(struct xlog * log,xfs_daddr_t blk_no,int bbcount)60 xlog_verify_bno(
61 struct xlog *log,
62 xfs_daddr_t blk_no,
63 int bbcount)
64 {
65 if (blk_no < 0 || blk_no >= log->l_logBBsize)
66 return false;
67 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
68 return false;
69 return true;
70 }
71
72 /*
73 * Allocate a buffer to hold log data. The buffer needs to be able to map to
74 * a range of nbblks basic blocks at any valid offset within the log.
75 */
76 static char *
xlog_alloc_buffer(struct xlog * log,int nbblks)77 xlog_alloc_buffer(
78 struct xlog *log,
79 int nbblks)
80 {
81 int align_mask = xfs_buftarg_dma_alignment(log->l_targ);
82
83 /*
84 * Pass log block 0 since we don't have an addr yet, buffer will be
85 * verified on read.
86 */
87 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
88 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
89 nbblks);
90 return NULL;
91 }
92
93 /*
94 * We do log I/O in units of log sectors (a power-of-2 multiple of the
95 * basic block size), so we round up the requested size to accommodate
96 * the basic blocks required for complete log sectors.
97 *
98 * In addition, the buffer may be used for a non-sector-aligned block
99 * offset, in which case an I/O of the requested size could extend
100 * beyond the end of the buffer. If the requested size is only 1 basic
101 * block it will never straddle a sector boundary, so this won't be an
102 * issue. Nor will this be a problem if the log I/O is done in basic
103 * blocks (sector size 1). But otherwise we extend the buffer by one
104 * extra log sector to ensure there's space to accommodate this
105 * possibility.
106 */
107 if (nbblks > 1 && log->l_sectBBsize > 1)
108 nbblks += log->l_sectBBsize;
109 nbblks = round_up(nbblks, log->l_sectBBsize);
110 return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
111 }
112
113 /*
114 * Return the address of the start of the given block number's data
115 * in a log buffer. The buffer covers a log sector-aligned region.
116 */
117 static inline unsigned int
xlog_align(struct xlog * log,xfs_daddr_t blk_no)118 xlog_align(
119 struct xlog *log,
120 xfs_daddr_t blk_no)
121 {
122 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
123 }
124
125 static int
xlog_do_io(struct xlog * log,xfs_daddr_t blk_no,unsigned int nbblks,char * data,unsigned int op)126 xlog_do_io(
127 struct xlog *log,
128 xfs_daddr_t blk_no,
129 unsigned int nbblks,
130 char *data,
131 unsigned int op)
132 {
133 int error;
134
135 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
136 xfs_warn(log->l_mp,
137 "Invalid log block/length (0x%llx, 0x%x) for buffer",
138 blk_no, nbblks);
139 return -EFSCORRUPTED;
140 }
141
142 blk_no = round_down(blk_no, log->l_sectBBsize);
143 nbblks = round_up(nbblks, log->l_sectBBsize);
144 ASSERT(nbblks > 0);
145
146 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
147 BBTOB(nbblks), data, op);
148 if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
149 xfs_alert(log->l_mp,
150 "log recovery %s I/O error at daddr 0x%llx len %d error %d",
151 op == REQ_OP_WRITE ? "write" : "read",
152 blk_no, nbblks, error);
153 }
154 return error;
155 }
156
157 STATIC int
xlog_bread_noalign(struct xlog * log,xfs_daddr_t blk_no,int nbblks,char * data)158 xlog_bread_noalign(
159 struct xlog *log,
160 xfs_daddr_t blk_no,
161 int nbblks,
162 char *data)
163 {
164 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
165 }
166
167 STATIC int
xlog_bread(struct xlog * log,xfs_daddr_t blk_no,int nbblks,char * data,char ** offset)168 xlog_bread(
169 struct xlog *log,
170 xfs_daddr_t blk_no,
171 int nbblks,
172 char *data,
173 char **offset)
174 {
175 int error;
176
177 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
178 if (!error)
179 *offset = data + xlog_align(log, blk_no);
180 return error;
181 }
182
183 STATIC int
xlog_bwrite(struct xlog * log,xfs_daddr_t blk_no,int nbblks,char * data)184 xlog_bwrite(
185 struct xlog *log,
186 xfs_daddr_t blk_no,
187 int nbblks,
188 char *data)
189 {
190 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
191 }
192
193 #ifdef DEBUG
194 /*
195 * dump debug superblock and log record information
196 */
197 STATIC void
xlog_header_check_dump(xfs_mount_t * mp,xlog_rec_header_t * head)198 xlog_header_check_dump(
199 xfs_mount_t *mp,
200 xlog_rec_header_t *head)
201 {
202 xfs_debug(mp, "%s: SB : uuid = %pU, fmt = %d",
203 __func__, &mp->m_sb.sb_uuid, XLOG_FMT);
204 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
205 &head->h_fs_uuid, be32_to_cpu(head->h_fmt));
206 }
207 #else
208 #define xlog_header_check_dump(mp, head)
209 #endif
210
211 /*
212 * check log record header for recovery
213 */
214 STATIC int
xlog_header_check_recover(xfs_mount_t * mp,xlog_rec_header_t * head)215 xlog_header_check_recover(
216 xfs_mount_t *mp,
217 xlog_rec_header_t *head)
218 {
219 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
220
221 /*
222 * IRIX doesn't write the h_fmt field and leaves it zeroed
223 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
224 * a dirty log created in IRIX.
225 */
226 if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
227 xfs_warn(mp,
228 "dirty log written in incompatible format - can't recover");
229 xlog_header_check_dump(mp, head);
230 return -EFSCORRUPTED;
231 }
232 if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
233 &head->h_fs_uuid))) {
234 xfs_warn(mp,
235 "dirty log entry has mismatched uuid - can't recover");
236 xlog_header_check_dump(mp, head);
237 return -EFSCORRUPTED;
238 }
239 return 0;
240 }
241
242 /*
243 * read the head block of the log and check the header
244 */
245 STATIC int
xlog_header_check_mount(xfs_mount_t * mp,xlog_rec_header_t * head)246 xlog_header_check_mount(
247 xfs_mount_t *mp,
248 xlog_rec_header_t *head)
249 {
250 ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
251
252 if (uuid_is_null(&head->h_fs_uuid)) {
253 /*
254 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
255 * h_fs_uuid is null, we assume this log was last mounted
256 * by IRIX and continue.
257 */
258 xfs_warn(mp, "null uuid in log - IRIX style log");
259 } else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
260 &head->h_fs_uuid))) {
261 xfs_warn(mp, "log has mismatched uuid - can't recover");
262 xlog_header_check_dump(mp, head);
263 return -EFSCORRUPTED;
264 }
265 return 0;
266 }
267
268 /*
269 * This routine finds (to an approximation) the first block in the physical
270 * log which contains the given cycle. It uses a binary search algorithm.
271 * Note that the algorithm can not be perfect because the disk will not
272 * necessarily be perfect.
273 */
274 STATIC int
xlog_find_cycle_start(struct xlog * log,char * buffer,xfs_daddr_t first_blk,xfs_daddr_t * last_blk,uint cycle)275 xlog_find_cycle_start(
276 struct xlog *log,
277 char *buffer,
278 xfs_daddr_t first_blk,
279 xfs_daddr_t *last_blk,
280 uint cycle)
281 {
282 char *offset;
283 xfs_daddr_t mid_blk;
284 xfs_daddr_t end_blk;
285 uint mid_cycle;
286 int error;
287
288 end_blk = *last_blk;
289 mid_blk = BLK_AVG(first_blk, end_blk);
290 while (mid_blk != first_blk && mid_blk != end_blk) {
291 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
292 if (error)
293 return error;
294 mid_cycle = xlog_get_cycle(offset);
295 if (mid_cycle == cycle)
296 end_blk = mid_blk; /* last_half_cycle == mid_cycle */
297 else
298 first_blk = mid_blk; /* first_half_cycle == mid_cycle */
299 mid_blk = BLK_AVG(first_blk, end_blk);
300 }
301 ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
302 (mid_blk == end_blk && mid_blk-1 == first_blk));
303
304 *last_blk = end_blk;
305
306 return 0;
307 }
308
309 /*
310 * Check that a range of blocks does not contain stop_on_cycle_no.
311 * Fill in *new_blk with the block offset where such a block is
312 * found, or with -1 (an invalid block number) if there is no such
313 * block in the range. The scan needs to occur from front to back
314 * and the pointer into the region must be updated since a later
315 * routine will need to perform another test.
316 */
317 STATIC int
xlog_find_verify_cycle(struct xlog * log,xfs_daddr_t start_blk,int nbblks,uint stop_on_cycle_no,xfs_daddr_t * new_blk)318 xlog_find_verify_cycle(
319 struct xlog *log,
320 xfs_daddr_t start_blk,
321 int nbblks,
322 uint stop_on_cycle_no,
323 xfs_daddr_t *new_blk)
324 {
325 xfs_daddr_t i, j;
326 uint cycle;
327 char *buffer;
328 xfs_daddr_t bufblks;
329 char *buf = NULL;
330 int error = 0;
331
332 /*
333 * Greedily allocate a buffer big enough to handle the full
334 * range of basic blocks we'll be examining. If that fails,
335 * try a smaller size. We need to be able to read at least
336 * a log sector, or we're out of luck.
337 */
338 bufblks = 1 << ffs(nbblks);
339 while (bufblks > log->l_logBBsize)
340 bufblks >>= 1;
341 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
342 bufblks >>= 1;
343 if (bufblks < log->l_sectBBsize)
344 return -ENOMEM;
345 }
346
347 for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
348 int bcount;
349
350 bcount = min(bufblks, (start_blk + nbblks - i));
351
352 error = xlog_bread(log, i, bcount, buffer, &buf);
353 if (error)
354 goto out;
355
356 for (j = 0; j < bcount; j++) {
357 cycle = xlog_get_cycle(buf);
358 if (cycle == stop_on_cycle_no) {
359 *new_blk = i+j;
360 goto out;
361 }
362
363 buf += BBSIZE;
364 }
365 }
366
367 *new_blk = -1;
368
369 out:
370 kmem_free(buffer);
371 return error;
372 }
373
374 static inline int
xlog_logrec_hblks(struct xlog * log,struct xlog_rec_header * rh)375 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
376 {
377 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
378 int h_size = be32_to_cpu(rh->h_size);
379
380 if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
381 h_size > XLOG_HEADER_CYCLE_SIZE)
382 return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
383 }
384 return 1;
385 }
386
387 /*
388 * Potentially backup over partial log record write.
389 *
390 * In the typical case, last_blk is the number of the block directly after
391 * a good log record. Therefore, we subtract one to get the block number
392 * of the last block in the given buffer. extra_bblks contains the number
393 * of blocks we would have read on a previous read. This happens when the
394 * last log record is split over the end of the physical log.
395 *
396 * extra_bblks is the number of blocks potentially verified on a previous
397 * call to this routine.
398 */
399 STATIC int
xlog_find_verify_log_record(struct xlog * log,xfs_daddr_t start_blk,xfs_daddr_t * last_blk,int extra_bblks)400 xlog_find_verify_log_record(
401 struct xlog *log,
402 xfs_daddr_t start_blk,
403 xfs_daddr_t *last_blk,
404 int extra_bblks)
405 {
406 xfs_daddr_t i;
407 char *buffer;
408 char *offset = NULL;
409 xlog_rec_header_t *head = NULL;
410 int error = 0;
411 int smallmem = 0;
412 int num_blks = *last_blk - start_blk;
413 int xhdrs;
414
415 ASSERT(start_blk != 0 || *last_blk != start_blk);
416
417 buffer = xlog_alloc_buffer(log, num_blks);
418 if (!buffer) {
419 buffer = xlog_alloc_buffer(log, 1);
420 if (!buffer)
421 return -ENOMEM;
422 smallmem = 1;
423 } else {
424 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
425 if (error)
426 goto out;
427 offset += ((num_blks - 1) << BBSHIFT);
428 }
429
430 for (i = (*last_blk) - 1; i >= 0; i--) {
431 if (i < start_blk) {
432 /* valid log record not found */
433 xfs_warn(log->l_mp,
434 "Log inconsistent (didn't find previous header)");
435 ASSERT(0);
436 error = -EFSCORRUPTED;
437 goto out;
438 }
439
440 if (smallmem) {
441 error = xlog_bread(log, i, 1, buffer, &offset);
442 if (error)
443 goto out;
444 }
445
446 head = (xlog_rec_header_t *)offset;
447
448 if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
449 break;
450
451 if (!smallmem)
452 offset -= BBSIZE;
453 }
454
455 /*
456 * We hit the beginning of the physical log & still no header. Return
457 * to caller. If caller can handle a return of -1, then this routine
458 * will be called again for the end of the physical log.
459 */
460 if (i == -1) {
461 error = 1;
462 goto out;
463 }
464
465 /*
466 * We have the final block of the good log (the first block
467 * of the log record _before_ the head. So we check the uuid.
468 */
469 if ((error = xlog_header_check_mount(log->l_mp, head)))
470 goto out;
471
472 /*
473 * We may have found a log record header before we expected one.
474 * last_blk will be the 1st block # with a given cycle #. We may end
475 * up reading an entire log record. In this case, we don't want to
476 * reset last_blk. Only when last_blk points in the middle of a log
477 * record do we update last_blk.
478 */
479 xhdrs = xlog_logrec_hblks(log, head);
480
481 if (*last_blk - i + extra_bblks !=
482 BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
483 *last_blk = i;
484
485 out:
486 kmem_free(buffer);
487 return error;
488 }
489
490 /*
491 * Head is defined to be the point of the log where the next log write
492 * could go. This means that incomplete LR writes at the end are
493 * eliminated when calculating the head. We aren't guaranteed that previous
494 * LR have complete transactions. We only know that a cycle number of
495 * current cycle number -1 won't be present in the log if we start writing
496 * from our current block number.
497 *
498 * last_blk contains the block number of the first block with a given
499 * cycle number.
500 *
501 * Return: zero if normal, non-zero if error.
502 */
503 STATIC int
xlog_find_head(struct xlog * log,xfs_daddr_t * return_head_blk)504 xlog_find_head(
505 struct xlog *log,
506 xfs_daddr_t *return_head_blk)
507 {
508 char *buffer;
509 char *offset;
510 xfs_daddr_t new_blk, first_blk, start_blk, last_blk, head_blk;
511 int num_scan_bblks;
512 uint first_half_cycle, last_half_cycle;
513 uint stop_on_cycle;
514 int error, log_bbnum = log->l_logBBsize;
515
516 /* Is the end of the log device zeroed? */
517 error = xlog_find_zeroed(log, &first_blk);
518 if (error < 0) {
519 xfs_warn(log->l_mp, "empty log check failed");
520 return error;
521 }
522 if (error == 1) {
523 *return_head_blk = first_blk;
524
525 /* Is the whole lot zeroed? */
526 if (!first_blk) {
527 /* Linux XFS shouldn't generate totally zeroed logs -
528 * mkfs etc write a dummy unmount record to a fresh
529 * log so we can store the uuid in there
530 */
531 xfs_warn(log->l_mp, "totally zeroed log");
532 }
533
534 return 0;
535 }
536
537 first_blk = 0; /* get cycle # of 1st block */
538 buffer = xlog_alloc_buffer(log, 1);
539 if (!buffer)
540 return -ENOMEM;
541
542 error = xlog_bread(log, 0, 1, buffer, &offset);
543 if (error)
544 goto out_free_buffer;
545
546 first_half_cycle = xlog_get_cycle(offset);
547
548 last_blk = head_blk = log_bbnum - 1; /* get cycle # of last block */
549 error = xlog_bread(log, last_blk, 1, buffer, &offset);
550 if (error)
551 goto out_free_buffer;
552
553 last_half_cycle = xlog_get_cycle(offset);
554 ASSERT(last_half_cycle != 0);
555
556 /*
557 * If the 1st half cycle number is equal to the last half cycle number,
558 * then the entire log is stamped with the same cycle number. In this
559 * case, head_blk can't be set to zero (which makes sense). The below
560 * math doesn't work out properly with head_blk equal to zero. Instead,
561 * we set it to log_bbnum which is an invalid block number, but this
562 * value makes the math correct. If head_blk doesn't changed through
563 * all the tests below, *head_blk is set to zero at the very end rather
564 * than log_bbnum. In a sense, log_bbnum and zero are the same block
565 * in a circular file.
566 */
567 if (first_half_cycle == last_half_cycle) {
568 /*
569 * In this case we believe that the entire log should have
570 * cycle number last_half_cycle. We need to scan backwards
571 * from the end verifying that there are no holes still
572 * containing last_half_cycle - 1. If we find such a hole,
573 * then the start of that hole will be the new head. The
574 * simple case looks like
575 * x | x ... | x - 1 | x
576 * Another case that fits this picture would be
577 * x | x + 1 | x ... | x
578 * In this case the head really is somewhere at the end of the
579 * log, as one of the latest writes at the beginning was
580 * incomplete.
581 * One more case is
582 * x | x + 1 | x ... | x - 1 | x
583 * This is really the combination of the above two cases, and
584 * the head has to end up at the start of the x-1 hole at the
585 * end of the log.
586 *
587 * In the 256k log case, we will read from the beginning to the
588 * end of the log and search for cycle numbers equal to x-1.
589 * We don't worry about the x+1 blocks that we encounter,
590 * because we know that they cannot be the head since the log
591 * started with x.
592 */
593 head_blk = log_bbnum;
594 stop_on_cycle = last_half_cycle - 1;
595 } else {
596 /*
597 * In this case we want to find the first block with cycle
598 * number matching last_half_cycle. We expect the log to be
599 * some variation on
600 * x + 1 ... | x ... | x
601 * The first block with cycle number x (last_half_cycle) will
602 * be where the new head belongs. First we do a binary search
603 * for the first occurrence of last_half_cycle. The binary
604 * search may not be totally accurate, so then we scan back
605 * from there looking for occurrences of last_half_cycle before
606 * us. If that backwards scan wraps around the beginning of
607 * the log, then we look for occurrences of last_half_cycle - 1
608 * at the end of the log. The cases we're looking for look
609 * like
610 * v binary search stopped here
611 * x + 1 ... | x | x + 1 | x ... | x
612 * ^ but we want to locate this spot
613 * or
614 * <---------> less than scan distance
615 * x + 1 ... | x ... | x - 1 | x
616 * ^ we want to locate this spot
617 */
618 stop_on_cycle = last_half_cycle;
619 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
620 last_half_cycle);
621 if (error)
622 goto out_free_buffer;
623 }
624
625 /*
626 * Now validate the answer. Scan back some number of maximum possible
627 * blocks and make sure each one has the expected cycle number. The
628 * maximum is determined by the total possible amount of buffering
629 * in the in-core log. The following number can be made tighter if
630 * we actually look at the block size of the filesystem.
631 */
632 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
633 if (head_blk >= num_scan_bblks) {
634 /*
635 * We are guaranteed that the entire check can be performed
636 * in one buffer.
637 */
638 start_blk = head_blk - num_scan_bblks;
639 if ((error = xlog_find_verify_cycle(log,
640 start_blk, num_scan_bblks,
641 stop_on_cycle, &new_blk)))
642 goto out_free_buffer;
643 if (new_blk != -1)
644 head_blk = new_blk;
645 } else { /* need to read 2 parts of log */
646 /*
647 * We are going to scan backwards in the log in two parts.
648 * First we scan the physical end of the log. In this part
649 * of the log, we are looking for blocks with cycle number
650 * last_half_cycle - 1.
651 * If we find one, then we know that the log starts there, as
652 * we've found a hole that didn't get written in going around
653 * the end of the physical log. The simple case for this is
654 * x + 1 ... | x ... | x - 1 | x
655 * <---------> less than scan distance
656 * If all of the blocks at the end of the log have cycle number
657 * last_half_cycle, then we check the blocks at the start of
658 * the log looking for occurrences of last_half_cycle. If we
659 * find one, then our current estimate for the location of the
660 * first occurrence of last_half_cycle is wrong and we move
661 * back to the hole we've found. This case looks like
662 * x + 1 ... | x | x + 1 | x ...
663 * ^ binary search stopped here
664 * Another case we need to handle that only occurs in 256k
665 * logs is
666 * x + 1 ... | x ... | x+1 | x ...
667 * ^ binary search stops here
668 * In a 256k log, the scan at the end of the log will see the
669 * x + 1 blocks. We need to skip past those since that is
670 * certainly not the head of the log. By searching for
671 * last_half_cycle-1 we accomplish that.
672 */
673 ASSERT(head_blk <= INT_MAX &&
674 (xfs_daddr_t) num_scan_bblks >= head_blk);
675 start_blk = log_bbnum - (num_scan_bblks - head_blk);
676 if ((error = xlog_find_verify_cycle(log, start_blk,
677 num_scan_bblks - (int)head_blk,
678 (stop_on_cycle - 1), &new_blk)))
679 goto out_free_buffer;
680 if (new_blk != -1) {
681 head_blk = new_blk;
682 goto validate_head;
683 }
684
685 /*
686 * Scan beginning of log now. The last part of the physical
687 * log is good. This scan needs to verify that it doesn't find
688 * the last_half_cycle.
689 */
690 start_blk = 0;
691 ASSERT(head_blk <= INT_MAX);
692 if ((error = xlog_find_verify_cycle(log,
693 start_blk, (int)head_blk,
694 stop_on_cycle, &new_blk)))
695 goto out_free_buffer;
696 if (new_blk != -1)
697 head_blk = new_blk;
698 }
699
700 validate_head:
701 /*
702 * Now we need to make sure head_blk is not pointing to a block in
703 * the middle of a log record.
704 */
705 num_scan_bblks = XLOG_REC_SHIFT(log);
706 if (head_blk >= num_scan_bblks) {
707 start_blk = head_blk - num_scan_bblks; /* don't read head_blk */
708
709 /* start ptr at last block ptr before head_blk */
710 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
711 if (error == 1)
712 error = -EIO;
713 if (error)
714 goto out_free_buffer;
715 } else {
716 start_blk = 0;
717 ASSERT(head_blk <= INT_MAX);
718 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
719 if (error < 0)
720 goto out_free_buffer;
721 if (error == 1) {
722 /* We hit the beginning of the log during our search */
723 start_blk = log_bbnum - (num_scan_bblks - head_blk);
724 new_blk = log_bbnum;
725 ASSERT(start_blk <= INT_MAX &&
726 (xfs_daddr_t) log_bbnum-start_blk >= 0);
727 ASSERT(head_blk <= INT_MAX);
728 error = xlog_find_verify_log_record(log, start_blk,
729 &new_blk, (int)head_blk);
730 if (error == 1)
731 error = -EIO;
732 if (error)
733 goto out_free_buffer;
734 if (new_blk != log_bbnum)
735 head_blk = new_blk;
736 } else if (error)
737 goto out_free_buffer;
738 }
739
740 kmem_free(buffer);
741 if (head_blk == log_bbnum)
742 *return_head_blk = 0;
743 else
744 *return_head_blk = head_blk;
745 /*
746 * When returning here, we have a good block number. Bad block
747 * means that during a previous crash, we didn't have a clean break
748 * from cycle number N to cycle number N-1. In this case, we need
749 * to find the first block with cycle number N-1.
750 */
751 return 0;
752
753 out_free_buffer:
754 kmem_free(buffer);
755 if (error)
756 xfs_warn(log->l_mp, "failed to find log head");
757 return error;
758 }
759
760 /*
761 * Seek backwards in the log for log record headers.
762 *
763 * Given a starting log block, walk backwards until we find the provided number
764 * of records or hit the provided tail block. The return value is the number of
765 * records encountered or a negative error code. The log block and buffer
766 * pointer of the last record seen are returned in rblk and rhead respectively.
767 */
768 STATIC int
xlog_rseek_logrec_hdr(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t tail_blk,int count,char * buffer,xfs_daddr_t * rblk,struct xlog_rec_header ** rhead,bool * wrapped)769 xlog_rseek_logrec_hdr(
770 struct xlog *log,
771 xfs_daddr_t head_blk,
772 xfs_daddr_t tail_blk,
773 int count,
774 char *buffer,
775 xfs_daddr_t *rblk,
776 struct xlog_rec_header **rhead,
777 bool *wrapped)
778 {
779 int i;
780 int error;
781 int found = 0;
782 char *offset = NULL;
783 xfs_daddr_t end_blk;
784
785 *wrapped = false;
786
787 /*
788 * Walk backwards from the head block until we hit the tail or the first
789 * block in the log.
790 */
791 end_blk = head_blk > tail_blk ? tail_blk : 0;
792 for (i = (int) head_blk - 1; i >= end_blk; i--) {
793 error = xlog_bread(log, i, 1, buffer, &offset);
794 if (error)
795 goto out_error;
796
797 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
798 *rblk = i;
799 *rhead = (struct xlog_rec_header *) offset;
800 if (++found == count)
801 break;
802 }
803 }
804
805 /*
806 * If we haven't hit the tail block or the log record header count,
807 * start looking again from the end of the physical log. Note that
808 * callers can pass head == tail if the tail is not yet known.
809 */
810 if (tail_blk >= head_blk && found != count) {
811 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
812 error = xlog_bread(log, i, 1, buffer, &offset);
813 if (error)
814 goto out_error;
815
816 if (*(__be32 *)offset ==
817 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
818 *wrapped = true;
819 *rblk = i;
820 *rhead = (struct xlog_rec_header *) offset;
821 if (++found == count)
822 break;
823 }
824 }
825 }
826
827 return found;
828
829 out_error:
830 return error;
831 }
832
833 /*
834 * Seek forward in the log for log record headers.
835 *
836 * Given head and tail blocks, walk forward from the tail block until we find
837 * the provided number of records or hit the head block. The return value is the
838 * number of records encountered or a negative error code. The log block and
839 * buffer pointer of the last record seen are returned in rblk and rhead
840 * respectively.
841 */
842 STATIC int
xlog_seek_logrec_hdr(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t tail_blk,int count,char * buffer,xfs_daddr_t * rblk,struct xlog_rec_header ** rhead,bool * wrapped)843 xlog_seek_logrec_hdr(
844 struct xlog *log,
845 xfs_daddr_t head_blk,
846 xfs_daddr_t tail_blk,
847 int count,
848 char *buffer,
849 xfs_daddr_t *rblk,
850 struct xlog_rec_header **rhead,
851 bool *wrapped)
852 {
853 int i;
854 int error;
855 int found = 0;
856 char *offset = NULL;
857 xfs_daddr_t end_blk;
858
859 *wrapped = false;
860
861 /*
862 * Walk forward from the tail block until we hit the head or the last
863 * block in the log.
864 */
865 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
866 for (i = (int) tail_blk; i <= end_blk; i++) {
867 error = xlog_bread(log, i, 1, buffer, &offset);
868 if (error)
869 goto out_error;
870
871 if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
872 *rblk = i;
873 *rhead = (struct xlog_rec_header *) offset;
874 if (++found == count)
875 break;
876 }
877 }
878
879 /*
880 * If we haven't hit the head block or the log record header count,
881 * start looking again from the start of the physical log.
882 */
883 if (tail_blk > head_blk && found != count) {
884 for (i = 0; i < (int) head_blk; i++) {
885 error = xlog_bread(log, i, 1, buffer, &offset);
886 if (error)
887 goto out_error;
888
889 if (*(__be32 *)offset ==
890 cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
891 *wrapped = true;
892 *rblk = i;
893 *rhead = (struct xlog_rec_header *) offset;
894 if (++found == count)
895 break;
896 }
897 }
898 }
899
900 return found;
901
902 out_error:
903 return error;
904 }
905
906 /*
907 * Calculate distance from head to tail (i.e., unused space in the log).
908 */
909 static inline int
xlog_tail_distance(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t tail_blk)910 xlog_tail_distance(
911 struct xlog *log,
912 xfs_daddr_t head_blk,
913 xfs_daddr_t tail_blk)
914 {
915 if (head_blk < tail_blk)
916 return tail_blk - head_blk;
917
918 return tail_blk + (log->l_logBBsize - head_blk);
919 }
920
921 /*
922 * Verify the log tail. This is particularly important when torn or incomplete
923 * writes have been detected near the front of the log and the head has been
924 * walked back accordingly.
925 *
926 * We also have to handle the case where the tail was pinned and the head
927 * blocked behind the tail right before a crash. If the tail had been pushed
928 * immediately prior to the crash and the subsequent checkpoint was only
929 * partially written, it's possible it overwrote the last referenced tail in the
930 * log with garbage. This is not a coherency problem because the tail must have
931 * been pushed before it can be overwritten, but appears as log corruption to
932 * recovery because we have no way to know the tail was updated if the
933 * subsequent checkpoint didn't write successfully.
934 *
935 * Therefore, CRC check the log from tail to head. If a failure occurs and the
936 * offending record is within max iclog bufs from the head, walk the tail
937 * forward and retry until a valid tail is found or corruption is detected out
938 * of the range of a possible overwrite.
939 */
940 STATIC int
xlog_verify_tail(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t * tail_blk,int hsize)941 xlog_verify_tail(
942 struct xlog *log,
943 xfs_daddr_t head_blk,
944 xfs_daddr_t *tail_blk,
945 int hsize)
946 {
947 struct xlog_rec_header *thead;
948 char *buffer;
949 xfs_daddr_t first_bad;
950 int error = 0;
951 bool wrapped;
952 xfs_daddr_t tmp_tail;
953 xfs_daddr_t orig_tail = *tail_blk;
954
955 buffer = xlog_alloc_buffer(log, 1);
956 if (!buffer)
957 return -ENOMEM;
958
959 /*
960 * Make sure the tail points to a record (returns positive count on
961 * success).
962 */
963 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
964 &tmp_tail, &thead, &wrapped);
965 if (error < 0)
966 goto out;
967 if (*tail_blk != tmp_tail)
968 *tail_blk = tmp_tail;
969
970 /*
971 * Run a CRC check from the tail to the head. We can't just check
972 * MAX_ICLOGS records past the tail because the tail may point to stale
973 * blocks cleared during the search for the head/tail. These blocks are
974 * overwritten with zero-length records and thus record count is not a
975 * reliable indicator of the iclog state before a crash.
976 */
977 first_bad = 0;
978 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
979 XLOG_RECOVER_CRCPASS, &first_bad);
980 while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
981 int tail_distance;
982
983 /*
984 * Is corruption within range of the head? If so, retry from
985 * the next record. Otherwise return an error.
986 */
987 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
988 if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
989 break;
990
991 /* skip to the next record; returns positive count on success */
992 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
993 buffer, &tmp_tail, &thead, &wrapped);
994 if (error < 0)
995 goto out;
996
997 *tail_blk = tmp_tail;
998 first_bad = 0;
999 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
1000 XLOG_RECOVER_CRCPASS, &first_bad);
1001 }
1002
1003 if (!error && *tail_blk != orig_tail)
1004 xfs_warn(log->l_mp,
1005 "Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
1006 orig_tail, *tail_blk);
1007 out:
1008 kmem_free(buffer);
1009 return error;
1010 }
1011
1012 /*
1013 * Detect and trim torn writes from the head of the log.
1014 *
1015 * Storage without sector atomicity guarantees can result in torn writes in the
1016 * log in the event of a crash. Our only means to detect this scenario is via
1017 * CRC verification. While we can't always be certain that CRC verification
1018 * failure is due to a torn write vs. an unrelated corruption, we do know that
1019 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1020 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
1021 * the log and treat failures in this range as torn writes as a matter of
1022 * policy. In the event of CRC failure, the head is walked back to the last good
1023 * record in the log and the tail is updated from that record and verified.
1024 */
1025 STATIC int
xlog_verify_head(struct xlog * log,xfs_daddr_t * head_blk,xfs_daddr_t * tail_blk,char * buffer,xfs_daddr_t * rhead_blk,struct xlog_rec_header ** rhead,bool * wrapped)1026 xlog_verify_head(
1027 struct xlog *log,
1028 xfs_daddr_t *head_blk, /* in/out: unverified head */
1029 xfs_daddr_t *tail_blk, /* out: tail block */
1030 char *buffer,
1031 xfs_daddr_t *rhead_blk, /* start blk of last record */
1032 struct xlog_rec_header **rhead, /* ptr to last record */
1033 bool *wrapped) /* last rec. wraps phys. log */
1034 {
1035 struct xlog_rec_header *tmp_rhead;
1036 char *tmp_buffer;
1037 xfs_daddr_t first_bad;
1038 xfs_daddr_t tmp_rhead_blk;
1039 int found;
1040 int error;
1041 bool tmp_wrapped;
1042
1043 /*
1044 * Check the head of the log for torn writes. Search backwards from the
1045 * head until we hit the tail or the maximum number of log record I/Os
1046 * that could have been in flight at one time. Use a temporary buffer so
1047 * we don't trash the rhead/buffer pointers from the caller.
1048 */
1049 tmp_buffer = xlog_alloc_buffer(log, 1);
1050 if (!tmp_buffer)
1051 return -ENOMEM;
1052 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1053 XLOG_MAX_ICLOGS, tmp_buffer,
1054 &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
1055 kmem_free(tmp_buffer);
1056 if (error < 0)
1057 return error;
1058
1059 /*
1060 * Now run a CRC verification pass over the records starting at the
1061 * block found above to the current head. If a CRC failure occurs, the
1062 * log block of the first bad record is saved in first_bad.
1063 */
1064 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1065 XLOG_RECOVER_CRCPASS, &first_bad);
1066 if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
1067 /*
1068 * We've hit a potential torn write. Reset the error and warn
1069 * about it.
1070 */
1071 error = 0;
1072 xfs_warn(log->l_mp,
1073 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1074 first_bad, *head_blk);
1075
1076 /*
1077 * Get the header block and buffer pointer for the last good
1078 * record before the bad record.
1079 *
1080 * Note that xlog_find_tail() clears the blocks at the new head
1081 * (i.e., the records with invalid CRC) if the cycle number
1082 * matches the current cycle.
1083 */
1084 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1085 buffer, rhead_blk, rhead, wrapped);
1086 if (found < 0)
1087 return found;
1088 if (found == 0) /* XXX: right thing to do here? */
1089 return -EIO;
1090
1091 /*
1092 * Reset the head block to the starting block of the first bad
1093 * log record and set the tail block based on the last good
1094 * record.
1095 *
1096 * Bail out if the updated head/tail match as this indicates
1097 * possible corruption outside of the acceptable
1098 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
1099 */
1100 *head_blk = first_bad;
1101 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1102 if (*head_blk == *tail_blk) {
1103 ASSERT(0);
1104 return 0;
1105 }
1106 }
1107 if (error)
1108 return error;
1109
1110 return xlog_verify_tail(log, *head_blk, tail_blk,
1111 be32_to_cpu((*rhead)->h_size));
1112 }
1113
1114 /*
1115 * We need to make sure we handle log wrapping properly, so we can't use the
1116 * calculated logbno directly. Make sure it wraps to the correct bno inside the
1117 * log.
1118 *
1119 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1120 * operation here and cast it back to a 64 bit daddr on return.
1121 */
1122 static inline xfs_daddr_t
xlog_wrap_logbno(struct xlog * log,xfs_daddr_t bno)1123 xlog_wrap_logbno(
1124 struct xlog *log,
1125 xfs_daddr_t bno)
1126 {
1127 int mod;
1128
1129 div_s64_rem(bno, log->l_logBBsize, &mod);
1130 return mod;
1131 }
1132
1133 /*
1134 * Check whether the head of the log points to an unmount record. In other
1135 * words, determine whether the log is clean. If so, update the in-core state
1136 * appropriately.
1137 */
1138 static int
xlog_check_unmount_rec(struct xlog * log,xfs_daddr_t * head_blk,xfs_daddr_t * tail_blk,struct xlog_rec_header * rhead,xfs_daddr_t rhead_blk,char * buffer,bool * clean)1139 xlog_check_unmount_rec(
1140 struct xlog *log,
1141 xfs_daddr_t *head_blk,
1142 xfs_daddr_t *tail_blk,
1143 struct xlog_rec_header *rhead,
1144 xfs_daddr_t rhead_blk,
1145 char *buffer,
1146 bool *clean)
1147 {
1148 struct xlog_op_header *op_head;
1149 xfs_daddr_t umount_data_blk;
1150 xfs_daddr_t after_umount_blk;
1151 int hblks;
1152 int error;
1153 char *offset;
1154
1155 *clean = false;
1156
1157 /*
1158 * Look for unmount record. If we find it, then we know there was a
1159 * clean unmount. Since 'i' could be the last block in the physical
1160 * log, we convert to a log block before comparing to the head_blk.
1161 *
1162 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
1163 * below. We won't want to clear the unmount record if there is one, so
1164 * we pass the lsn of the unmount record rather than the block after it.
1165 */
1166 hblks = xlog_logrec_hblks(log, rhead);
1167 after_umount_blk = xlog_wrap_logbno(log,
1168 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1169
1170 if (*head_blk == after_umount_blk &&
1171 be32_to_cpu(rhead->h_num_logops) == 1) {
1172 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1173 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1174 if (error)
1175 return error;
1176
1177 op_head = (struct xlog_op_header *)offset;
1178 if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
1179 /*
1180 * Set tail and last sync so that newly written log
1181 * records will point recovery to after the current
1182 * unmount record.
1183 */
1184 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1185 log->l_curr_cycle, after_umount_blk);
1186 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1187 log->l_curr_cycle, after_umount_blk);
1188 *tail_blk = after_umount_blk;
1189
1190 *clean = true;
1191 }
1192 }
1193
1194 return 0;
1195 }
1196
1197 static void
xlog_set_state(struct xlog * log,xfs_daddr_t head_blk,struct xlog_rec_header * rhead,xfs_daddr_t rhead_blk,bool bump_cycle)1198 xlog_set_state(
1199 struct xlog *log,
1200 xfs_daddr_t head_blk,
1201 struct xlog_rec_header *rhead,
1202 xfs_daddr_t rhead_blk,
1203 bool bump_cycle)
1204 {
1205 /*
1206 * Reset log values according to the state of the log when we
1207 * crashed. In the case where head_blk == 0, we bump curr_cycle
1208 * one because the next write starts a new cycle rather than
1209 * continuing the cycle of the last good log record. At this
1210 * point we have guaranteed that all partial log records have been
1211 * accounted for. Therefore, we know that the last good log record
1212 * written was complete and ended exactly on the end boundary
1213 * of the physical log.
1214 */
1215 log->l_prev_block = rhead_blk;
1216 log->l_curr_block = (int)head_blk;
1217 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1218 if (bump_cycle)
1219 log->l_curr_cycle++;
1220 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1221 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1222 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1223 BBTOB(log->l_curr_block));
1224 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1225 BBTOB(log->l_curr_block));
1226 }
1227
1228 /*
1229 * Find the sync block number or the tail of the log.
1230 *
1231 * This will be the block number of the last record to have its
1232 * associated buffers synced to disk. Every log record header has
1233 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
1234 * to get a sync block number. The only concern is to figure out which
1235 * log record header to believe.
1236 *
1237 * The following algorithm uses the log record header with the largest
1238 * lsn. The entire log record does not need to be valid. We only care
1239 * that the header is valid.
1240 *
1241 * We could speed up search by using current head_blk buffer, but it is not
1242 * available.
1243 */
1244 STATIC int
xlog_find_tail(struct xlog * log,xfs_daddr_t * head_blk,xfs_daddr_t * tail_blk)1245 xlog_find_tail(
1246 struct xlog *log,
1247 xfs_daddr_t *head_blk,
1248 xfs_daddr_t *tail_blk)
1249 {
1250 xlog_rec_header_t *rhead;
1251 char *offset = NULL;
1252 char *buffer;
1253 int error;
1254 xfs_daddr_t rhead_blk;
1255 xfs_lsn_t tail_lsn;
1256 bool wrapped = false;
1257 bool clean = false;
1258
1259 /*
1260 * Find previous log record
1261 */
1262 if ((error = xlog_find_head(log, head_blk)))
1263 return error;
1264 ASSERT(*head_blk < INT_MAX);
1265
1266 buffer = xlog_alloc_buffer(log, 1);
1267 if (!buffer)
1268 return -ENOMEM;
1269 if (*head_blk == 0) { /* special case */
1270 error = xlog_bread(log, 0, 1, buffer, &offset);
1271 if (error)
1272 goto done;
1273
1274 if (xlog_get_cycle(offset) == 0) {
1275 *tail_blk = 0;
1276 /* leave all other log inited values alone */
1277 goto done;
1278 }
1279 }
1280
1281 /*
1282 * Search backwards through the log looking for the log record header
1283 * block. This wraps all the way back around to the head so something is
1284 * seriously wrong if we can't find it.
1285 */
1286 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1287 &rhead_blk, &rhead, &wrapped);
1288 if (error < 0)
1289 goto done;
1290 if (!error) {
1291 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1292 error = -EFSCORRUPTED;
1293 goto done;
1294 }
1295 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1296
1297 /*
1298 * Set the log state based on the current head record.
1299 */
1300 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1301 tail_lsn = atomic64_read(&log->l_tail_lsn);
1302
1303 /*
1304 * Look for an unmount record at the head of the log. This sets the log
1305 * state to determine whether recovery is necessary.
1306 */
1307 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1308 rhead_blk, buffer, &clean);
1309 if (error)
1310 goto done;
1311
1312 /*
1313 * Verify the log head if the log is not clean (e.g., we have anything
1314 * but an unmount record at the head). This uses CRC verification to
1315 * detect and trim torn writes. If discovered, CRC failures are
1316 * considered torn writes and the log head is trimmed accordingly.
1317 *
1318 * Note that we can only run CRC verification when the log is dirty
1319 * because there's no guarantee that the log data behind an unmount
1320 * record is compatible with the current architecture.
1321 */
1322 if (!clean) {
1323 xfs_daddr_t orig_head = *head_blk;
1324
1325 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1326 &rhead_blk, &rhead, &wrapped);
1327 if (error)
1328 goto done;
1329
1330 /* update in-core state again if the head changed */
1331 if (*head_blk != orig_head) {
1332 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1333 wrapped);
1334 tail_lsn = atomic64_read(&log->l_tail_lsn);
1335 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1336 rhead, rhead_blk, buffer,
1337 &clean);
1338 if (error)
1339 goto done;
1340 }
1341 }
1342
1343 /*
1344 * Note that the unmount was clean. If the unmount was not clean, we
1345 * need to know this to rebuild the superblock counters from the perag
1346 * headers if we have a filesystem using non-persistent counters.
1347 */
1348 if (clean)
1349 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
1350
1351 /*
1352 * Make sure that there are no blocks in front of the head
1353 * with the same cycle number as the head. This can happen
1354 * because we allow multiple outstanding log writes concurrently,
1355 * and the later writes might make it out before earlier ones.
1356 *
1357 * We use the lsn from before modifying it so that we'll never
1358 * overwrite the unmount record after a clean unmount.
1359 *
1360 * Do this only if we are going to recover the filesystem
1361 *
1362 * NOTE: This used to say "if (!readonly)"
1363 * However on Linux, we can & do recover a read-only filesystem.
1364 * We only skip recovery if NORECOVERY is specified on mount,
1365 * in which case we would not be here.
1366 *
1367 * But... if the -device- itself is readonly, just skip this.
1368 * We can't recover this device anyway, so it won't matter.
1369 */
1370 if (!xfs_readonly_buftarg(log->l_targ))
1371 error = xlog_clear_stale_blocks(log, tail_lsn);
1372
1373 done:
1374 kmem_free(buffer);
1375
1376 if (error)
1377 xfs_warn(log->l_mp, "failed to locate log tail");
1378 return error;
1379 }
1380
1381 /*
1382 * Is the log zeroed at all?
1383 *
1384 * The last binary search should be changed to perform an X block read
1385 * once X becomes small enough. You can then search linearly through
1386 * the X blocks. This will cut down on the number of reads we need to do.
1387 *
1388 * If the log is partially zeroed, this routine will pass back the blkno
1389 * of the first block with cycle number 0. It won't have a complete LR
1390 * preceding it.
1391 *
1392 * Return:
1393 * 0 => the log is completely written to
1394 * 1 => use *blk_no as the first block of the log
1395 * <0 => error has occurred
1396 */
1397 STATIC int
xlog_find_zeroed(struct xlog * log,xfs_daddr_t * blk_no)1398 xlog_find_zeroed(
1399 struct xlog *log,
1400 xfs_daddr_t *blk_no)
1401 {
1402 char *buffer;
1403 char *offset;
1404 uint first_cycle, last_cycle;
1405 xfs_daddr_t new_blk, last_blk, start_blk;
1406 xfs_daddr_t num_scan_bblks;
1407 int error, log_bbnum = log->l_logBBsize;
1408
1409 *blk_no = 0;
1410
1411 /* check totally zeroed log */
1412 buffer = xlog_alloc_buffer(log, 1);
1413 if (!buffer)
1414 return -ENOMEM;
1415 error = xlog_bread(log, 0, 1, buffer, &offset);
1416 if (error)
1417 goto out_free_buffer;
1418
1419 first_cycle = xlog_get_cycle(offset);
1420 if (first_cycle == 0) { /* completely zeroed log */
1421 *blk_no = 0;
1422 kmem_free(buffer);
1423 return 1;
1424 }
1425
1426 /* check partially zeroed log */
1427 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1428 if (error)
1429 goto out_free_buffer;
1430
1431 last_cycle = xlog_get_cycle(offset);
1432 if (last_cycle != 0) { /* log completely written to */
1433 kmem_free(buffer);
1434 return 0;
1435 }
1436
1437 /* we have a partially zeroed log */
1438 last_blk = log_bbnum-1;
1439 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1440 if (error)
1441 goto out_free_buffer;
1442
1443 /*
1444 * Validate the answer. Because there is no way to guarantee that
1445 * the entire log is made up of log records which are the same size,
1446 * we scan over the defined maximum blocks. At this point, the maximum
1447 * is not chosen to mean anything special. XXXmiken
1448 */
1449 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1450 ASSERT(num_scan_bblks <= INT_MAX);
1451
1452 if (last_blk < num_scan_bblks)
1453 num_scan_bblks = last_blk;
1454 start_blk = last_blk - num_scan_bblks;
1455
1456 /*
1457 * We search for any instances of cycle number 0 that occur before
1458 * our current estimate of the head. What we're trying to detect is
1459 * 1 ... | 0 | 1 | 0...
1460 * ^ binary search ends here
1461 */
1462 if ((error = xlog_find_verify_cycle(log, start_blk,
1463 (int)num_scan_bblks, 0, &new_blk)))
1464 goto out_free_buffer;
1465 if (new_blk != -1)
1466 last_blk = new_blk;
1467
1468 /*
1469 * Potentially backup over partial log record write. We don't need
1470 * to search the end of the log because we know it is zero.
1471 */
1472 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
1473 if (error == 1)
1474 error = -EIO;
1475 if (error)
1476 goto out_free_buffer;
1477
1478 *blk_no = last_blk;
1479 out_free_buffer:
1480 kmem_free(buffer);
1481 if (error)
1482 return error;
1483 return 1;
1484 }
1485
1486 /*
1487 * These are simple subroutines used by xlog_clear_stale_blocks() below
1488 * to initialize a buffer full of empty log record headers and write
1489 * them into the log.
1490 */
1491 STATIC void
xlog_add_record(struct xlog * log,char * buf,int cycle,int block,int tail_cycle,int tail_block)1492 xlog_add_record(
1493 struct xlog *log,
1494 char *buf,
1495 int cycle,
1496 int block,
1497 int tail_cycle,
1498 int tail_block)
1499 {
1500 xlog_rec_header_t *recp = (xlog_rec_header_t *)buf;
1501
1502 memset(buf, 0, BBSIZE);
1503 recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
1504 recp->h_cycle = cpu_to_be32(cycle);
1505 recp->h_version = cpu_to_be32(
1506 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1507 recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
1508 recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
1509 recp->h_fmt = cpu_to_be32(XLOG_FMT);
1510 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1511 }
1512
1513 STATIC int
xlog_write_log_records(struct xlog * log,int cycle,int start_block,int blocks,int tail_cycle,int tail_block)1514 xlog_write_log_records(
1515 struct xlog *log,
1516 int cycle,
1517 int start_block,
1518 int blocks,
1519 int tail_cycle,
1520 int tail_block)
1521 {
1522 char *offset;
1523 char *buffer;
1524 int balign, ealign;
1525 int sectbb = log->l_sectBBsize;
1526 int end_block = start_block + blocks;
1527 int bufblks;
1528 int error = 0;
1529 int i, j = 0;
1530
1531 /*
1532 * Greedily allocate a buffer big enough to handle the full
1533 * range of basic blocks to be written. If that fails, try
1534 * a smaller size. We need to be able to write at least a
1535 * log sector, or we're out of luck.
1536 */
1537 bufblks = 1 << ffs(blocks);
1538 while (bufblks > log->l_logBBsize)
1539 bufblks >>= 1;
1540 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1541 bufblks >>= 1;
1542 if (bufblks < sectbb)
1543 return -ENOMEM;
1544 }
1545
1546 /* We may need to do a read at the start to fill in part of
1547 * the buffer in the starting sector not covered by the first
1548 * write below.
1549 */
1550 balign = round_down(start_block, sectbb);
1551 if (balign != start_block) {
1552 error = xlog_bread_noalign(log, start_block, 1, buffer);
1553 if (error)
1554 goto out_free_buffer;
1555
1556 j = start_block - balign;
1557 }
1558
1559 for (i = start_block; i < end_block; i += bufblks) {
1560 int bcount, endcount;
1561
1562 bcount = min(bufblks, end_block - start_block);
1563 endcount = bcount - j;
1564
1565 /* We may need to do a read at the end to fill in part of
1566 * the buffer in the final sector not covered by the write.
1567 * If this is the same sector as the above read, skip it.
1568 */
1569 ealign = round_down(end_block, sectbb);
1570 if (j == 0 && (start_block + endcount > ealign)) {
1571 error = xlog_bread_noalign(log, ealign, sectbb,
1572 buffer + BBTOB(ealign - start_block));
1573 if (error)
1574 break;
1575
1576 }
1577
1578 offset = buffer + xlog_align(log, start_block);
1579 for (; j < endcount; j++) {
1580 xlog_add_record(log, offset, cycle, i+j,
1581 tail_cycle, tail_block);
1582 offset += BBSIZE;
1583 }
1584 error = xlog_bwrite(log, start_block, endcount, buffer);
1585 if (error)
1586 break;
1587 start_block += endcount;
1588 j = 0;
1589 }
1590
1591 out_free_buffer:
1592 kmem_free(buffer);
1593 return error;
1594 }
1595
1596 /*
1597 * This routine is called to blow away any incomplete log writes out
1598 * in front of the log head. We do this so that we won't become confused
1599 * if we come up, write only a little bit more, and then crash again.
1600 * If we leave the partial log records out there, this situation could
1601 * cause us to think those partial writes are valid blocks since they
1602 * have the current cycle number. We get rid of them by overwriting them
1603 * with empty log records with the old cycle number rather than the
1604 * current one.
1605 *
1606 * The tail lsn is passed in rather than taken from
1607 * the log so that we will not write over the unmount record after a
1608 * clean unmount in a 512 block log. Doing so would leave the log without
1609 * any valid log records in it until a new one was written. If we crashed
1610 * during that time we would not be able to recover.
1611 */
1612 STATIC int
xlog_clear_stale_blocks(struct xlog * log,xfs_lsn_t tail_lsn)1613 xlog_clear_stale_blocks(
1614 struct xlog *log,
1615 xfs_lsn_t tail_lsn)
1616 {
1617 int tail_cycle, head_cycle;
1618 int tail_block, head_block;
1619 int tail_distance, max_distance;
1620 int distance;
1621 int error;
1622
1623 tail_cycle = CYCLE_LSN(tail_lsn);
1624 tail_block = BLOCK_LSN(tail_lsn);
1625 head_cycle = log->l_curr_cycle;
1626 head_block = log->l_curr_block;
1627
1628 /*
1629 * Figure out the distance between the new head of the log
1630 * and the tail. We want to write over any blocks beyond the
1631 * head that we may have written just before the crash, but
1632 * we don't want to overwrite the tail of the log.
1633 */
1634 if (head_cycle == tail_cycle) {
1635 /*
1636 * The tail is behind the head in the physical log,
1637 * so the distance from the head to the tail is the
1638 * distance from the head to the end of the log plus
1639 * the distance from the beginning of the log to the
1640 * tail.
1641 */
1642 if (XFS_IS_CORRUPT(log->l_mp,
1643 head_block < tail_block ||
1644 head_block >= log->l_logBBsize))
1645 return -EFSCORRUPTED;
1646 tail_distance = tail_block + (log->l_logBBsize - head_block);
1647 } else {
1648 /*
1649 * The head is behind the tail in the physical log,
1650 * so the distance from the head to the tail is just
1651 * the tail block minus the head block.
1652 */
1653 if (XFS_IS_CORRUPT(log->l_mp,
1654 head_block >= tail_block ||
1655 head_cycle != tail_cycle + 1))
1656 return -EFSCORRUPTED;
1657 tail_distance = tail_block - head_block;
1658 }
1659
1660 /*
1661 * If the head is right up against the tail, we can't clear
1662 * anything.
1663 */
1664 if (tail_distance <= 0) {
1665 ASSERT(tail_distance == 0);
1666 return 0;
1667 }
1668
1669 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1670 /*
1671 * Take the smaller of the maximum amount of outstanding I/O
1672 * we could have and the distance to the tail to clear out.
1673 * We take the smaller so that we don't overwrite the tail and
1674 * we don't waste all day writing from the head to the tail
1675 * for no reason.
1676 */
1677 max_distance = min(max_distance, tail_distance);
1678
1679 if ((head_block + max_distance) <= log->l_logBBsize) {
1680 /*
1681 * We can stomp all the blocks we need to without
1682 * wrapping around the end of the log. Just do it
1683 * in a single write. Use the cycle number of the
1684 * current cycle minus one so that the log will look like:
1685 * n ... | n - 1 ...
1686 */
1687 error = xlog_write_log_records(log, (head_cycle - 1),
1688 head_block, max_distance, tail_cycle,
1689 tail_block);
1690 if (error)
1691 return error;
1692 } else {
1693 /*
1694 * We need to wrap around the end of the physical log in
1695 * order to clear all the blocks. Do it in two separate
1696 * I/Os. The first write should be from the head to the
1697 * end of the physical log, and it should use the current
1698 * cycle number minus one just like above.
1699 */
1700 distance = log->l_logBBsize - head_block;
1701 error = xlog_write_log_records(log, (head_cycle - 1),
1702 head_block, distance, tail_cycle,
1703 tail_block);
1704
1705 if (error)
1706 return error;
1707
1708 /*
1709 * Now write the blocks at the start of the physical log.
1710 * This writes the remainder of the blocks we want to clear.
1711 * It uses the current cycle number since we're now on the
1712 * same cycle as the head so that we get:
1713 * n ... n ... | n - 1 ...
1714 * ^^^^^ blocks we're writing
1715 */
1716 distance = max_distance - (log->l_logBBsize - head_block);
1717 error = xlog_write_log_records(log, head_cycle, 0, distance,
1718 tail_cycle, tail_block);
1719 if (error)
1720 return error;
1721 }
1722
1723 return 0;
1724 }
1725
1726 /*
1727 * Release the recovered intent item in the AIL that matches the given intent
1728 * type and intent id.
1729 */
1730 void
xlog_recover_release_intent(struct xlog * log,unsigned short intent_type,uint64_t intent_id)1731 xlog_recover_release_intent(
1732 struct xlog *log,
1733 unsigned short intent_type,
1734 uint64_t intent_id)
1735 {
1736 struct xfs_ail_cursor cur;
1737 struct xfs_log_item *lip;
1738 struct xfs_ail *ailp = log->l_ailp;
1739
1740 spin_lock(&ailp->ail_lock);
1741 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
1742 lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
1743 if (lip->li_type != intent_type)
1744 continue;
1745 if (!lip->li_ops->iop_match(lip, intent_id))
1746 continue;
1747
1748 spin_unlock(&ailp->ail_lock);
1749 lip->li_ops->iop_release(lip);
1750 spin_lock(&ailp->ail_lock);
1751 break;
1752 }
1753
1754 xfs_trans_ail_cursor_done(&cur);
1755 spin_unlock(&ailp->ail_lock);
1756 }
1757
1758 /******************************************************************************
1759 *
1760 * Log recover routines
1761 *
1762 ******************************************************************************
1763 */
1764 static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
1765 &xlog_buf_item_ops,
1766 &xlog_inode_item_ops,
1767 &xlog_dquot_item_ops,
1768 &xlog_quotaoff_item_ops,
1769 &xlog_icreate_item_ops,
1770 &xlog_efi_item_ops,
1771 &xlog_efd_item_ops,
1772 &xlog_rui_item_ops,
1773 &xlog_rud_item_ops,
1774 &xlog_cui_item_ops,
1775 &xlog_cud_item_ops,
1776 &xlog_bui_item_ops,
1777 &xlog_bud_item_ops,
1778 };
1779
1780 static const struct xlog_recover_item_ops *
xlog_find_item_ops(struct xlog_recover_item * item)1781 xlog_find_item_ops(
1782 struct xlog_recover_item *item)
1783 {
1784 unsigned int i;
1785
1786 for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
1787 if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
1788 return xlog_recover_item_ops[i];
1789
1790 return NULL;
1791 }
1792
1793 /*
1794 * Sort the log items in the transaction.
1795 *
1796 * The ordering constraints are defined by the inode allocation and unlink
1797 * behaviour. The rules are:
1798 *
1799 * 1. Every item is only logged once in a given transaction. Hence it
1800 * represents the last logged state of the item. Hence ordering is
1801 * dependent on the order in which operations need to be performed so
1802 * required initial conditions are always met.
1803 *
1804 * 2. Cancelled buffers are recorded in pass 1 in a separate table and
1805 * there's nothing to replay from them so we can simply cull them
1806 * from the transaction. However, we can't do that until after we've
1807 * replayed all the other items because they may be dependent on the
1808 * cancelled buffer and replaying the cancelled buffer can remove it
1809 * form the cancelled buffer table. Hence they have tobe done last.
1810 *
1811 * 3. Inode allocation buffers must be replayed before inode items that
1812 * read the buffer and replay changes into it. For filesystems using the
1813 * ICREATE transactions, this means XFS_LI_ICREATE objects need to get
1814 * treated the same as inode allocation buffers as they create and
1815 * initialise the buffers directly.
1816 *
1817 * 4. Inode unlink buffers must be replayed after inode items are replayed.
1818 * This ensures that inodes are completely flushed to the inode buffer
1819 * in a "free" state before we remove the unlinked inode list pointer.
1820 *
1821 * Hence the ordering needs to be inode allocation buffers first, inode items
1822 * second, inode unlink buffers third and cancelled buffers last.
1823 *
1824 * But there's a problem with that - we can't tell an inode allocation buffer
1825 * apart from a regular buffer, so we can't separate them. We can, however,
1826 * tell an inode unlink buffer from the others, and so we can separate them out
1827 * from all the other buffers and move them to last.
1828 *
1829 * Hence, 4 lists, in order from head to tail:
1830 * - buffer_list for all buffers except cancelled/inode unlink buffers
1831 * - item_list for all non-buffer items
1832 * - inode_buffer_list for inode unlink buffers
1833 * - cancel_list for the cancelled buffers
1834 *
1835 * Note that we add objects to the tail of the lists so that first-to-last
1836 * ordering is preserved within the lists. Adding objects to the head of the
1837 * list means when we traverse from the head we walk them in last-to-first
1838 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
1839 * but for all other items there may be specific ordering that we need to
1840 * preserve.
1841 */
1842 STATIC int
xlog_recover_reorder_trans(struct xlog * log,struct xlog_recover * trans,int pass)1843 xlog_recover_reorder_trans(
1844 struct xlog *log,
1845 struct xlog_recover *trans,
1846 int pass)
1847 {
1848 struct xlog_recover_item *item, *n;
1849 int error = 0;
1850 LIST_HEAD(sort_list);
1851 LIST_HEAD(cancel_list);
1852 LIST_HEAD(buffer_list);
1853 LIST_HEAD(inode_buffer_list);
1854 LIST_HEAD(item_list);
1855
1856 list_splice_init(&trans->r_itemq, &sort_list);
1857 list_for_each_entry_safe(item, n, &sort_list, ri_list) {
1858 enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST;
1859
1860 item->ri_ops = xlog_find_item_ops(item);
1861 if (!item->ri_ops) {
1862 xfs_warn(log->l_mp,
1863 "%s: unrecognized type of log operation (%d)",
1864 __func__, ITEM_TYPE(item));
1865 ASSERT(0);
1866 /*
1867 * return the remaining items back to the transaction
1868 * item list so they can be freed in caller.
1869 */
1870 if (!list_empty(&sort_list))
1871 list_splice_init(&sort_list, &trans->r_itemq);
1872 error = -EFSCORRUPTED;
1873 break;
1874 }
1875
1876 if (item->ri_ops->reorder)
1877 fate = item->ri_ops->reorder(item);
1878
1879 switch (fate) {
1880 case XLOG_REORDER_BUFFER_LIST:
1881 list_move_tail(&item->ri_list, &buffer_list);
1882 break;
1883 case XLOG_REORDER_CANCEL_LIST:
1884 trace_xfs_log_recover_item_reorder_head(log,
1885 trans, item, pass);
1886 list_move(&item->ri_list, &cancel_list);
1887 break;
1888 case XLOG_REORDER_INODE_BUFFER_LIST:
1889 list_move(&item->ri_list, &inode_buffer_list);
1890 break;
1891 case XLOG_REORDER_ITEM_LIST:
1892 trace_xfs_log_recover_item_reorder_tail(log,
1893 trans, item, pass);
1894 list_move_tail(&item->ri_list, &item_list);
1895 break;
1896 }
1897 }
1898
1899 ASSERT(list_empty(&sort_list));
1900 if (!list_empty(&buffer_list))
1901 list_splice(&buffer_list, &trans->r_itemq);
1902 if (!list_empty(&item_list))
1903 list_splice_tail(&item_list, &trans->r_itemq);
1904 if (!list_empty(&inode_buffer_list))
1905 list_splice_tail(&inode_buffer_list, &trans->r_itemq);
1906 if (!list_empty(&cancel_list))
1907 list_splice_tail(&cancel_list, &trans->r_itemq);
1908 return error;
1909 }
1910
1911 void
xlog_buf_readahead(struct xlog * log,xfs_daddr_t blkno,uint len,const struct xfs_buf_ops * ops)1912 xlog_buf_readahead(
1913 struct xlog *log,
1914 xfs_daddr_t blkno,
1915 uint len,
1916 const struct xfs_buf_ops *ops)
1917 {
1918 if (!xlog_is_buffer_cancelled(log, blkno, len))
1919 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1920 }
1921
1922 STATIC int
xlog_recover_items_pass2(struct xlog * log,struct xlog_recover * trans,struct list_head * buffer_list,struct list_head * item_list)1923 xlog_recover_items_pass2(
1924 struct xlog *log,
1925 struct xlog_recover *trans,
1926 struct list_head *buffer_list,
1927 struct list_head *item_list)
1928 {
1929 struct xlog_recover_item *item;
1930 int error = 0;
1931
1932 list_for_each_entry(item, item_list, ri_list) {
1933 trace_xfs_log_recover_item_recover(log, trans, item,
1934 XLOG_RECOVER_PASS2);
1935
1936 if (item->ri_ops->commit_pass2)
1937 error = item->ri_ops->commit_pass2(log, buffer_list,
1938 item, trans->r_lsn);
1939 if (error)
1940 return error;
1941 }
1942
1943 return error;
1944 }
1945
1946 /*
1947 * Perform the transaction.
1948 *
1949 * If the transaction modifies a buffer or inode, do it now. Otherwise,
1950 * EFIs and EFDs get queued up by adding entries into the AIL for them.
1951 */
1952 STATIC int
xlog_recover_commit_trans(struct xlog * log,struct xlog_recover * trans,int pass,struct list_head * buffer_list)1953 xlog_recover_commit_trans(
1954 struct xlog *log,
1955 struct xlog_recover *trans,
1956 int pass,
1957 struct list_head *buffer_list)
1958 {
1959 int error = 0;
1960 int items_queued = 0;
1961 struct xlog_recover_item *item;
1962 struct xlog_recover_item *next;
1963 LIST_HEAD (ra_list);
1964 LIST_HEAD (done_list);
1965
1966 #define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
1967
1968 hlist_del_init(&trans->r_list);
1969
1970 error = xlog_recover_reorder_trans(log, trans, pass);
1971 if (error)
1972 return error;
1973
1974 list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
1975 trace_xfs_log_recover_item_recover(log, trans, item, pass);
1976
1977 switch (pass) {
1978 case XLOG_RECOVER_PASS1:
1979 if (item->ri_ops->commit_pass1)
1980 error = item->ri_ops->commit_pass1(log, item);
1981 break;
1982 case XLOG_RECOVER_PASS2:
1983 if (item->ri_ops->ra_pass2)
1984 item->ri_ops->ra_pass2(log, item);
1985 list_move_tail(&item->ri_list, &ra_list);
1986 items_queued++;
1987 if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
1988 error = xlog_recover_items_pass2(log, trans,
1989 buffer_list, &ra_list);
1990 list_splice_tail_init(&ra_list, &done_list);
1991 items_queued = 0;
1992 }
1993
1994 break;
1995 default:
1996 ASSERT(0);
1997 }
1998
1999 if (error)
2000 goto out;
2001 }
2002
2003 out:
2004 if (!list_empty(&ra_list)) {
2005 if (!error)
2006 error = xlog_recover_items_pass2(log, trans,
2007 buffer_list, &ra_list);
2008 list_splice_tail_init(&ra_list, &done_list);
2009 }
2010
2011 if (!list_empty(&done_list))
2012 list_splice_init(&done_list, &trans->r_itemq);
2013
2014 return error;
2015 }
2016
2017 STATIC void
xlog_recover_add_item(struct list_head * head)2018 xlog_recover_add_item(
2019 struct list_head *head)
2020 {
2021 struct xlog_recover_item *item;
2022
2023 item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
2024 INIT_LIST_HEAD(&item->ri_list);
2025 list_add_tail(&item->ri_list, head);
2026 }
2027
2028 STATIC int
xlog_recover_add_to_cont_trans(struct xlog * log,struct xlog_recover * trans,char * dp,int len)2029 xlog_recover_add_to_cont_trans(
2030 struct xlog *log,
2031 struct xlog_recover *trans,
2032 char *dp,
2033 int len)
2034 {
2035 struct xlog_recover_item *item;
2036 char *ptr, *old_ptr;
2037 int old_len;
2038
2039 /*
2040 * If the transaction is empty, the header was split across this and the
2041 * previous record. Copy the rest of the header.
2042 */
2043 if (list_empty(&trans->r_itemq)) {
2044 ASSERT(len <= sizeof(struct xfs_trans_header));
2045 if (len > sizeof(struct xfs_trans_header)) {
2046 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2047 return -EFSCORRUPTED;
2048 }
2049
2050 xlog_recover_add_item(&trans->r_itemq);
2051 ptr = (char *)&trans->r_theader +
2052 sizeof(struct xfs_trans_header) - len;
2053 memcpy(ptr, dp, len);
2054 return 0;
2055 }
2056
2057 /* take the tail entry */
2058 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2059 ri_list);
2060
2061 old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
2062 old_len = item->ri_buf[item->ri_cnt-1].i_len;
2063
2064 ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
2065 if (!ptr)
2066 return -ENOMEM;
2067 memcpy(&ptr[old_len], dp, len);
2068 item->ri_buf[item->ri_cnt-1].i_len += len;
2069 item->ri_buf[item->ri_cnt-1].i_addr = ptr;
2070 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2071 return 0;
2072 }
2073
2074 /*
2075 * The next region to add is the start of a new region. It could be
2076 * a whole region or it could be the first part of a new region. Because
2077 * of this, the assumption here is that the type and size fields of all
2078 * format structures fit into the first 32 bits of the structure.
2079 *
2080 * This works because all regions must be 32 bit aligned. Therefore, we
2081 * either have both fields or we have neither field. In the case we have
2082 * neither field, the data part of the region is zero length. We only have
2083 * a log_op_header and can throw away the header since a new one will appear
2084 * later. If we have at least 4 bytes, then we can determine how many regions
2085 * will appear in the current log item.
2086 */
2087 STATIC int
xlog_recover_add_to_trans(struct xlog * log,struct xlog_recover * trans,char * dp,int len)2088 xlog_recover_add_to_trans(
2089 struct xlog *log,
2090 struct xlog_recover *trans,
2091 char *dp,
2092 int len)
2093 {
2094 struct xfs_inode_log_format *in_f; /* any will do */
2095 struct xlog_recover_item *item;
2096 char *ptr;
2097
2098 if (!len)
2099 return 0;
2100 if (list_empty(&trans->r_itemq)) {
2101 /* we need to catch log corruptions here */
2102 if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
2103 xfs_warn(log->l_mp, "%s: bad header magic number",
2104 __func__);
2105 ASSERT(0);
2106 return -EFSCORRUPTED;
2107 }
2108
2109 if (len > sizeof(struct xfs_trans_header)) {
2110 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2111 ASSERT(0);
2112 return -EFSCORRUPTED;
2113 }
2114
2115 /*
2116 * The transaction header can be arbitrarily split across op
2117 * records. If we don't have the whole thing here, copy what we
2118 * do have and handle the rest in the next record.
2119 */
2120 if (len == sizeof(struct xfs_trans_header))
2121 xlog_recover_add_item(&trans->r_itemq);
2122 memcpy(&trans->r_theader, dp, len);
2123 return 0;
2124 }
2125
2126 ptr = kmem_alloc(len, 0);
2127 memcpy(ptr, dp, len);
2128 in_f = (struct xfs_inode_log_format *)ptr;
2129
2130 /* take the tail entry */
2131 item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
2132 ri_list);
2133 if (item->ri_total != 0 &&
2134 item->ri_total == item->ri_cnt) {
2135 /* tail item is in use, get a new one */
2136 xlog_recover_add_item(&trans->r_itemq);
2137 item = list_entry(trans->r_itemq.prev,
2138 struct xlog_recover_item, ri_list);
2139 }
2140
2141 if (item->ri_total == 0) { /* first region to be added */
2142 if (in_f->ilf_size == 0 ||
2143 in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
2144 xfs_warn(log->l_mp,
2145 "bad number of regions (%d) in inode log format",
2146 in_f->ilf_size);
2147 ASSERT(0);
2148 kmem_free(ptr);
2149 return -EFSCORRUPTED;
2150 }
2151
2152 item->ri_total = in_f->ilf_size;
2153 item->ri_buf =
2154 kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
2155 0);
2156 }
2157
2158 if (item->ri_total <= item->ri_cnt) {
2159 xfs_warn(log->l_mp,
2160 "log item region count (%d) overflowed size (%d)",
2161 item->ri_cnt, item->ri_total);
2162 ASSERT(0);
2163 kmem_free(ptr);
2164 return -EFSCORRUPTED;
2165 }
2166
2167 /* Description region is ri_buf[0] */
2168 item->ri_buf[item->ri_cnt].i_addr = ptr;
2169 item->ri_buf[item->ri_cnt].i_len = len;
2170 item->ri_cnt++;
2171 trace_xfs_log_recover_item_add(log, trans, item, 0);
2172 return 0;
2173 }
2174
2175 /*
2176 * Free up any resources allocated by the transaction
2177 *
2178 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
2179 */
2180 STATIC void
xlog_recover_free_trans(struct xlog_recover * trans)2181 xlog_recover_free_trans(
2182 struct xlog_recover *trans)
2183 {
2184 struct xlog_recover_item *item, *n;
2185 int i;
2186
2187 hlist_del_init(&trans->r_list);
2188
2189 list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
2190 /* Free the regions in the item. */
2191 list_del(&item->ri_list);
2192 for (i = 0; i < item->ri_cnt; i++)
2193 kmem_free(item->ri_buf[i].i_addr);
2194 /* Free the item itself */
2195 kmem_free(item->ri_buf);
2196 kmem_free(item);
2197 }
2198 /* Free the transaction recover structure */
2199 kmem_free(trans);
2200 }
2201
2202 /*
2203 * On error or completion, trans is freed.
2204 */
2205 STATIC int
xlog_recovery_process_trans(struct xlog * log,struct xlog_recover * trans,char * dp,unsigned int len,unsigned int flags,int pass,struct list_head * buffer_list)2206 xlog_recovery_process_trans(
2207 struct xlog *log,
2208 struct xlog_recover *trans,
2209 char *dp,
2210 unsigned int len,
2211 unsigned int flags,
2212 int pass,
2213 struct list_head *buffer_list)
2214 {
2215 int error = 0;
2216 bool freeit = false;
2217
2218 /* mask off ophdr transaction container flags */
2219 flags &= ~XLOG_END_TRANS;
2220 if (flags & XLOG_WAS_CONT_TRANS)
2221 flags &= ~XLOG_CONTINUE_TRANS;
2222
2223 /*
2224 * Callees must not free the trans structure. We'll decide if we need to
2225 * free it or not based on the operation being done and it's result.
2226 */
2227 switch (flags) {
2228 /* expected flag values */
2229 case 0:
2230 case XLOG_CONTINUE_TRANS:
2231 error = xlog_recover_add_to_trans(log, trans, dp, len);
2232 break;
2233 case XLOG_WAS_CONT_TRANS:
2234 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2235 break;
2236 case XLOG_COMMIT_TRANS:
2237 error = xlog_recover_commit_trans(log, trans, pass,
2238 buffer_list);
2239 /* success or fail, we are now done with this transaction. */
2240 freeit = true;
2241 break;
2242
2243 /* unexpected flag values */
2244 case XLOG_UNMOUNT_TRANS:
2245 /* just skip trans */
2246 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2247 freeit = true;
2248 break;
2249 case XLOG_START_TRANS:
2250 default:
2251 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2252 ASSERT(0);
2253 error = -EFSCORRUPTED;
2254 break;
2255 }
2256 if (error || freeit)
2257 xlog_recover_free_trans(trans);
2258 return error;
2259 }
2260
2261 /*
2262 * Lookup the transaction recovery structure associated with the ID in the
2263 * current ophdr. If the transaction doesn't exist and the start flag is set in
2264 * the ophdr, then allocate a new transaction for future ID matches to find.
2265 * Either way, return what we found during the lookup - an existing transaction
2266 * or nothing.
2267 */
2268 STATIC struct xlog_recover *
xlog_recover_ophdr_to_trans(struct hlist_head rhash[],struct xlog_rec_header * rhead,struct xlog_op_header * ohead)2269 xlog_recover_ophdr_to_trans(
2270 struct hlist_head rhash[],
2271 struct xlog_rec_header *rhead,
2272 struct xlog_op_header *ohead)
2273 {
2274 struct xlog_recover *trans;
2275 xlog_tid_t tid;
2276 struct hlist_head *rhp;
2277
2278 tid = be32_to_cpu(ohead->oh_tid);
2279 rhp = &rhash[XLOG_RHASH(tid)];
2280 hlist_for_each_entry(trans, rhp, r_list) {
2281 if (trans->r_log_tid == tid)
2282 return trans;
2283 }
2284
2285 /*
2286 * skip over non-start transaction headers - we could be
2287 * processing slack space before the next transaction starts
2288 */
2289 if (!(ohead->oh_flags & XLOG_START_TRANS))
2290 return NULL;
2291
2292 ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2293
2294 /*
2295 * This is a new transaction so allocate a new recovery container to
2296 * hold the recovery ops that will follow.
2297 */
2298 trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2299 trans->r_log_tid = tid;
2300 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2301 INIT_LIST_HEAD(&trans->r_itemq);
2302 INIT_HLIST_NODE(&trans->r_list);
2303 hlist_add_head(&trans->r_list, rhp);
2304
2305 /*
2306 * Nothing more to do for this ophdr. Items to be added to this new
2307 * transaction will be in subsequent ophdr containers.
2308 */
2309 return NULL;
2310 }
2311
2312 STATIC int
xlog_recover_process_ophdr(struct xlog * log,struct hlist_head rhash[],struct xlog_rec_header * rhead,struct xlog_op_header * ohead,char * dp,char * end,int pass,struct list_head * buffer_list)2313 xlog_recover_process_ophdr(
2314 struct xlog *log,
2315 struct hlist_head rhash[],
2316 struct xlog_rec_header *rhead,
2317 struct xlog_op_header *ohead,
2318 char *dp,
2319 char *end,
2320 int pass,
2321 struct list_head *buffer_list)
2322 {
2323 struct xlog_recover *trans;
2324 unsigned int len;
2325 int error;
2326
2327 /* Do we understand who wrote this op? */
2328 if (ohead->oh_clientid != XFS_TRANSACTION &&
2329 ohead->oh_clientid != XFS_LOG) {
2330 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2331 __func__, ohead->oh_clientid);
2332 ASSERT(0);
2333 return -EFSCORRUPTED;
2334 }
2335
2336 /*
2337 * Check the ophdr contains all the data it is supposed to contain.
2338 */
2339 len = be32_to_cpu(ohead->oh_len);
2340 if (dp + len > end) {
2341 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2342 WARN_ON(1);
2343 return -EFSCORRUPTED;
2344 }
2345
2346 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2347 if (!trans) {
2348 /* nothing to do, so skip over this ophdr */
2349 return 0;
2350 }
2351
2352 /*
2353 * The recovered buffer queue is drained only once we know that all
2354 * recovery items for the current LSN have been processed. This is
2355 * required because:
2356 *
2357 * - Buffer write submission updates the metadata LSN of the buffer.
2358 * - Log recovery skips items with a metadata LSN >= the current LSN of
2359 * the recovery item.
2360 * - Separate recovery items against the same metadata buffer can share
2361 * a current LSN. I.e., consider that the LSN of a recovery item is
2362 * defined as the starting LSN of the first record in which its
2363 * transaction appears, that a record can hold multiple transactions,
2364 * and/or that a transaction can span multiple records.
2365 *
2366 * In other words, we are allowed to submit a buffer from log recovery
2367 * once per current LSN. Otherwise, we may incorrectly skip recovery
2368 * items and cause corruption.
2369 *
2370 * We don't know up front whether buffers are updated multiple times per
2371 * LSN. Therefore, track the current LSN of each commit log record as it
2372 * is processed and drain the queue when it changes. Use commit records
2373 * because they are ordered correctly by the logging code.
2374 */
2375 if (log->l_recovery_lsn != trans->r_lsn &&
2376 ohead->oh_flags & XLOG_COMMIT_TRANS) {
2377 error = xfs_buf_delwri_submit(buffer_list);
2378 if (error)
2379 return error;
2380 log->l_recovery_lsn = trans->r_lsn;
2381 }
2382
2383 return xlog_recovery_process_trans(log, trans, dp, len,
2384 ohead->oh_flags, pass, buffer_list);
2385 }
2386
2387 /*
2388 * There are two valid states of the r_state field. 0 indicates that the
2389 * transaction structure is in a normal state. We have either seen the
2390 * start of the transaction or the last operation we added was not a partial
2391 * operation. If the last operation we added to the transaction was a
2392 * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2393 *
2394 * NOTE: skip LRs with 0 data length.
2395 */
2396 STATIC int
xlog_recover_process_data(struct xlog * log,struct hlist_head rhash[],struct xlog_rec_header * rhead,char * dp,int pass,struct list_head * buffer_list)2397 xlog_recover_process_data(
2398 struct xlog *log,
2399 struct hlist_head rhash[],
2400 struct xlog_rec_header *rhead,
2401 char *dp,
2402 int pass,
2403 struct list_head *buffer_list)
2404 {
2405 struct xlog_op_header *ohead;
2406 char *end;
2407 int num_logops;
2408 int error;
2409
2410 end = dp + be32_to_cpu(rhead->h_len);
2411 num_logops = be32_to_cpu(rhead->h_num_logops);
2412
2413 /* check the log format matches our own - else we can't recover */
2414 if (xlog_header_check_recover(log->l_mp, rhead))
2415 return -EIO;
2416
2417 trace_xfs_log_recover_record(log, rhead, pass);
2418 while ((dp < end) && num_logops) {
2419
2420 ohead = (struct xlog_op_header *)dp;
2421 dp += sizeof(*ohead);
2422 ASSERT(dp <= end);
2423
2424 /* errors will abort recovery */
2425 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2426 dp, end, pass, buffer_list);
2427 if (error)
2428 return error;
2429
2430 dp += be32_to_cpu(ohead->oh_len);
2431 num_logops--;
2432 }
2433 return 0;
2434 }
2435
2436 /* Take all the collected deferred ops and finish them in order. */
2437 static int
xlog_finish_defer_ops(struct xfs_mount * mp,struct list_head * capture_list)2438 xlog_finish_defer_ops(
2439 struct xfs_mount *mp,
2440 struct list_head *capture_list)
2441 {
2442 struct xfs_defer_capture *dfc, *next;
2443 struct xfs_trans *tp;
2444 struct xfs_inode *ip;
2445 int error = 0;
2446
2447 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2448 struct xfs_trans_res resv;
2449
2450 /*
2451 * Create a new transaction reservation from the captured
2452 * information. Set logcount to 1 to force the new transaction
2453 * to regrant every roll so that we can make forward progress
2454 * in recovery no matter how full the log might be.
2455 */
2456 resv.tr_logres = dfc->dfc_logres;
2457 resv.tr_logcount = 1;
2458 resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2459
2460 error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2461 dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2462 if (error) {
2463 xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
2464 return error;
2465 }
2466
2467 /*
2468 * Transfer to this new transaction all the dfops we captured
2469 * from recovering a single intent item.
2470 */
2471 list_del_init(&dfc->dfc_list);
2472 xfs_defer_ops_continue(dfc, tp, &ip);
2473
2474 error = xfs_trans_commit(tp);
2475 if (ip) {
2476 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2477 xfs_irele(ip);
2478 }
2479 if (error)
2480 return error;
2481 }
2482
2483 ASSERT(list_empty(capture_list));
2484 return 0;
2485 }
2486
2487 /* Release all the captured defer ops and capture structures in this list. */
2488 static void
xlog_abort_defer_ops(struct xfs_mount * mp,struct list_head * capture_list)2489 xlog_abort_defer_ops(
2490 struct xfs_mount *mp,
2491 struct list_head *capture_list)
2492 {
2493 struct xfs_defer_capture *dfc;
2494 struct xfs_defer_capture *next;
2495
2496 list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2497 list_del_init(&dfc->dfc_list);
2498 xfs_defer_ops_release(mp, dfc);
2499 }
2500 }
2501 /*
2502 * When this is called, all of the log intent items which did not have
2503 * corresponding log done items should be in the AIL. What we do now
2504 * is update the data structures associated with each one.
2505 *
2506 * Since we process the log intent items in normal transactions, they
2507 * will be removed at some point after the commit. This prevents us
2508 * from just walking down the list processing each one. We'll use a
2509 * flag in the intent item to skip those that we've already processed
2510 * and use the AIL iteration mechanism's generation count to try to
2511 * speed this up at least a bit.
2512 *
2513 * When we start, we know that the intents are the only things in the
2514 * AIL. As we process them, however, other items are added to the
2515 * AIL.
2516 */
2517 STATIC int
xlog_recover_process_intents(struct xlog * log)2518 xlog_recover_process_intents(
2519 struct xlog *log)
2520 {
2521 LIST_HEAD(capture_list);
2522 struct xfs_ail_cursor cur;
2523 struct xfs_log_item *lip;
2524 struct xfs_ail *ailp;
2525 int error = 0;
2526 #if defined(DEBUG) || defined(XFS_WARN)
2527 xfs_lsn_t last_lsn;
2528 #endif
2529
2530 ailp = log->l_ailp;
2531 spin_lock(&ailp->ail_lock);
2532 #if defined(DEBUG) || defined(XFS_WARN)
2533 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2534 #endif
2535 for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2536 lip != NULL;
2537 lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
2538 /*
2539 * We're done when we see something other than an intent.
2540 * There should be no intents left in the AIL now.
2541 */
2542 if (!xlog_item_is_intent(lip)) {
2543 #ifdef DEBUG
2544 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2545 ASSERT(!xlog_item_is_intent(lip));
2546 #endif
2547 break;
2548 }
2549
2550 /*
2551 * We should never see a redo item with a LSN higher than
2552 * the last transaction we found in the log at the start
2553 * of recovery.
2554 */
2555 ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2556
2557 /*
2558 * NOTE: If your intent processing routine can create more
2559 * deferred ops, you /must/ attach them to the capture list in
2560 * the recover routine or else those subsequent intents will be
2561 * replayed in the wrong order!
2562 */
2563 spin_unlock(&ailp->ail_lock);
2564 error = lip->li_ops->iop_recover(lip, &capture_list);
2565 spin_lock(&ailp->ail_lock);
2566 if (error)
2567 break;
2568 }
2569
2570 xfs_trans_ail_cursor_done(&cur);
2571 spin_unlock(&ailp->ail_lock);
2572 if (error)
2573 goto err;
2574
2575 error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2576 if (error)
2577 goto err;
2578
2579 return 0;
2580 err:
2581 xlog_abort_defer_ops(log->l_mp, &capture_list);
2582 return error;
2583 }
2584
2585 /*
2586 * A cancel occurs when the mount has failed and we're bailing out.
2587 * Release all pending log intent items so they don't pin the AIL.
2588 */
2589 STATIC void
xlog_recover_cancel_intents(struct xlog * log)2590 xlog_recover_cancel_intents(
2591 struct xlog *log)
2592 {
2593 struct xfs_log_item *lip;
2594 struct xfs_ail_cursor cur;
2595 struct xfs_ail *ailp;
2596
2597 ailp = log->l_ailp;
2598 spin_lock(&ailp->ail_lock);
2599 lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2600 while (lip != NULL) {
2601 /*
2602 * We're done when we see something other than an intent.
2603 * There should be no intents left in the AIL now.
2604 */
2605 if (!xlog_item_is_intent(lip)) {
2606 #ifdef DEBUG
2607 for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2608 ASSERT(!xlog_item_is_intent(lip));
2609 #endif
2610 break;
2611 }
2612
2613 spin_unlock(&ailp->ail_lock);
2614 lip->li_ops->iop_release(lip);
2615 spin_lock(&ailp->ail_lock);
2616 lip = xfs_trans_ail_cursor_next(ailp, &cur);
2617 }
2618
2619 xfs_trans_ail_cursor_done(&cur);
2620 spin_unlock(&ailp->ail_lock);
2621 }
2622
2623 /*
2624 * This routine performs a transaction to null out a bad inode pointer
2625 * in an agi unlinked inode hash bucket.
2626 */
2627 STATIC void
xlog_recover_clear_agi_bucket(xfs_mount_t * mp,xfs_agnumber_t agno,int bucket)2628 xlog_recover_clear_agi_bucket(
2629 xfs_mount_t *mp,
2630 xfs_agnumber_t agno,
2631 int bucket)
2632 {
2633 xfs_trans_t *tp;
2634 xfs_agi_t *agi;
2635 xfs_buf_t *agibp;
2636 int offset;
2637 int error;
2638
2639 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2640 if (error)
2641 goto out_error;
2642
2643 error = xfs_read_agi(mp, tp, agno, &agibp);
2644 if (error)
2645 goto out_abort;
2646
2647 agi = agibp->b_addr;
2648 agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2649 offset = offsetof(xfs_agi_t, agi_unlinked) +
2650 (sizeof(xfs_agino_t) * bucket);
2651 xfs_trans_log_buf(tp, agibp, offset,
2652 (offset + sizeof(xfs_agino_t) - 1));
2653
2654 error = xfs_trans_commit(tp);
2655 if (error)
2656 goto out_error;
2657 return;
2658
2659 out_abort:
2660 xfs_trans_cancel(tp);
2661 out_error:
2662 xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
2663 return;
2664 }
2665
2666 STATIC xfs_agino_t
xlog_recover_process_one_iunlink(struct xfs_mount * mp,xfs_agnumber_t agno,xfs_agino_t agino,int bucket)2667 xlog_recover_process_one_iunlink(
2668 struct xfs_mount *mp,
2669 xfs_agnumber_t agno,
2670 xfs_agino_t agino,
2671 int bucket)
2672 {
2673 struct xfs_buf *ibp;
2674 struct xfs_dinode *dip;
2675 struct xfs_inode *ip;
2676 xfs_ino_t ino;
2677 int error;
2678
2679 ino = XFS_AGINO_TO_INO(mp, agno, agino);
2680 error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
2681 if (error)
2682 goto fail;
2683
2684 /*
2685 * Get the on disk inode to find the next inode in the bucket.
2686 */
2687 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
2688 if (error)
2689 goto fail_iput;
2690
2691 xfs_iflags_clear(ip, XFS_IRECOVERY);
2692 ASSERT(VFS_I(ip)->i_nlink == 0);
2693 ASSERT(VFS_I(ip)->i_mode != 0);
2694
2695 /* setup for the next pass */
2696 agino = be32_to_cpu(dip->di_next_unlinked);
2697 xfs_buf_relse(ibp);
2698
2699 /*
2700 * Prevent any DMAPI event from being sent when the reference on
2701 * the inode is dropped.
2702 */
2703 ip->i_d.di_dmevmask = 0;
2704
2705 xfs_irele(ip);
2706 return agino;
2707
2708 fail_iput:
2709 xfs_irele(ip);
2710 fail:
2711 /*
2712 * We can't read in the inode this bucket points to, or this inode
2713 * is messed up. Just ditch this bucket of inodes. We will lose
2714 * some inodes and space, but at least we won't hang.
2715 *
2716 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2717 * clear the inode pointer in the bucket.
2718 */
2719 xlog_recover_clear_agi_bucket(mp, agno, bucket);
2720 return NULLAGINO;
2721 }
2722
2723 /*
2724 * Recover AGI unlinked lists
2725 *
2726 * This is called during recovery to process any inodes which we unlinked but
2727 * not freed when the system crashed. These inodes will be on the lists in the
2728 * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2729 * any inodes found on the lists. Each inode is removed from the lists when it
2730 * has been fully truncated and is freed. The freeing of the inode and its
2731 * removal from the list must be atomic.
2732 *
2733 * If everything we touch in the agi processing loop is already in memory, this
2734 * loop can hold the cpu for a long time. It runs without lock contention,
2735 * memory allocation contention, the need wait for IO, etc, and so will run
2736 * until we either run out of inodes to process, run low on memory or we run out
2737 * of log space.
2738 *
2739 * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2740 * and can prevent other filesytem work (such as CIL pushes) from running. This
2741 * can lead to deadlocks if the recovery process runs out of log reservation
2742 * space. Hence we need to yield the CPU when there is other kernel work
2743 * scheduled on this CPU to ensure other scheduled work can run without undue
2744 * latency.
2745 */
2746 STATIC void
xlog_recover_process_iunlinks(struct xlog * log)2747 xlog_recover_process_iunlinks(
2748 struct xlog *log)
2749 {
2750 xfs_mount_t *mp;
2751 xfs_agnumber_t agno;
2752 xfs_agi_t *agi;
2753 xfs_buf_t *agibp;
2754 xfs_agino_t agino;
2755 int bucket;
2756 int error;
2757
2758 mp = log->l_mp;
2759
2760 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2761 /*
2762 * Find the agi for this ag.
2763 */
2764 error = xfs_read_agi(mp, NULL, agno, &agibp);
2765 if (error) {
2766 /*
2767 * AGI is b0rked. Don't process it.
2768 *
2769 * We should probably mark the filesystem as corrupt
2770 * after we've recovered all the ag's we can....
2771 */
2772 continue;
2773 }
2774 /*
2775 * Unlock the buffer so that it can be acquired in the normal
2776 * course of the transaction to truncate and free each inode.
2777 * Because we are not racing with anyone else here for the AGI
2778 * buffer, we don't even need to hold it locked to read the
2779 * initial unlinked bucket entries out of the buffer. We keep
2780 * buffer reference though, so that it stays pinned in memory
2781 * while we need the buffer.
2782 */
2783 agi = agibp->b_addr;
2784 xfs_buf_unlock(agibp);
2785
2786 for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2787 agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2788 while (agino != NULLAGINO) {
2789 agino = xlog_recover_process_one_iunlink(mp,
2790 agno, agino, bucket);
2791 cond_resched();
2792 }
2793 }
2794 xfs_buf_rele(agibp);
2795 }
2796 }
2797
2798 STATIC void
xlog_unpack_data(struct xlog_rec_header * rhead,char * dp,struct xlog * log)2799 xlog_unpack_data(
2800 struct xlog_rec_header *rhead,
2801 char *dp,
2802 struct xlog *log)
2803 {
2804 int i, j, k;
2805
2806 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2807 i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2808 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2809 dp += BBSIZE;
2810 }
2811
2812 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2813 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2814 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2815 j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2816 k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2817 *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2818 dp += BBSIZE;
2819 }
2820 }
2821 }
2822
2823 /*
2824 * CRC check, unpack and process a log record.
2825 */
2826 STATIC int
xlog_recover_process(struct xlog * log,struct hlist_head rhash[],struct xlog_rec_header * rhead,char * dp,int pass,struct list_head * buffer_list)2827 xlog_recover_process(
2828 struct xlog *log,
2829 struct hlist_head rhash[],
2830 struct xlog_rec_header *rhead,
2831 char *dp,
2832 int pass,
2833 struct list_head *buffer_list)
2834 {
2835 __le32 old_crc = rhead->h_crc;
2836 __le32 crc;
2837
2838 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2839
2840 /*
2841 * Nothing else to do if this is a CRC verification pass. Just return
2842 * if this a record with a non-zero crc. Unfortunately, mkfs always
2843 * sets old_crc to 0 so we must consider this valid even on v5 supers.
2844 * Otherwise, return EFSBADCRC on failure so the callers up the stack
2845 * know precisely what failed.
2846 */
2847 if (pass == XLOG_RECOVER_CRCPASS) {
2848 if (old_crc && crc != old_crc)
2849 return -EFSBADCRC;
2850 return 0;
2851 }
2852
2853 /*
2854 * We're in the normal recovery path. Issue a warning if and only if the
2855 * CRC in the header is non-zero. This is an advisory warning and the
2856 * zero CRC check prevents warnings from being emitted when upgrading
2857 * the kernel from one that does not add CRCs by default.
2858 */
2859 if (crc != old_crc) {
2860 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2861 xfs_alert(log->l_mp,
2862 "log record CRC mismatch: found 0x%x, expected 0x%x.",
2863 le32_to_cpu(old_crc),
2864 le32_to_cpu(crc));
2865 xfs_hex_dump(dp, 32);
2866 }
2867
2868 /*
2869 * If the filesystem is CRC enabled, this mismatch becomes a
2870 * fatal log corruption failure.
2871 */
2872 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2873 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2874 return -EFSCORRUPTED;
2875 }
2876 }
2877
2878 xlog_unpack_data(rhead, dp, log);
2879
2880 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2881 buffer_list);
2882 }
2883
2884 STATIC int
xlog_valid_rec_header(struct xlog * log,struct xlog_rec_header * rhead,xfs_daddr_t blkno,int bufsize)2885 xlog_valid_rec_header(
2886 struct xlog *log,
2887 struct xlog_rec_header *rhead,
2888 xfs_daddr_t blkno,
2889 int bufsize)
2890 {
2891 int hlen;
2892
2893 if (XFS_IS_CORRUPT(log->l_mp,
2894 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2895 return -EFSCORRUPTED;
2896 if (XFS_IS_CORRUPT(log->l_mp,
2897 (!rhead->h_version ||
2898 (be32_to_cpu(rhead->h_version) &
2899 (~XLOG_VERSION_OKBITS))))) {
2900 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2901 __func__, be32_to_cpu(rhead->h_version));
2902 return -EFSCORRUPTED;
2903 }
2904
2905 /*
2906 * LR body must have data (or it wouldn't have been written)
2907 * and h_len must not be greater than LR buffer size.
2908 */
2909 hlen = be32_to_cpu(rhead->h_len);
2910 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2911 return -EFSCORRUPTED;
2912
2913 if (XFS_IS_CORRUPT(log->l_mp,
2914 blkno > log->l_logBBsize || blkno > INT_MAX))
2915 return -EFSCORRUPTED;
2916 return 0;
2917 }
2918
2919 /*
2920 * Read the log from tail to head and process the log records found.
2921 * Handle the two cases where the tail and head are in the same cycle
2922 * and where the active portion of the log wraps around the end of
2923 * the physical log separately. The pass parameter is passed through
2924 * to the routines called to process the data and is not looked at
2925 * here.
2926 */
2927 STATIC int
xlog_do_recovery_pass(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t tail_blk,int pass,xfs_daddr_t * first_bad)2928 xlog_do_recovery_pass(
2929 struct xlog *log,
2930 xfs_daddr_t head_blk,
2931 xfs_daddr_t tail_blk,
2932 int pass,
2933 xfs_daddr_t *first_bad) /* out: first bad log rec */
2934 {
2935 xlog_rec_header_t *rhead;
2936 xfs_daddr_t blk_no, rblk_no;
2937 xfs_daddr_t rhead_blk;
2938 char *offset;
2939 char *hbp, *dbp;
2940 int error = 0, h_size, h_len;
2941 int error2 = 0;
2942 int bblks, split_bblks;
2943 int hblks, split_hblks, wrapped_hblks;
2944 int i;
2945 struct hlist_head rhash[XLOG_RHASH_SIZE];
2946 LIST_HEAD (buffer_list);
2947
2948 ASSERT(head_blk != tail_blk);
2949 blk_no = rhead_blk = tail_blk;
2950
2951 for (i = 0; i < XLOG_RHASH_SIZE; i++)
2952 INIT_HLIST_HEAD(&rhash[i]);
2953
2954 /*
2955 * Read the header of the tail block and get the iclog buffer size from
2956 * h_size. Use this to tell how many sectors make up the log header.
2957 */
2958 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2959 /*
2960 * When using variable length iclogs, read first sector of
2961 * iclog header and extract the header size from it. Get a
2962 * new hbp that is the correct size.
2963 */
2964 hbp = xlog_alloc_buffer(log, 1);
2965 if (!hbp)
2966 return -ENOMEM;
2967
2968 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2969 if (error)
2970 goto bread_err1;
2971
2972 rhead = (xlog_rec_header_t *)offset;
2973
2974 /*
2975 * xfsprogs has a bug where record length is based on lsunit but
2976 * h_size (iclog size) is hardcoded to 32k. Now that we
2977 * unconditionally CRC verify the unmount record, this means the
2978 * log buffer can be too small for the record and cause an
2979 * overrun.
2980 *
2981 * Detect this condition here. Use lsunit for the buffer size as
2982 * long as this looks like the mkfs case. Otherwise, return an
2983 * error to avoid a buffer overrun.
2984 */
2985 h_size = be32_to_cpu(rhead->h_size);
2986 h_len = be32_to_cpu(rhead->h_len);
2987 if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
2988 rhead->h_num_logops == cpu_to_be32(1)) {
2989 xfs_warn(log->l_mp,
2990 "invalid iclog size (%d bytes), using lsunit (%d bytes)",
2991 h_size, log->l_mp->m_logbsize);
2992 h_size = log->l_mp->m_logbsize;
2993 }
2994
2995 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
2996 if (error)
2997 goto bread_err1;
2998
2999 hblks = xlog_logrec_hblks(log, rhead);
3000 if (hblks != 1) {
3001 kmem_free(hbp);
3002 hbp = xlog_alloc_buffer(log, hblks);
3003 }
3004 } else {
3005 ASSERT(log->l_sectBBsize == 1);
3006 hblks = 1;
3007 hbp = xlog_alloc_buffer(log, 1);
3008 h_size = XLOG_BIG_RECORD_BSIZE;
3009 }
3010
3011 if (!hbp)
3012 return -ENOMEM;
3013 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3014 if (!dbp) {
3015 kmem_free(hbp);
3016 return -ENOMEM;
3017 }
3018
3019 memset(rhash, 0, sizeof(rhash));
3020 if (tail_blk > head_blk) {
3021 /*
3022 * Perform recovery around the end of the physical log.
3023 * When the head is not on the same cycle number as the tail,
3024 * we can't do a sequential recovery.
3025 */
3026 while (blk_no < log->l_logBBsize) {
3027 /*
3028 * Check for header wrapping around physical end-of-log
3029 */
3030 offset = hbp;
3031 split_hblks = 0;
3032 wrapped_hblks = 0;
3033 if (blk_no + hblks <= log->l_logBBsize) {
3034 /* Read header in one read */
3035 error = xlog_bread(log, blk_no, hblks, hbp,
3036 &offset);
3037 if (error)
3038 goto bread_err2;
3039 } else {
3040 /* This LR is split across physical log end */
3041 if (blk_no != log->l_logBBsize) {
3042 /* some data before physical log end */
3043 ASSERT(blk_no <= INT_MAX);
3044 split_hblks = log->l_logBBsize - (int)blk_no;
3045 ASSERT(split_hblks > 0);
3046 error = xlog_bread(log, blk_no,
3047 split_hblks, hbp,
3048 &offset);
3049 if (error)
3050 goto bread_err2;
3051 }
3052
3053 /*
3054 * Note: this black magic still works with
3055 * large sector sizes (non-512) only because:
3056 * - we increased the buffer size originally
3057 * by 1 sector giving us enough extra space
3058 * for the second read;
3059 * - the log start is guaranteed to be sector
3060 * aligned;
3061 * - we read the log end (LR header start)
3062 * _first_, then the log start (LR header end)
3063 * - order is important.
3064 */
3065 wrapped_hblks = hblks - split_hblks;
3066 error = xlog_bread_noalign(log, 0,
3067 wrapped_hblks,
3068 offset + BBTOB(split_hblks));
3069 if (error)
3070 goto bread_err2;
3071 }
3072 rhead = (xlog_rec_header_t *)offset;
3073 error = xlog_valid_rec_header(log, rhead,
3074 split_hblks ? blk_no : 0, h_size);
3075 if (error)
3076 goto bread_err2;
3077
3078 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3079 blk_no += hblks;
3080
3081 /*
3082 * Read the log record data in multiple reads if it
3083 * wraps around the end of the log. Note that if the
3084 * header already wrapped, blk_no could point past the
3085 * end of the log. The record data is contiguous in
3086 * that case.
3087 */
3088 if (blk_no + bblks <= log->l_logBBsize ||
3089 blk_no >= log->l_logBBsize) {
3090 rblk_no = xlog_wrap_logbno(log, blk_no);
3091 error = xlog_bread(log, rblk_no, bblks, dbp,
3092 &offset);
3093 if (error)
3094 goto bread_err2;
3095 } else {
3096 /* This log record is split across the
3097 * physical end of log */
3098 offset = dbp;
3099 split_bblks = 0;
3100 if (blk_no != log->l_logBBsize) {
3101 /* some data is before the physical
3102 * end of log */
3103 ASSERT(!wrapped_hblks);
3104 ASSERT(blk_no <= INT_MAX);
3105 split_bblks =
3106 log->l_logBBsize - (int)blk_no;
3107 ASSERT(split_bblks > 0);
3108 error = xlog_bread(log, blk_no,
3109 split_bblks, dbp,
3110 &offset);
3111 if (error)
3112 goto bread_err2;
3113 }
3114
3115 /*
3116 * Note: this black magic still works with
3117 * large sector sizes (non-512) only because:
3118 * - we increased the buffer size originally
3119 * by 1 sector giving us enough extra space
3120 * for the second read;
3121 * - the log start is guaranteed to be sector
3122 * aligned;
3123 * - we read the log end (LR header start)
3124 * _first_, then the log start (LR header end)
3125 * - order is important.
3126 */
3127 error = xlog_bread_noalign(log, 0,
3128 bblks - split_bblks,
3129 offset + BBTOB(split_bblks));
3130 if (error)
3131 goto bread_err2;
3132 }
3133
3134 error = xlog_recover_process(log, rhash, rhead, offset,
3135 pass, &buffer_list);
3136 if (error)
3137 goto bread_err2;
3138
3139 blk_no += bblks;
3140 rhead_blk = blk_no;
3141 }
3142
3143 ASSERT(blk_no >= log->l_logBBsize);
3144 blk_no -= log->l_logBBsize;
3145 rhead_blk = blk_no;
3146 }
3147
3148 /* read first part of physical log */
3149 while (blk_no < head_blk) {
3150 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3151 if (error)
3152 goto bread_err2;
3153
3154 rhead = (xlog_rec_header_t *)offset;
3155 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3156 if (error)
3157 goto bread_err2;
3158
3159 /* blocks in data section */
3160 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3161 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3162 &offset);
3163 if (error)
3164 goto bread_err2;
3165
3166 error = xlog_recover_process(log, rhash, rhead, offset, pass,
3167 &buffer_list);
3168 if (error)
3169 goto bread_err2;
3170
3171 blk_no += bblks + hblks;
3172 rhead_blk = blk_no;
3173 }
3174
3175 bread_err2:
3176 kmem_free(dbp);
3177 bread_err1:
3178 kmem_free(hbp);
3179
3180 /*
3181 * Submit buffers that have been added from the last record processed,
3182 * regardless of error status.
3183 */
3184 if (!list_empty(&buffer_list))
3185 error2 = xfs_buf_delwri_submit(&buffer_list);
3186
3187 if (error && first_bad)
3188 *first_bad = rhead_blk;
3189
3190 /*
3191 * Transactions are freed at commit time but transactions without commit
3192 * records on disk are never committed. Free any that may be left in the
3193 * hash table.
3194 */
3195 for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3196 struct hlist_node *tmp;
3197 struct xlog_recover *trans;
3198
3199 hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3200 xlog_recover_free_trans(trans);
3201 }
3202
3203 return error ? error : error2;
3204 }
3205
3206 /*
3207 * Do the recovery of the log. We actually do this in two phases.
3208 * The two passes are necessary in order to implement the function
3209 * of cancelling a record written into the log. The first pass
3210 * determines those things which have been cancelled, and the
3211 * second pass replays log items normally except for those which
3212 * have been cancelled. The handling of the replay and cancellations
3213 * takes place in the log item type specific routines.
3214 *
3215 * The table of items which have cancel records in the log is allocated
3216 * and freed at this level, since only here do we know when all of
3217 * the log recovery has been completed.
3218 */
3219 STATIC int
xlog_do_log_recovery(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t tail_blk)3220 xlog_do_log_recovery(
3221 struct xlog *log,
3222 xfs_daddr_t head_blk,
3223 xfs_daddr_t tail_blk)
3224 {
3225 int error, i;
3226
3227 ASSERT(head_blk != tail_blk);
3228
3229 /*
3230 * First do a pass to find all of the cancelled buf log items.
3231 * Store them in the buf_cancel_table for use in the second pass.
3232 */
3233 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3234 sizeof(struct list_head),
3235 0);
3236 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3237 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3238
3239 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3240 XLOG_RECOVER_PASS1, NULL);
3241 if (error != 0) {
3242 kmem_free(log->l_buf_cancel_table);
3243 log->l_buf_cancel_table = NULL;
3244 return error;
3245 }
3246 /*
3247 * Then do a second pass to actually recover the items in the log.
3248 * When it is complete free the table of buf cancel items.
3249 */
3250 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3251 XLOG_RECOVER_PASS2, NULL);
3252 #ifdef DEBUG
3253 if (!error) {
3254 int i;
3255
3256 for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3257 ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3258 }
3259 #endif /* DEBUG */
3260
3261 kmem_free(log->l_buf_cancel_table);
3262 log->l_buf_cancel_table = NULL;
3263
3264 return error;
3265 }
3266
3267 /*
3268 * Do the actual recovery
3269 */
3270 STATIC int
xlog_do_recover(struct xlog * log,xfs_daddr_t head_blk,xfs_daddr_t tail_blk)3271 xlog_do_recover(
3272 struct xlog *log,
3273 xfs_daddr_t head_blk,
3274 xfs_daddr_t tail_blk)
3275 {
3276 struct xfs_mount *mp = log->l_mp;
3277 struct xfs_buf *bp = mp->m_sb_bp;
3278 struct xfs_sb *sbp = &mp->m_sb;
3279 int error;
3280
3281 trace_xfs_log_recover(log, head_blk, tail_blk);
3282
3283 /*
3284 * First replay the images in the log.
3285 */
3286 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3287 if (error)
3288 return error;
3289
3290 /*
3291 * If IO errors happened during recovery, bail out.
3292 */
3293 if (XFS_FORCED_SHUTDOWN(mp))
3294 return -EIO;
3295
3296 /*
3297 * We now update the tail_lsn since much of the recovery has completed
3298 * and there may be space available to use. If there were no extent
3299 * or iunlinks, we can free up the entire log and set the tail_lsn to
3300 * be the last_sync_lsn. This was set in xlog_find_tail to be the
3301 * lsn of the last known good LR on disk. If there are extent frees
3302 * or iunlinks they will have some entries in the AIL; so we look at
3303 * the AIL to determine how to set the tail_lsn.
3304 */
3305 xlog_assign_tail_lsn(mp);
3306
3307 /*
3308 * Now that we've finished replaying all buffer and inode updates,
3309 * re-read the superblock and reverify it.
3310 */
3311 xfs_buf_lock(bp);
3312 xfs_buf_hold(bp);
3313 error = _xfs_buf_read(bp, XBF_READ);
3314 if (error) {
3315 if (!XFS_FORCED_SHUTDOWN(mp)) {
3316 xfs_buf_ioerror_alert(bp, __this_address);
3317 ASSERT(0);
3318 }
3319 xfs_buf_relse(bp);
3320 return error;
3321 }
3322
3323 /* Convert superblock from on-disk format */
3324 xfs_sb_from_disk(sbp, bp->b_addr);
3325 xfs_buf_relse(bp);
3326
3327 /* re-initialise in-core superblock and geometry structures */
3328 xfs_reinit_percpu_counters(mp);
3329 error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
3330 if (error) {
3331 xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3332 return error;
3333 }
3334 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3335
3336 xlog_recover_check_summary(log);
3337
3338 /* Normal transactions can now occur */
3339 log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3340 return 0;
3341 }
3342
3343 /*
3344 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3345 *
3346 * Return error or zero.
3347 */
3348 int
xlog_recover(struct xlog * log)3349 xlog_recover(
3350 struct xlog *log)
3351 {
3352 xfs_daddr_t head_blk, tail_blk;
3353 int error;
3354
3355 /* find the tail of the log */
3356 error = xlog_find_tail(log, &head_blk, &tail_blk);
3357 if (error)
3358 return error;
3359
3360 /*
3361 * The superblock was read before the log was available and thus the LSN
3362 * could not be verified. Check the superblock LSN against the current
3363 * LSN now that it's known.
3364 */
3365 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
3366 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3367 return -EINVAL;
3368
3369 if (tail_blk != head_blk) {
3370 /* There used to be a comment here:
3371 *
3372 * disallow recovery on read-only mounts. note -- mount
3373 * checks for ENOSPC and turns it into an intelligent
3374 * error message.
3375 * ...but this is no longer true. Now, unless you specify
3376 * NORECOVERY (in which case this function would never be
3377 * called), we just go ahead and recover. We do this all
3378 * under the vfs layer, so we can get away with it unless
3379 * the device itself is read-only, in which case we fail.
3380 */
3381 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3382 return error;
3383 }
3384
3385 /*
3386 * Version 5 superblock log feature mask validation. We know the
3387 * log is dirty so check if there are any unknown log features
3388 * in what we need to recover. If there are unknown features
3389 * (e.g. unsupported transactions, then simply reject the
3390 * attempt at recovery before touching anything.
3391 */
3392 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
3393 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3394 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
3395 xfs_warn(log->l_mp,
3396 "Superblock has unknown incompatible log features (0x%x) enabled.",
3397 (log->l_mp->m_sb.sb_features_log_incompat &
3398 XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
3399 xfs_warn(log->l_mp,
3400 "The log can not be fully and/or safely recovered by this kernel.");
3401 xfs_warn(log->l_mp,
3402 "Please recover the log on a kernel that supports the unknown features.");
3403 return -EINVAL;
3404 }
3405
3406 /*
3407 * Delay log recovery if the debug hook is set. This is debug
3408 * instrumention to coordinate simulation of I/O failures with
3409 * log recovery.
3410 */
3411 if (xfs_globals.log_recovery_delay) {
3412 xfs_notice(log->l_mp,
3413 "Delaying log recovery for %d seconds.",
3414 xfs_globals.log_recovery_delay);
3415 msleep(xfs_globals.log_recovery_delay * 1000);
3416 }
3417
3418 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3419 log->l_mp->m_logname ? log->l_mp->m_logname
3420 : "internal");
3421
3422 error = xlog_do_recover(log, head_blk, tail_blk);
3423 log->l_flags |= XLOG_RECOVERY_NEEDED;
3424 }
3425 return error;
3426 }
3427
3428 /*
3429 * In the first part of recovery we replay inodes and buffers and build
3430 * up the list of extent free items which need to be processed. Here
3431 * we process the extent free items and clean up the on disk unlinked
3432 * inode lists. This is separated from the first part of recovery so
3433 * that the root and real-time bitmap inodes can be read in from disk in
3434 * between the two stages. This is necessary so that we can free space
3435 * in the real-time portion of the file system.
3436 */
3437 int
xlog_recover_finish(struct xlog * log)3438 xlog_recover_finish(
3439 struct xlog *log)
3440 {
3441 /*
3442 * Now we're ready to do the transactions needed for the
3443 * rest of recovery. Start with completing all the extent
3444 * free intent records and then process the unlinked inode
3445 * lists. At this point, we essentially run in normal mode
3446 * except that we're still performing recovery actions
3447 * rather than accepting new requests.
3448 */
3449 if (log->l_flags & XLOG_RECOVERY_NEEDED) {
3450 int error;
3451 error = xlog_recover_process_intents(log);
3452 if (error) {
3453 /*
3454 * Cancel all the unprocessed intent items now so that
3455 * we don't leave them pinned in the AIL. This can
3456 * cause the AIL to livelock on the pinned item if
3457 * anyone tries to push the AIL (inode reclaim does
3458 * this) before we get around to xfs_log_mount_cancel.
3459 */
3460 xlog_recover_cancel_intents(log);
3461 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
3462 xfs_alert(log->l_mp, "Failed to recover intents");
3463 return error;
3464 }
3465
3466 /*
3467 * Sync the log to get all the intents out of the AIL.
3468 * This isn't absolutely necessary, but it helps in
3469 * case the unlink transactions would have problems
3470 * pushing the intents out of the way.
3471 */
3472 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3473
3474 xlog_recover_process_iunlinks(log);
3475
3476 xlog_recover_check_summary(log);
3477
3478 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
3479 log->l_mp->m_logname ? log->l_mp->m_logname
3480 : "internal");
3481 log->l_flags &= ~XLOG_RECOVERY_NEEDED;
3482 } else {
3483 xfs_info(log->l_mp, "Ending clean mount");
3484 }
3485 return 0;
3486 }
3487
3488 void
xlog_recover_cancel(struct xlog * log)3489 xlog_recover_cancel(
3490 struct xlog *log)
3491 {
3492 if (log->l_flags & XLOG_RECOVERY_NEEDED)
3493 xlog_recover_cancel_intents(log);
3494 }
3495
3496 #if defined(DEBUG)
3497 /*
3498 * Read all of the agf and agi counters and check that they
3499 * are consistent with the superblock counters.
3500 */
3501 STATIC void
xlog_recover_check_summary(struct xlog * log)3502 xlog_recover_check_summary(
3503 struct xlog *log)
3504 {
3505 xfs_mount_t *mp;
3506 xfs_buf_t *agfbp;
3507 xfs_buf_t *agibp;
3508 xfs_agnumber_t agno;
3509 uint64_t freeblks;
3510 uint64_t itotal;
3511 uint64_t ifree;
3512 int error;
3513
3514 mp = log->l_mp;
3515
3516 freeblks = 0LL;
3517 itotal = 0LL;
3518 ifree = 0LL;
3519 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
3520 error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
3521 if (error) {
3522 xfs_alert(mp, "%s agf read failed agno %d error %d",
3523 __func__, agno, error);
3524 } else {
3525 struct xfs_agf *agfp = agfbp->b_addr;
3526
3527 freeblks += be32_to_cpu(agfp->agf_freeblks) +
3528 be32_to_cpu(agfp->agf_flcount);
3529 xfs_buf_relse(agfbp);
3530 }
3531
3532 error = xfs_read_agi(mp, NULL, agno, &agibp);
3533 if (error) {
3534 xfs_alert(mp, "%s agi read failed agno %d error %d",
3535 __func__, agno, error);
3536 } else {
3537 struct xfs_agi *agi = agibp->b_addr;
3538
3539 itotal += be32_to_cpu(agi->agi_count);
3540 ifree += be32_to_cpu(agi->agi_freecount);
3541 xfs_buf_relse(agibp);
3542 }
3543 }
3544 }
3545 #endif /* DEBUG */
3546