Lines Matching full:log in fs/xfs/xfs_log_recover.c

44 #define	xlog_recover_check_summary(log)  argument
55 * Verify the log-relative block number and length in basic blocks are valid for
56 * an operation involving the given XFS log buffer. Returns true if the fields
61 struct xlog *log, in xlog_verify_bno() argument
65 if (blk_no < 0 || blk_no >= log->l_logBBsize) in xlog_verify_bno()
67 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize) in xlog_verify_bno()
73 * Allocate a buffer to hold log data. The buffer needs to be able to map to
74 * a range of nbblks basic blocks at any valid offset within the log.
78 struct xlog *log, in xlog_alloc_buffer() argument
81 int align_mask = xfs_buftarg_dma_alignment(log->l_targ); in xlog_alloc_buffer()
84 * Pass log block 0 since we don't have an addr yet, buffer will be in xlog_alloc_buffer()
87 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) { in xlog_alloc_buffer()
88 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer", in xlog_alloc_buffer()
94 * We do log I/O in units of log sectors (a power-of-2 multiple of the in xlog_alloc_buffer()
96 * the basic blocks required for complete log sectors. in xlog_alloc_buffer()
102 * issue. Nor will this be a problem if the log I/O is done in basic in xlog_alloc_buffer()
104 * extra log sector to ensure there's space to accommodate this in xlog_alloc_buffer()
107 if (nbblks > 1 && log->l_sectBBsize > 1) in xlog_alloc_buffer()
108 nbblks += log->l_sectBBsize; in xlog_alloc_buffer()
109 nbblks = round_up(nbblks, log->l_sectBBsize); in xlog_alloc_buffer()
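To make the sizing rule above concrete, here is a small self-contained model in plain C (hypothetical helper name; the kernel fragment does the same arithmetic with round_up() and log->l_sectBBsize):

    /* Pad an nbblks-block request so it survives sector-aligned I/O.
     * sect_bb is the log sector size in basic blocks, a power of two. */
    static int padded_bblks(int nbblks, int sect_bb)
    {
            if (nbblks > 1 && sect_bb > 1)
                    nbblks += sect_bb;      /* spare sector for the round_down of blk_no */
            return (nbblks + sect_bb - 1) & ~(sect_bb - 1); /* whole sectors only */
    }

For example, padded_bblks(4, 8) == 16: a 4-block read starting at block 6 straddles the sector boundary at block 8, so the sector-aligned range is [0, 16) and all 16 buffered blocks are needed.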
115 * in a log buffer. The buffer covers a log sector-aligned region.
119 struct xlog *log, in xlog_align() argument
122 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1)); in xlog_align()
127 struct xlog *log, in xlog_do_io() argument
135 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) { in xlog_do_io()
136 xfs_warn(log->l_mp, in xlog_do_io()
137 "Invalid log block/length (0x%llx, 0x%x) for buffer", in xlog_do_io()
142 blk_no = round_down(blk_no, log->l_sectBBsize); in xlog_do_io()
143 nbblks = round_up(nbblks, log->l_sectBBsize); in xlog_do_io()
146 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no, in xlog_do_io()
148 if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) { in xlog_do_io()
149 xfs_alert(log->l_mp, in xlog_do_io()
150 "log recovery %s I/O error at daddr 0x%llx len %d error %d", in xlog_do_io()
159 struct xlog *log, in xlog_bread_noalign() argument
164 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ); in xlog_bread_noalign()
169 struct xlog *log, in xlog_bread() argument
177 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ); in xlog_bread()
179 *offset = data + xlog_align(log, blk_no); in xlog_bread()
185 struct xlog *log, in xlog_bwrite() argument
190 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE); in xlog_bwrite()
195 * dump debug superblock and log record information
204 xfs_debug(mp, " log : uuid = %pU, fmt = %d", in xlog_header_check_dump()
212 * check log record header for recovery
224 * a dirty log created in IRIX. in xlog_header_check_recover()
228 "dirty log written in incompatible format - can't recover"); in xlog_header_check_recover()
235 "dirty log entry has mismatched uuid - can't recover"); in xlog_header_check_recover()
243 * read the head block of the log and check the header
255 * h_fs_uuid is null, we assume this log was last mounted in xlog_header_check_mount()
258 xfs_warn(mp, "null uuid in log - IRIX style log"); in xlog_header_check_mount()
261 xfs_warn(mp, "log has mismatched uuid - can't recover"); in xlog_header_check_mount()
270 * log which contains the given cycle. It uses a binary search algorithm.
276 struct xlog *log, in xlog_find_cycle_start() argument
291 error = xlog_bread(log, mid_blk, 1, buffer, &offset); in xlog_find_cycle_start()
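The binary search referenced above can be modeled in a few lines of self-contained C. This is an illustrative stand-in, not the kernel code: cycles[] replaces the xlog_bread() plus cycle-extraction pair, and the caller guarantees the cycle at first differs from the one at last:

    /* Return the first block in (first, last] whose cycle number equals
     * want, assuming cycles[first] != want and cycles[last] == want. */
    static int find_cycle_start(const int *cycles, int first, int last, int want)
    {
            while (last - first > 1) {
                    int mid = first + (last - first) / 2;

                    if (cycles[mid] == want)
                            last = mid;     /* start is at or before mid */
                    else
                            first = mid;    /* start is after mid */
            }
            return last;
    }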
319 struct xlog *log, in xlog_find_verify_cycle() argument
336 * a log sector, or we're out of luck. in xlog_find_verify_cycle()
339 while (bufblks > log->l_logBBsize) in xlog_find_verify_cycle()
341 while (!(buffer = xlog_alloc_buffer(log, bufblks))) { in xlog_find_verify_cycle()
343 if (bufblks < log->l_sectBBsize) in xlog_find_verify_cycle()
352 error = xlog_bread(log, i, bcount, buffer, &buf); in xlog_find_verify_cycle()
375 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh) in xlog_logrec_hblks() argument
377 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_logrec_hblks()
388 * Potentially backup over partial log record write.
391 * a good log record. Therefore, we subtract one to get the block number
394 * last log record is split over the end of the physical log.
401 struct xlog *log, in xlog_find_verify_log_record() argument
417 buffer = xlog_alloc_buffer(log, num_blks); in xlog_find_verify_log_record()
419 buffer = xlog_alloc_buffer(log, 1); in xlog_find_verify_log_record()
424 error = xlog_bread(log, start_blk, num_blks, buffer, &offset); in xlog_find_verify_log_record()
432 /* valid log record not found */ in xlog_find_verify_log_record()
433 xfs_warn(log->l_mp, in xlog_find_verify_log_record()
434 "Log inconsistent (didn't find previous header)"); in xlog_find_verify_log_record()
441 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_find_verify_log_record()
456 * We hit the beginning of the physical log & still no header. Return in xlog_find_verify_log_record()
458 * will be called again for the end of the physical log. in xlog_find_verify_log_record()
466 * We have the final block of the good log (the first block in xlog_find_verify_log_record()
467 * of the log record _before_ the head. So we check the uuid. in xlog_find_verify_log_record()
469 if ((error = xlog_header_check_mount(log->l_mp, head))) in xlog_find_verify_log_record()
473 * We may have found a log record header before we expected one. in xlog_find_verify_log_record()
475 * up reading an entire log record. In this case, we don't want to in xlog_find_verify_log_record()
476 * reset last_blk. Only when last_blk points in the middle of a log in xlog_find_verify_log_record()
479 xhdrs = xlog_logrec_hblks(log, head); in xlog_find_verify_log_record()
491 * Head is defined to be the point of the log where the next log write
495 * current cycle number -1 won't be present in the log if we start writing
505 struct xlog *log, in xlog_find_head() argument
514 int error, log_bbnum = log->l_logBBsize; in xlog_find_head()
516 /* Is the end of the log device zeroed? */ in xlog_find_head()
517 error = xlog_find_zeroed(log, &first_blk); in xlog_find_head()
519 xfs_warn(log->l_mp, "empty log check failed"); in xlog_find_head()
529 * log so we can store the uuid in there in xlog_find_head()
531 xfs_warn(log->l_mp, "totally zeroed log"); in xlog_find_head()
538 buffer = xlog_alloc_buffer(log, 1); in xlog_find_head()
542 error = xlog_bread(log, 0, 1, buffer, &offset); in xlog_find_head()
549 error = xlog_bread(log, last_blk, 1, buffer, &offset); in xlog_find_head()
558 * then the entire log is stamped with the same cycle number. In this in xlog_find_head()
569 * In this case we believe that the entire log should have in xlog_find_head()
579 * log, as one of the latest writes at the beginning was in xlog_find_head()
585 * end of the log. in xlog_find_head()
587 * In the 256k log case, we will read from the beginning to the in xlog_find_head()
588 * end of the log and search for cycle numbers equal to x-1. in xlog_find_head()
590 * because we know that they cannot be the head since the log in xlog_find_head()
598 * number matching last_half_cycle. We expect the log to be in xlog_find_head()
607 * the log, then we look for occurrences of last_half_cycle - 1 in xlog_find_head()
608 * at the end of the log. The cases we're looking for look in xlog_find_head()
619 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk, in xlog_find_head()
629 * in the in-core log. The following number can be made tighter if in xlog_find_head()
632 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log)); in xlog_find_head()
639 if ((error = xlog_find_verify_cycle(log, in xlog_find_head()
645 } else { /* need to read 2 parts of log */ in xlog_find_head()
647 * We are going to scan backwards in the log in two parts. in xlog_find_head()
648 * First we scan the physical end of the log. In this part in xlog_find_head()
649 * of the log, we are looking for blocks with cycle number in xlog_find_head()
651 * If we find one, then we know that the log starts there, as in xlog_find_head()
653 * the end of the physical log. The simple case for this is in xlog_find_head()
656 * If all of the blocks at the end of the log have cycle number in xlog_find_head()
658 * the log looking for occurrences of last_half_cycle. If we in xlog_find_head()
668 * In a 256k log, the scan at the end of the log will see the in xlog_find_head()
670 * certainly not the head of the log. By searching for in xlog_find_head()
676 if ((error = xlog_find_verify_cycle(log, start_blk, in xlog_find_head()
686 * Scan beginning of log now. The last part of the physical in xlog_find_head()
687 * log is good. This scan needs to verify that it doesn't find in xlog_find_head()
692 if ((error = xlog_find_verify_cycle(log, in xlog_find_head()
703 * the middle of a log record. in xlog_find_head()
705 num_scan_bblks = XLOG_REC_SHIFT(log); in xlog_find_head()
710 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); in xlog_find_head()
718 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0); in xlog_find_head()
722 /* We hit the beginning of the log during our search */ in xlog_find_head()
728 error = xlog_find_verify_log_record(log, start_blk, in xlog_find_head()
756 xfs_warn(log->l_mp, "failed to find log head"); in xlog_find_head()
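Stripped of the torn-write re-verification done by xlog_find_verify_cycle() and xlog_find_verify_log_record() in the fragments above, the core head-finding decision reduces to the sketch below. It reuses find_cycle_start() from the earlier sketch and is a deliberate simplification of the real case analysis, not a substitute for it:

    /* Toy model: cycles[] stands in for the per-block cycle numbers. */
    static int find_head(const int *cycles, int log_size)
    {
            int first_half = cycles[0];
            int last_half = cycles[log_size - 1];

            if (first_half == last_half)
                    return 0;       /* one cycle everywhere: head near the front */
            /* otherwise the head sits where the newer cycle ends and the
             * older (last_half) cycle resumes */
            return find_cycle_start(cycles, 0, log_size - 1, last_half);
    }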
761 * Seek backwards in the log for log record headers.
763 * Given a starting log block, walk backwards until we find the provided number
765 * records encountered or a negative error code. The log block and buffer
770 struct xlog *log, in xlog_rseek_logrec_hdr() argument
789 * block in the log. in xlog_rseek_logrec_hdr()
793 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_rseek_logrec_hdr()
806 * If we haven't hit the tail block or the log record header count, in xlog_rseek_logrec_hdr()
807 * start looking again from the end of the physical log. Note that in xlog_rseek_logrec_hdr()
811 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) { in xlog_rseek_logrec_hdr()
812 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_rseek_logrec_hdr()
834 * Seek forward in the log for log record headers.
838 * number of records encountered or a negative error code. The log block and
844 struct xlog *log, in xlog_seek_logrec_hdr() argument
863 * block in the log. in xlog_seek_logrec_hdr()
865 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1; in xlog_seek_logrec_hdr()
867 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_seek_logrec_hdr()
880 * If we haven't hit the head block or the log record header count, in xlog_seek_logrec_hdr()
881 * start looking again from the start of the physical log. in xlog_seek_logrec_hdr()
885 error = xlog_bread(log, i, 1, buffer, &offset); in xlog_seek_logrec_hdr()
907 * Calculate distance from head to tail (i.e., unused space in the log).
911 struct xlog *log, in xlog_tail_distance() argument
918 return tail_blk + (log->l_logBBsize - head_blk); in xlog_tail_distance()
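Only the wrapped branch appears in the quoted fragment; a self-contained model, with the unwrapped branch added as an assumption, looks like this:

    /* Unused blocks between head and tail in a circular log. */
    static int tail_distance(int head_blk, int tail_blk, int log_size)
    {
            if (head_blk < tail_blk)
                    return tail_blk - head_blk;             /* no wrap */
            return tail_blk + (log_size - head_blk);        /* wraps the end */
    }

    /* e.g. tail_distance(900, 100, 1000) == 200 free blocks */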
922 * Verify the log tail. This is particularly important when torn or incomplete
923 * writes have been detected near the front of the log and the head has been
930 * log with garbage. This is not a coherency problem because the tail must have
931 * been pushed before it can be overwritten, but appears as log corruption to
935 * Therefore, CRC check the log from tail to head. If a failure occurs and the
942 struct xlog *log, in xlog_verify_tail() argument
955 buffer = xlog_alloc_buffer(log, 1); in xlog_verify_tail()
963 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer, in xlog_verify_tail()
978 error = xlog_do_recovery_pass(log, head_blk, *tail_blk, in xlog_verify_tail()
987 tail_distance = xlog_tail_distance(log, head_blk, first_bad); in xlog_verify_tail()
992 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2, in xlog_verify_tail()
999 error = xlog_do_recovery_pass(log, head_blk, *tail_blk, in xlog_verify_tail()
1004 xfs_warn(log->l_mp, in xlog_verify_tail()
1013 * Detect and trim torn writes from the head of the log.
1016 * log in the event of a crash. Our only means to detect this scenario is via
1019 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1021 * the log and treat failures in this range as torn writes as a matter of
1023 * record in the log and the tail is updated from that record and verified.
1027 struct xlog *log, in xlog_verify_head() argument
1033 bool *wrapped) /* last rec. wraps phys. log */ in xlog_verify_head()
1044 * Check the head of the log for torn writes. Search backwards from the in xlog_verify_head()
1045 * head until we hit the tail or the maximum number of log record I/Os in xlog_verify_head()
1049 tmp_buffer = xlog_alloc_buffer(log, 1); in xlog_verify_head()
1052 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk, in xlog_verify_head()
1062 * log block of the first bad record is saved in first_bad. in xlog_verify_head()
1064 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk, in xlog_verify_head()
1072 xfs_warn(log->l_mp, in xlog_verify_head()
1073 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.", in xlog_verify_head()
1084 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, in xlog_verify_head()
1093 * log record and set the tail block based on the last good in xlog_verify_head()
1110 return xlog_verify_tail(log, *head_blk, tail_blk, in xlog_verify_head()
1115 * We need to make sure we handle log wrapping properly, so we can't use the
1117 * log.
1119 * The log is limited to 32 bit sizes, so we use the appropriate modulus
1124 struct xlog *log, in xlog_wrap_logbno() argument
1129 div_s64_rem(bno, log->l_logBBsize, &mod); in xlog_wrap_logbno()
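In plain userspace C the wrap is just a modulus; the quoted fragment calls div_s64_rem(), the kernel helper for 64-bit division, to stay efficient on 32-bit hosts. A minimal model:

    /* Reduce a possibly past-the-end block number into [0, log_size). */
    static long long wrap_logbno(long long bno, int log_size)
    {
            return bno % log_size;  /* e.g. wrap_logbno(1050, 1000) == 50 */
    }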
1134 * Check whether the head of the log points to an unmount record. In other
1135 * words, determine whether the log is clean. If so, update the in-core state
1140 struct xlog *log, in xlog_check_unmount_rec() argument
1160 * log, we convert to a log block before comparing to the head_blk. in xlog_check_unmount_rec()
1166 hblks = xlog_logrec_hblks(log, rhead); in xlog_check_unmount_rec()
1167 after_umount_blk = xlog_wrap_logbno(log, in xlog_check_unmount_rec()
1172 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks); in xlog_check_unmount_rec()
1173 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset); in xlog_check_unmount_rec()
1180 * Set tail and last sync so that newly written log in xlog_check_unmount_rec()
1184 xlog_assign_atomic_lsn(&log->l_tail_lsn, in xlog_check_unmount_rec()
1185 log->l_curr_cycle, after_umount_blk); in xlog_check_unmount_rec()
1186 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, in xlog_check_unmount_rec()
1187 log->l_curr_cycle, after_umount_blk); in xlog_check_unmount_rec()
1199 struct xlog *log, in xlog_set_state() argument
1206 * Reset log values according to the state of the log when we in xlog_set_state()
1209 * continuing the cycle of the last good log record. At this in xlog_set_state()
1210 * point we have guaranteed that all partial log records have been in xlog_set_state()
1211 * accounted for. Therefore, we know that the last good log record in xlog_set_state()
1213 * of the physical log. in xlog_set_state()
1215 log->l_prev_block = rhead_blk; in xlog_set_state()
1216 log->l_curr_block = (int)head_blk; in xlog_set_state()
1217 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle); in xlog_set_state()
1219 log->l_curr_cycle++; in xlog_set_state()
1220 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn)); in xlog_set_state()
1221 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn)); in xlog_set_state()
1222 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle, in xlog_set_state()
1223 BBTOB(log->l_curr_block)); in xlog_set_state()
1224 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle, in xlog_set_state()
1225 BBTOB(log->l_curr_block)); in xlog_set_state()
1229 * Find the sync block number or the tail of the log.
1232 * associated buffers synced to disk. Every log record header has
1235 * log record header to believe.
1237 * The following algorithm uses the log record header with the largest
1238 * lsn. The entire log record does not need to be valid. We only care
1246 struct xlog *log, in xlog_find_tail() argument
1260 * Find previous log record in xlog_find_tail()
1262 if ((error = xlog_find_head(log, head_blk))) in xlog_find_tail()
1266 buffer = xlog_alloc_buffer(log, 1); in xlog_find_tail()
1270 error = xlog_bread(log, 0, 1, buffer, &offset); in xlog_find_tail()
1276 /* leave all other log inited values alone */ in xlog_find_tail()
1282 * Search backwards through the log looking for the log record header in xlog_find_tail()
1286 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer, in xlog_find_tail()
1291 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__); in xlog_find_tail()
1298 * Set the log state based on the current head record. in xlog_find_tail()
1300 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped); in xlog_find_tail()
1301 tail_lsn = atomic64_read(&log->l_tail_lsn); in xlog_find_tail()
1304 * Look for an unmount record at the head of the log. This sets the log in xlog_find_tail()
1307 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead, in xlog_find_tail()
1313 * Verify the log head if the log is not clean (e.g., we have anything in xlog_find_tail()
1316 * considered torn writes and the log head is trimmed accordingly. in xlog_find_tail()
1318 * Note that we can only run CRC verification when the log is dirty in xlog_find_tail()
1319 * because there's no guarantee that the log data behind an unmount in xlog_find_tail()
1325 error = xlog_verify_head(log, head_blk, tail_blk, buffer, in xlog_find_tail()
1332 xlog_set_state(log, *head_blk, rhead, rhead_blk, in xlog_find_tail()
1334 tail_lsn = atomic64_read(&log->l_tail_lsn); in xlog_find_tail()
1335 error = xlog_check_unmount_rec(log, head_blk, tail_blk, in xlog_find_tail()
1349 log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN; in xlog_find_tail()
1354 * because we allow multiple outstanding log writes concurrently, in xlog_find_tail()
1370 if (!xfs_readonly_buftarg(log->l_targ)) in xlog_find_tail()
1371 error = xlog_clear_stale_blocks(log, tail_lsn); in xlog_find_tail()
1377 xfs_warn(log->l_mp, "failed to locate log tail"); in xlog_find_tail()
1382 * Is the log zeroed at all?
1388 * If the log is partially zeroed, this routine will pass back the blkno
1393 * 0 => the log is completely written to
1394 * 1 => use *blk_no as the first block of the log
1399 struct xlog *log, in xlog_find_zeroed() argument
1407 int error, log_bbnum = log->l_logBBsize; in xlog_find_zeroed()
1411 /* check totally zeroed log */ in xlog_find_zeroed()
1412 buffer = xlog_alloc_buffer(log, 1); in xlog_find_zeroed()
1415 error = xlog_bread(log, 0, 1, buffer, &offset); in xlog_find_zeroed()
1420 if (first_cycle == 0) { /* completely zeroed log */ in xlog_find_zeroed()
1426 /* check partially zeroed log */ in xlog_find_zeroed()
1427 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset); in xlog_find_zeroed()
1432 if (last_cycle != 0) { /* log completely written to */ in xlog_find_zeroed()
1437 /* we have a partially zeroed log */ in xlog_find_zeroed()
1439 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0); in xlog_find_zeroed()
1445 * the entire log is made up of log records which are the same size, in xlog_find_zeroed()
1449 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log); in xlog_find_zeroed()
1462 if ((error = xlog_find_verify_cycle(log, start_blk, in xlog_find_zeroed()
1469 * Potentially backup over partial log record write. We don't need in xlog_find_zeroed()
1470 * to search the end of the log because we know it is zero. in xlog_find_zeroed()
1472 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0); in xlog_find_zeroed()
1488 * to initialize a buffer full of empty log record headers and write
1489 * them into the log.
1493 struct xlog *log, in xlog_add_record() argument
1506 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_add_record()
1510 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t)); in xlog_add_record()
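Around the quoted h_version and h_fs_uuid assignments, the rest of the header stamping plausibly looks like the sketch below; the h_magicno, h_cycle, and h_lsn lines are inferred from struct xlog_rec_header and are not in the fragments above:

    recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);   /* inferred */
    recp->h_cycle = cpu_to_be32(cycle);                     /* inferred */
    recp->h_version = cpu_to_be32(
                    xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
    recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block)); /* inferred */
    memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));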
1515 struct xlog *log, in xlog_write_log_records() argument
1525 int sectbb = log->l_sectBBsize; in xlog_write_log_records()
1535 * log sector, or we're out of luck. in xlog_write_log_records()
1538 while (bufblks > log->l_logBBsize) in xlog_write_log_records()
1540 while (!(buffer = xlog_alloc_buffer(log, bufblks))) { in xlog_write_log_records()
1552 error = xlog_bread_noalign(log, start_block, 1, buffer); in xlog_write_log_records()
1571 error = xlog_bread_noalign(log, ealign, sectbb, in xlog_write_log_records()
1578 offset = buffer + xlog_align(log, start_block); in xlog_write_log_records()
1580 xlog_add_record(log, offset, cycle, i+j, in xlog_write_log_records()
1584 error = xlog_bwrite(log, start_block, endcount, buffer); in xlog_write_log_records()
1597 * This routine is called to blow away any incomplete log writes out
1598 * in front of the log head. We do this so that we won't become confused
1600 * If we leave the partial log records out there, this situation could
1603 * with empty log records with the old cycle number rather than the
1607 * the log so that we will not write over the unmount record after a
1608 * clean unmount in a 512 block log. Doing so would leave the log without
1609 * any valid log records in it until a new one was written. If we crashed
1614 struct xlog *log, in xlog_clear_stale_blocks() argument
1625 head_cycle = log->l_curr_cycle; in xlog_clear_stale_blocks()
1626 head_block = log->l_curr_block; in xlog_clear_stale_blocks()
1629 * Figure out the distance between the new head of the log in xlog_clear_stale_blocks()
1632 * we don't want to overwrite the tail of the log. in xlog_clear_stale_blocks()
1636 * The tail is behind the head in the physical log, in xlog_clear_stale_blocks()
1638 * distance from the head to the end of the log plus in xlog_clear_stale_blocks()
1639 * the distance from the beginning of the log to the in xlog_clear_stale_blocks()
1642 if (XFS_IS_CORRUPT(log->l_mp, in xlog_clear_stale_blocks()
1644 head_block >= log->l_logBBsize)) in xlog_clear_stale_blocks()
1646 tail_distance = tail_block + (log->l_logBBsize - head_block); in xlog_clear_stale_blocks()
1649 * The head is behind the tail in the physical log, in xlog_clear_stale_blocks()
1653 if (XFS_IS_CORRUPT(log->l_mp, in xlog_clear_stale_blocks()
1669 max_distance = XLOG_TOTAL_REC_SHIFT(log); in xlog_clear_stale_blocks()
1679 if ((head_block + max_distance) <= log->l_logBBsize) { in xlog_clear_stale_blocks()
1682 * wrapping around the end of the log. Just do it in xlog_clear_stale_blocks()
1684 * current cycle minus one so that the log will look like: in xlog_clear_stale_blocks()
1687 error = xlog_write_log_records(log, (head_cycle - 1), in xlog_clear_stale_blocks()
1694 * We need to wrap around the end of the physical log in in xlog_clear_stale_blocks()
1697 * end of the physical log, and it should use the current in xlog_clear_stale_blocks()
1700 distance = log->l_logBBsize - head_block; in xlog_clear_stale_blocks()
1701 error = xlog_write_log_records(log, (head_cycle - 1), in xlog_clear_stale_blocks()
1709 * Now write the blocks at the start of the physical log. in xlog_clear_stale_blocks()
1716 distance = max_distance - (log->l_logBBsize - head_block); in xlog_clear_stale_blocks()
1717 error = xlog_write_log_records(log, head_cycle, 0, distance, in xlog_clear_stale_blocks()
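The wrap handling quoted above follows the shape below; write_records() is a hypothetical stand-in for xlog_write_log_records() with the error handling dropped:

    if (head_block + max_distance <= log_size) {
            /* no wrap: stamp with the previous cycle so the stale
             * blocks read as older than the new head */
            write_records(cycle - 1, head_block, max_distance);
    } else {
            /* wrap: previous cycle up to the physical end, current
             * cycle from block 0 for the remainder */
            int tail_part = log_size - head_block;

            write_records(cycle - 1, head_block, tail_part);
            write_records(cycle, 0, max_distance - tail_part);
    }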
1732 struct xlog *log, in xlog_recover_release_intent() argument
1738 struct xfs_ail *ailp = log->l_ailp; in xlog_recover_release_intent()
1760 * Log recover routines
1794 * Sort the log items in the transaction.
1844 struct xlog *log, in xlog_recover_reorder_trans() argument
1862 xfs_warn(log->l_mp, in xlog_recover_reorder_trans()
1863 "%s: unrecognized type of log operation (%d)", in xlog_recover_reorder_trans()
1884 trace_xfs_log_recover_item_reorder_head(log, in xlog_recover_reorder_trans()
1892 trace_xfs_log_recover_item_reorder_tail(log, in xlog_recover_reorder_trans()
1913 struct xlog *log, in xlog_buf_readahead() argument
1918 if (!xlog_is_buffer_cancelled(log, blkno, len)) in xlog_buf_readahead()
1919 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops); in xlog_buf_readahead()
1924 struct xlog *log, in xlog_recover_items_pass2() argument
1933 trace_xfs_log_recover_item_recover(log, trans, item, in xlog_recover_items_pass2()
1937 error = item->ri_ops->commit_pass2(log, buffer_list, in xlog_recover_items_pass2()
1954 struct xlog *log, in xlog_recover_commit_trans() argument
1970 error = xlog_recover_reorder_trans(log, trans, pass); in xlog_recover_commit_trans()
1975 trace_xfs_log_recover_item_recover(log, trans, item, pass); in xlog_recover_commit_trans()
1980 error = item->ri_ops->commit_pass1(log, item); in xlog_recover_commit_trans()
1984 item->ri_ops->ra_pass2(log, item); in xlog_recover_commit_trans()
1988 error = xlog_recover_items_pass2(log, trans, in xlog_recover_commit_trans()
2006 error = xlog_recover_items_pass2(log, trans, in xlog_recover_commit_trans()
2030 struct xlog *log, in xlog_recover_add_to_cont_trans() argument
2046 xfs_warn(log->l_mp, "%s: bad header length", __func__); in xlog_recover_add_to_cont_trans()
2070 trace_xfs_log_recover_item_add_cont(log, trans, item, 0); in xlog_recover_add_to_cont_trans()
2085 * will appear in the current log item.
2089 struct xlog *log, in xlog_recover_add_to_trans() argument
2101 /* we need to catch log corruptions here */ in xlog_recover_add_to_trans()
2103 xfs_warn(log->l_mp, "%s: bad header magic number", in xlog_recover_add_to_trans()
2110 xfs_warn(log->l_mp, "%s: bad header length", __func__); in xlog_recover_add_to_trans()
2144 xfs_warn(log->l_mp, in xlog_recover_add_to_trans()
2145 "bad number of regions (%d) in inode log format", in xlog_recover_add_to_trans()
2159 xfs_warn(log->l_mp, in xlog_recover_add_to_trans()
2160 "log item region count (%d) overflowed size (%d)", in xlog_recover_add_to_trans()
2171 trace_xfs_log_recover_item_add(log, trans, item, 0); in xlog_recover_add_to_trans()
2207 struct xlog *log, in xlog_recovery_process_trans() argument
2231 error = xlog_recover_add_to_trans(log, trans, dp, len); in xlog_recovery_process_trans()
2234 error = xlog_recover_add_to_cont_trans(log, trans, dp, len); in xlog_recovery_process_trans()
2237 error = xlog_recover_commit_trans(log, trans, pass, in xlog_recovery_process_trans()
2246 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); in xlog_recovery_process_trans()
2251 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags); in xlog_recovery_process_trans()
2314 struct xlog *log, in xlog_recover_process_ophdr() argument
2330 xfs_warn(log->l_mp, "%s: bad clientid 0x%x", in xlog_recover_process_ophdr()
2341 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len); in xlog_recover_process_ophdr()
2358 * - Log recovery skips items with a metadata LSN >= the current LSN of in xlog_recover_process_ophdr()
2366 * In other words, we are allowed to submit a buffer from log recovery in xlog_recover_process_ophdr()
2371 * LSN. Therefore, track the current LSN of each commit log record as it in xlog_recover_process_ophdr()
2375 if (log->l_recovery_lsn != trans->r_lsn && in xlog_recover_process_ophdr()
2380 log->l_recovery_lsn = trans->r_lsn; in xlog_recover_process_ophdr()
2383 return xlog_recovery_process_trans(log, trans, dp, len, in xlog_recover_process_ophdr()
2398 struct xlog *log, in xlog_recover_process_data() argument
2413 /* check the log format matches our own - else we can't recover */ in xlog_recover_process_data()
2414 if (xlog_header_check_recover(log->l_mp, rhead)) in xlog_recover_process_data()
2417 trace_xfs_log_recover_record(log, rhead, pass); in xlog_recover_process_data()
2423 xfs_warn(log->l_mp, "%s: op header overrun", __func__); in xlog_recover_process_data()
2428 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, in xlog_recover_process_data()
2457 * in recovery no matter how full the log might be. in xlog_finish_defer_ops()
2505 * When this is called, all of the log intent items which did not have
2506 * corresponding log done items should be in the AIL. What we do now
2509 * Since we process the log intent items in normal transactions, they
2522 struct xlog *log) in xlog_recover_process_intents() argument
2533 ailp = log->l_ailp; in xlog_recover_process_intents()
2536 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block); in xlog_recover_process_intents()
2555 * the last transaction we found in the log at the start in xlog_recover_process_intents()
2578 error = xlog_finish_defer_ops(log->l_mp, &capture_list); in xlog_recover_process_intents()
2584 xlog_abort_defer_ops(log->l_mp, &capture_list); in xlog_recover_process_intents()
2590 * Release all pending log intent items so they don't pin the AIL.
2594 struct xlog *log) in xlog_recover_cancel_intents() argument
2600 ailp = log->l_ailp; in xlog_recover_cancel_intents()
2740 * of log space.
2744 * can lead to deadlocks if the recovery process runs out of log reservation
2751 struct xlog *log) in xlog_recover_process_iunlinks() argument
2761 mp = log->l_mp; in xlog_recover_process_iunlinks()
2805 struct xlog *log) in xlog_unpack_data() argument
2815 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_unpack_data()
2827 * CRC check, unpack and process a log record.
2831 struct xlog *log, in xlog_recover_process() argument
2841 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len)); in xlog_recover_process()
2863 if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { in xlog_recover_process()
2864 xfs_alert(log->l_mp, in xlog_recover_process()
2865 "log record CRC mismatch: found 0x%x, expected 0x%x.", in xlog_recover_process()
2873 * fatal log corruption failure. in xlog_recover_process()
2875 if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) { in xlog_recover_process()
2876 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp); in xlog_recover_process()
2881 xlog_unpack_data(rhead, dp, log); in xlog_recover_process()
2883 return xlog_recover_process_data(log, rhash, rhead, dp, pass, in xlog_recover_process()
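Pieced together from the fragments above, the CRC policy is roughly the following (hedged sketch; old_crc is the CRC read from the on-disk record header):

    crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
    if (crc != old_crc) {
            if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb))
                    xfs_alert(log->l_mp,
            "log record CRC mismatch: found 0x%x, expected 0x%x.",
                              le32_to_cpu(old_crc), le32_to_cpu(crc));
            if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
                    /* CRCs are authoritative on v5 filesystems: fatal */
                    XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
                    return -EFSCORRUPTED;
            }
            /* pre-v5: warn and continue with recovery */
    }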
2889 struct xlog *log, in xlog_valid_rec_header() argument
2896 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2899 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2903 xfs_warn(log->l_mp, "%s: unrecognised log version (%d).", in xlog_valid_rec_header()
2913 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize)) in xlog_valid_rec_header()
2916 if (XFS_IS_CORRUPT(log->l_mp, in xlog_valid_rec_header()
2917 blkno > log->l_logBBsize || blkno > INT_MAX)) in xlog_valid_rec_header()
2923 * Read the log from tail to head and process the log records found.
2925 * and where the active portion of the log wraps around the end of
2926 * the physical log separately. The pass parameter is passed through
2932 struct xlog *log, in xlog_do_recovery_pass() argument
2936 xfs_daddr_t *first_bad) /* out: first bad log rec */ in xlog_do_recovery_pass()
2959 * h_size. Use this to tell how many sectors make up the log header. in xlog_do_recovery_pass()
2961 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_do_recovery_pass()
2967 hbp = xlog_alloc_buffer(log, 1); in xlog_do_recovery_pass()
2971 error = xlog_bread(log, tail_blk, 1, hbp, &offset); in xlog_do_recovery_pass()
2981 * log buffer can be too small for the record and cause an in xlog_do_recovery_pass()
2990 if (h_len > h_size && h_len <= log->l_mp->m_logbsize && in xlog_do_recovery_pass()
2992 xfs_warn(log->l_mp, in xlog_do_recovery_pass()
2994 h_size, log->l_mp->m_logbsize); in xlog_do_recovery_pass()
2995 h_size = log->l_mp->m_logbsize; in xlog_do_recovery_pass()
2998 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size); in xlog_do_recovery_pass()
3013 hbp = xlog_alloc_buffer(log, hblks); in xlog_do_recovery_pass()
3017 ASSERT(log->l_sectBBsize == 1); in xlog_do_recovery_pass()
3018 hbp = xlog_alloc_buffer(log, 1); in xlog_do_recovery_pass()
3024 dbp = xlog_alloc_buffer(log, BTOBB(h_size)); in xlog_do_recovery_pass()
3033 * Perform recovery around the end of the physical log. in xlog_do_recovery_pass()
3037 while (blk_no < log->l_logBBsize) { in xlog_do_recovery_pass()
3039 * Check for header wrapping around physical end-of-log in xlog_do_recovery_pass()
3044 if (blk_no + hblks <= log->l_logBBsize) { in xlog_do_recovery_pass()
3046 error = xlog_bread(log, blk_no, hblks, hbp, in xlog_do_recovery_pass()
3051 /* This LR is split across physical log end */ in xlog_do_recovery_pass()
3052 if (blk_no != log->l_logBBsize) { in xlog_do_recovery_pass()
3053 /* some data before physical log end */ in xlog_do_recovery_pass()
3055 split_hblks = log->l_logBBsize - (int)blk_no; in xlog_do_recovery_pass()
3057 error = xlog_bread(log, blk_no, in xlog_do_recovery_pass()
3070 * - the log start is guaranteed to be sector in xlog_do_recovery_pass()
3072 * - we read the log end (LR header start) in xlog_do_recovery_pass()
3073 * _first_, then the log start (LR header end) in xlog_do_recovery_pass()
3077 error = xlog_bread_noalign(log, 0, in xlog_do_recovery_pass()
3084 error = xlog_valid_rec_header(log, rhead, in xlog_do_recovery_pass()
3093 * Read the log record data in multiple reads if it in xlog_do_recovery_pass()
3094 * wraps around the end of the log. Note that if the in xlog_do_recovery_pass()
3096 * end of the log. The record data is contiguous in in xlog_do_recovery_pass()
3099 if (blk_no + bblks <= log->l_logBBsize || in xlog_do_recovery_pass()
3100 blk_no >= log->l_logBBsize) { in xlog_do_recovery_pass()
3101 rblk_no = xlog_wrap_logbno(log, blk_no); in xlog_do_recovery_pass()
3102 error = xlog_bread(log, rblk_no, bblks, dbp, in xlog_do_recovery_pass()
3107 /* This log record is split across the in xlog_do_recovery_pass()
3108 * physical end of log */ in xlog_do_recovery_pass()
3111 if (blk_no != log->l_logBBsize) { in xlog_do_recovery_pass()
3113 * end of log */ in xlog_do_recovery_pass()
3117 log->l_logBBsize - (int)blk_no; in xlog_do_recovery_pass()
3119 error = xlog_bread(log, blk_no, in xlog_do_recovery_pass()
3132 * - the log start is guaranteed to be sector in xlog_do_recovery_pass()
3134 * - we read the log end (LR header start) in xlog_do_recovery_pass()
3135 * _first_, then the log start (LR header end) in xlog_do_recovery_pass()
3138 error = xlog_bread_noalign(log, 0, in xlog_do_recovery_pass()
3145 error = xlog_recover_process(log, rhash, rhead, offset, in xlog_do_recovery_pass()
3154 ASSERT(blk_no >= log->l_logBBsize); in xlog_do_recovery_pass()
3155 blk_no -= log->l_logBBsize; in xlog_do_recovery_pass()
3159 /* read first part of physical log */ in xlog_do_recovery_pass()
3161 error = xlog_bread(log, blk_no, hblks, hbp, &offset); in xlog_do_recovery_pass()
3166 error = xlog_valid_rec_header(log, rhead, blk_no, h_size); in xlog_do_recovery_pass()
3172 error = xlog_bread(log, blk_no+hblks, bblks, dbp, in xlog_do_recovery_pass()
3177 error = xlog_recover_process(log, rhash, rhead, offset, pass, in xlog_do_recovery_pass()
3218 * Do the recovery of the log. We actually do this in two phases.
3220 * of cancelling a record written into the log. The first pass
3222 * second pass replays log items normally except for those which
3224 * takes place in the log item type specific routines.
3226 * The table of items which have cancel records in the log is allocated
3228 * the log recovery has been completed.
3232 struct xlog *log, in xlog_do_log_recovery() argument
3241 * First do a pass to find all of the cancelled buf log items. in xlog_do_log_recovery()
3244 log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE * in xlog_do_log_recovery()
3248 INIT_LIST_HEAD(&log->l_buf_cancel_table[i]); in xlog_do_log_recovery()
3250 error = xlog_do_recovery_pass(log, head_blk, tail_blk, in xlog_do_log_recovery()
3253 kmem_free(log->l_buf_cancel_table); in xlog_do_log_recovery()
3254 log->l_buf_cancel_table = NULL; in xlog_do_log_recovery()
3258 * Then do a second pass to actually recover the items in the log. in xlog_do_log_recovery()
3261 error = xlog_do_recovery_pass(log, head_blk, tail_blk, in xlog_do_log_recovery()
3268 ASSERT(list_empty(&log->l_buf_cancel_table[i])); in xlog_do_log_recovery()
3272 kmem_free(log->l_buf_cancel_table); in xlog_do_log_recovery()
3273 log->l_buf_cancel_table = NULL; in xlog_do_log_recovery()
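The two passes described at the top of this block come down to two calls into the same scanner, roughly as sketched here (the real function also frees l_buf_cancel_table on the pass-1 error path, as the fragments show):

    error = xlog_do_recovery_pass(log, head_blk, tail_blk,
                                  XLOG_RECOVER_PASS1, NULL);
    if (!error)
            error = xlog_do_recovery_pass(log, head_blk, tail_blk,
                                          XLOG_RECOVER_PASS2, NULL);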
3283 struct xlog *log, in xlog_do_recover() argument
3287 struct xfs_mount *mp = log->l_mp; in xlog_do_recover()
3292 trace_xfs_log_recover(log, head_blk, tail_blk); in xlog_do_recover()
3295 * First replay the images in the log. in xlog_do_recover()
3297 error = xlog_do_log_recovery(log, head_blk, tail_blk); in xlog_do_recover()
3310 * or iunlinks, we can free up the entire log and set the tail_lsn to in xlog_do_recover()
3347 xlog_recover_check_summary(log); in xlog_do_recover()
3350 log->l_flags &= ~XLOG_ACTIVE_RECOVERY; in xlog_do_recover()
3355 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3361 struct xlog *log) in xlog_recover() argument
3366 /* find the tail of the log */ in xlog_recover()
3367 error = xlog_find_tail(log, &head_blk, &tail_blk); in xlog_recover()
3372 * The superblock was read before the log was available and thus the LSN in xlog_recover()
3376 if (xfs_sb_version_hascrc(&log->l_mp->m_sb) && in xlog_recover()
3377 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn)) in xlog_recover()
3392 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) { in xlog_recover()
3397 * Version 5 superblock log feature mask validation. We know the in xlog_recover()
3398 * log is dirty so check if there are any unknown log features in xlog_recover()
3403 if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 && in xlog_recover()
3404 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb, in xlog_recover()
3406 xfs_warn(log->l_mp, in xlog_recover()
3407 "Superblock has unknown incompatible log features (0x%x) enabled.", in xlog_recover()
3408 (log->l_mp->m_sb.sb_features_log_incompat & in xlog_recover()
3410 xfs_warn(log->l_mp, in xlog_recover()
3411 "The log can not be fully and/or safely recovered by this kernel."); in xlog_recover()
3412 xfs_warn(log->l_mp, in xlog_recover()
3413 "Please recover the log on a kernel that supports the unknown features."); in xlog_recover()
3418 * Delay log recovery if the debug hook is set. This is debug in xlog_recover()
3420 * log recovery. in xlog_recover()
3423 xfs_notice(log->l_mp, in xlog_recover()
3424 "Delaying log recovery for %d seconds.", in xlog_recover()
3429 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)", in xlog_recover()
3430 log->l_mp->m_logname ? log->l_mp->m_logname in xlog_recover()
3433 error = xlog_do_recover(log, head_blk, tail_blk); in xlog_recover()
3434 log->l_flags |= XLOG_RECOVERY_NEEDED; in xlog_recover()
3450 struct xlog *log) in xlog_recover_finish() argument
3460 if (log->l_flags & XLOG_RECOVERY_NEEDED) { in xlog_recover_finish()
3462 error = xlog_recover_process_intents(log); in xlog_recover_finish()
3471 xlog_recover_cancel_intents(log); in xlog_recover_finish()
3472 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_recover_finish()
3473 xfs_alert(log->l_mp, "Failed to recover intents"); in xlog_recover_finish()
3478 * Sync the log to get all the intents out of the AIL. in xlog_recover_finish()
3483 xfs_log_force(log->l_mp, XFS_LOG_SYNC); in xlog_recover_finish()
3485 xlog_recover_process_iunlinks(log); in xlog_recover_finish()
3487 xlog_recover_check_summary(log); in xlog_recover_finish()
3489 xfs_notice(log->l_mp, "Ending recovery (logdev: %s)", in xlog_recover_finish()
3490 log->l_mp->m_logname ? log->l_mp->m_logname in xlog_recover_finish()
3492 log->l_flags &= ~XLOG_RECOVERY_NEEDED; in xlog_recover_finish()
3494 xfs_info(log->l_mp, "Ending clean mount"); in xlog_recover_finish()
3501 struct xlog *log) in xlog_recover_cancel() argument
3503 if (log->l_flags & XLOG_RECOVERY_NEEDED) in xlog_recover_cancel()
3504 xlog_recover_cancel_intents(log); in xlog_recover_cancel()
3514 struct xlog *log) in xlog_recover_check_summary() argument
3525 mp = log->l_mp; in xlog_recover_check_summary()