Lines Matching full:log
35 struct xlog *log,
39 struct xlog *log);
46 struct xlog *log,
54 struct xlog *log,
59 struct xlog *log,
63 struct xlog *log,
68 struct xlog *log,
72 struct xlog *log);
75 struct xlog *log,
80 struct xlog *log,
92 struct xlog *log);
96 struct xlog *log, in xlog_grant_sub_space() argument
110 space += log->l_logsize; in xlog_grant_sub_space()
122 struct xlog *log, in xlog_grant_add_space() argument
135 tmp = log->l_logsize - space; in xlog_grant_add_space()
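
The xlog_grant_sub_space() and xlog_grant_add_space() hits above (the "space += log->l_logsize" and "tmp = log->l_logsize - space" fragments) are the wrap-around accounting on a grant head, which the log tracks as a (cycle, byte offset) pair. Below is a minimal standalone sketch of that arithmetic, assuming a plain struct in place of the kernel's packed atomic64 value; grant_head, grant_add_space and grant_sub_space are invented names for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the packed (cycle, bytes) grant head value. */
struct grant_head {
	int32_t	cycle;	/* how many times the head has lapped the log */
	int32_t	bytes;	/* byte offset within the current cycle */
};

/* Reserve space: advance the head, lapping into the next cycle on overflow. */
static void grant_add_space(struct grant_head *head, int32_t logsize, int32_t bytes)
{
	int32_t left_in_cycle = logsize - head->bytes;

	if (bytes < left_in_cycle) {
		head->bytes += bytes;
	} else {
		head->bytes = bytes - left_in_cycle;
		head->cycle++;
	}
}

/* Release space: move the head back, borrowing from the previous cycle on underflow. */
static void grant_sub_space(struct grant_head *head, int32_t logsize, int32_t bytes)
{
	head->bytes -= bytes;
	if (head->bytes < 0) {
		head->bytes += logsize;		/* same fix-up as "space += log->l_logsize" above */
		head->cycle--;
	}
}

int main(void)
{
	struct grant_head h = { .cycle = 1, .bytes = 900 };
	int32_t logsize = 1024;

	grant_add_space(&h, logsize, 200);	/* wraps: cycle 2, offset 76 */
	assert(h.cycle == 2 && h.bytes == 76);
	grant_sub_space(&h, logsize, 200);	/* undoes the wrap */
	assert(h.cycle == 1 && h.bytes == 900);
	printf("cycle=%d bytes=%d\n", h.cycle, h.bytes);
	return 0;
}
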
172 struct xlog *log, in xlog_ticket_reservation() argument
176 if (head == &log->l_write_head) { in xlog_ticket_reservation()
189 struct xlog *log, in xlog_grant_head_wake() argument
202 * limiting the target to the log head (l_last_sync_lsn) at the in xlog_grant_head_wake()
203 * time. This may not reflect where the log head is now as the in xlog_grant_head_wake()
207 * log that has moved rather than the tail. As the tail didn't in xlog_grant_head_wake()
210 * pushed to the target defined by the old log head location, we in xlog_grant_head_wake()
216 * target reflects both the current log tail and log head in xlog_grant_head_wake()
220 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
223 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wake()
228 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
238 struct xlog *log, in xlog_grant_head_wait() argument
247 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
249 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
254 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
256 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
258 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
261 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
263 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
273 * Atomically get the log space required for a log ticket.
291 struct xlog *log, in xlog_grant_head_check() argument
299 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
307 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
308 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
311 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
313 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
319 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
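
The xlog_grant_head_wake() and xlog_grant_head_check() hits above describe the reservation policy: waiters sleep on the grant head in FIFO order, a wake pass stops at the first waiter that no longer fits, and a new ticket is granted immediately only if the queue can be drained and enough space remains for it too. The toy model below (invented names, no locking or real wait lists, not the kernel code) shows that shape:

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model: waiters queue in FIFO order on a grant head and are only woken
 * front-to-back while enough free bytes remain, so a large reservation at the
 * head of the queue holds back later, smaller ones.
 */
#define MAX_WAITERS	8

struct grant_queue {
	int	need_bytes[MAX_WAITERS];	/* FIFO of sleeping reservations */
	int	count;
};

/* Shape of xlog_grant_head_wake(): wake waiters in order until space runs out. */
static int grant_queue_wake(struct grant_queue *q, int *free_bytes)
{
	int woken = 0;

	while (woken < q->count && q->need_bytes[woken] <= *free_bytes) {
		*free_bytes -= q->need_bytes[woken];
		woken++;
	}
	return woken;
}

/*
 * Shape of xlog_grant_head_check(): grant immediately only if any queued
 * waiters can all be satisfied first and there is still room for this
 * ticket; otherwise the caller must sleep on the queue.
 */
static bool grant_head_check(struct grant_queue *q, int *free_bytes, int need_bytes)
{
	if (q->count && grant_queue_wake(q, free_bytes) < q->count)
		return false;
	if (*free_bytes < need_bytes)
		return false;
	*free_bytes -= need_bytes;
	return true;
}

int main(void)
{
	struct grant_queue q = { .need_bytes = { 4096, 512 }, .count = 2 };
	int free_bytes = 2048;

	/* the 4096-byte waiter at the head blocks a new 256-byte reservation too */
	printf("granted=%d free=%d\n", grant_head_check(&q, &free_bytes, 256), free_bytes);
	return 0;
}
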
355 * Never write to the log on norecovery mounts, if the block device is in xfs_log_writable()
357 * allow internal writes for log recovery and unmount purposes, so don't in xfs_log_writable()
377 struct xlog *log = mp->m_log; in xfs_log_regrant() local
381 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
389 * the log. Just add one to the existing tid so that we can see chains in xfs_log_regrant()
390 * of rolling transactions in the log easily. in xfs_log_regrant()
394 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
402 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
404 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
409 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
410 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
411 xlog_verify_grant_tail(log); in xfs_log_regrant()
426 * Reserve log space and return a ticket corresponding to the reservation.
428 * Each reservation is going to reserve extra space for a log record header.
429 * When writes happen to the on-disk log, we don't subtract the length of the
430 * log record header from any reservation. By wasting space in each
442 struct xlog *log = mp->m_log; in xfs_log_reserve() local
449 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
455 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent); in xfs_log_reserve()
458 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
461 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
463 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
468 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
469 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
470 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
471 xlog_verify_grant_tail(log); in xfs_log_reserve()
487 struct xlog *log, in __xlog_state_release_iclog() argument
490 lockdep_assert_held(&log->l_icloglock); in __xlog_state_release_iclog()
494 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in __xlog_state_release_iclog()
498 xlog_verify_tail_lsn(log, iclog, tail_lsn); in __xlog_state_release_iclog()
513 struct xlog *log, in xlog_state_release_iclog() argument
516 lockdep_assert_held(&log->l_icloglock); in xlog_state_release_iclog()
522 __xlog_state_release_iclog(log, iclog)) { in xlog_state_release_iclog()
523 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
524 xlog_sync(log, iclog); in xlog_state_release_iclog()
525 spin_lock(&log->l_icloglock); in xlog_state_release_iclog()
535 struct xlog *log = iclog->ic_log; in xfs_log_release_iclog() local
538 if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) { in xfs_log_release_iclog()
540 sync = __xlog_state_release_iclog(log, iclog); in xfs_log_release_iclog()
541 spin_unlock(&log->l_icloglock); in xfs_log_release_iclog()
545 xlog_sync(log, iclog); in xfs_log_release_iclog()
549 * Mount a log filesystem
552 * log_target - buftarg of on-disk log device
554 * num_bblocks - Number of BBSIZE blocks in on-disk log
586 * Validate the given log space and drop a critical message via syslog in xfs_log_mount()
587 * if the log size is so small that it would lead to unexpected in xfs_log_mount()
588 * situations during the transaction log space reservation stage. in xfs_log_mount()
592 * remedy the situation as there is no way to grow the log (short of in xfs_log_mount()
597 * filesystem with a log that is too small. in xfs_log_mount()
603 "Log size %d blocks too small, minimum size is %d blocks", in xfs_log_mount()
608 "Log size %d blocks too large, maximum size is %lld blocks", in xfs_log_mount()
613 "log size %lld bytes too large, maximum size is %lld bytes", in xfs_log_mount()
620 "log stripe unit %u bytes must be a multiple of block size", in xfs_log_mount()
627 * Log check errors are always fatal on v5; or whenever bad in xfs_log_mount()
631 xfs_crit(mp, "AAIEEE! Log failed size checks. Abort!"); in xfs_log_mount()
635 xfs_crit(mp, "Log size out of supported range."); in xfs_log_mount()
637 "Continuing onwards, but if log hangs are experienced then please report this message in the bug re… in xfs_log_mount()
641 * Initialize the AIL now we have a log. in xfs_log_mount()
651 * skip log recovery on a norecovery mount. pretend it all in xfs_log_mount()
665 xfs_warn(mp, "log mount/recovery failed: error %d", in xfs_log_mount()
673 "log"); in xfs_log_mount()
681 * Now the log has been fully initialised and we know where our in xfs_log_mount()
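
The xfs_log_mount() hits above are the mount-time sanity checks on log geometry: a minimum and maximum size in blocks, a maximum size in bytes, and a log stripe unit that must be a multiple of the block size (fatal on v5 filesystems, a warning on older formats). Below is a hedged sketch of that validation shape; EXAMPLE_MIN_LOG_BLOCKS and the other limits are placeholders, not the real XFS constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder limits for illustration only; the real bounds live in the XFS headers. */
#define EXAMPLE_MIN_LOG_BLOCKS	512
#define EXAMPLE_MAX_LOG_BLOCKS	(1024 * 1024)
#define EXAMPLE_MAX_LOG_BYTES	(2ULL * 1024 * 1024 * 1024)

struct log_geometry {
	uint32_t	blocksize;	/* filesystem block size in bytes */
	uint32_t	log_blocks;	/* log length in filesystem blocks */
	uint32_t	log_sunit;	/* log stripe unit in bytes, 0 if unset */
};

/* Returns true if the geometry passes the same kinds of checks the mount path makes. */
static bool log_geometry_valid(const struct log_geometry *g)
{
	uint64_t log_bytes = (uint64_t)g->log_blocks * g->blocksize;

	if (g->log_blocks < EXAMPLE_MIN_LOG_BLOCKS) {
		fprintf(stderr, "log too small: %u blocks, minimum %u\n",
			g->log_blocks, EXAMPLE_MIN_LOG_BLOCKS);
		return false;
	}
	if (g->log_blocks > EXAMPLE_MAX_LOG_BLOCKS) {
		fprintf(stderr, "log too large: %u blocks, maximum %u\n",
			g->log_blocks, EXAMPLE_MAX_LOG_BLOCKS);
		return false;
	}
	if (log_bytes > EXAMPLE_MAX_LOG_BYTES) {
		fprintf(stderr, "log too large: %llu bytes, maximum %llu\n",
			(unsigned long long)log_bytes,
			(unsigned long long)EXAMPLE_MAX_LOG_BYTES);
		return false;
	}
	if (g->log_sunit && g->log_sunit % g->blocksize) {
		fprintf(stderr, "stripe unit %u not a multiple of block size %u\n",
			g->log_sunit, g->blocksize);
		return false;
	}
	return true;
}

int main(void)
{
	struct log_geometry g = { .blocksize = 4096, .log_blocks = 10240, .log_sunit = 65536 };

	printf("valid=%d\n", log_geometry_valid(&g));
	return 0;
}
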
703 * If we finish recovery successfully, start the background log work. If we are
724 * During the second phase of log recovery, we need iget and in xfs_log_mount_finish()
727 * of inodes before we're done replaying log items on those in xfs_log_mount_finish()
736 * in log recovery failure. We have to evict the unreferenced in xfs_log_mount_finish()
751 * Drain the buffer LRU after log recovery. This is required for v4 in xfs_log_mount_finish()
768 /* Make sure the log is dead if we're returning failure. */ in xfs_log_mount_finish()
776 * the log.
787 * Wait for the iclog to be written to disk, or return an error if the log has been in xlog_wait_on_iclog()
795 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog() local
797 if (!XLOG_FORCED_SHUTDOWN(log) && in xlog_wait_on_iclog()
800 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); in xlog_wait_on_iclog()
801 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
803 spin_unlock(&log->l_icloglock); in xlog_wait_on_iclog()
806 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_wait_on_iclog()
818 struct xlog *log, in xlog_write_unmount_record() argument
838 return xlog_write(log, &vec, ticket, lsn, NULL, flags, false); in xlog_write_unmount_record()
843 * log.
847 struct xlog *log) in xlog_unmount_write() argument
849 struct xfs_mount *mp = log->l_mp; in xlog_unmount_write()
860 error = xlog_write_unmount_record(log, tic, &lsn, flags); in xlog_unmount_write()
863 * transitioning log state to IOERROR. Just continue... in xlog_unmount_write()
869 spin_lock(&log->l_icloglock); in xlog_unmount_write()
870 iclog = log->l_iclog; in xlog_unmount_write()
873 xlog_state_switch_iclogs(log, iclog, 0); in xlog_unmount_write()
877 error = xlog_state_release_iclog(log, iclog); in xlog_unmount_write()
881 trace_xfs_log_umount_write(log, tic); in xlog_unmount_write()
882 xfs_log_ticket_ungrant(log, tic); in xlog_unmount_write()
888 struct xlog *log) in xfs_log_unmount_verify_iclog() argument
890 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog()
895 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
909 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
916 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_unmount_write()
921 * record to force log recovery at next mount, after which the summary in xfs_log_unmount_write()
932 xfs_log_unmount_verify_iclog(log); in xfs_log_unmount_write()
933 xlog_unmount_write(log); in xfs_log_unmount_write()
937 * Empty the log for unmount/freeze.
939 * To do this, we first need to shut down the background log work so it is not
940 * trying to cover the log as we clean up. We then need to unpin all objects in
941 * the log so we can then flush them out. Once they have completed their IO and
968 * Shut down and release the AIL and Log.
971 * from the AIL so that the log is empty before we write the unmount record to
972 * the log. Once this is done, we can tear down the AIL and the log.
1007 * Wake up processes waiting for log space after we have moved the log tail.
1013 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1016 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
1019 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1020 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1022 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1023 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1024 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1025 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1028 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1029 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1031 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1032 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1033 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1034 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1040 * covered. To begin the transition to the idle state firstly the log needs to
1042 * we start attempting to cover the log.
1045 * informed that dummy transactions are required to move the log into the idle
1049 * cover the log as we may be in a situation where there isn't log space
1051 * tail of the log is pinned by an item that is modified in the CIL. Hence
1053 * can't start trying to idle the log until both the CIL and AIL are empty.
1058 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1064 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1067 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1068 switch (log->l_covered_state) { in xfs_log_need_covered()
1075 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1077 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1081 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1082 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1084 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1090 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
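
The xfs_log_need_covered() hits and the comment before them give the preconditions for idling (covering) the log: the CIL must be empty, the AIL must be empty, and all iclogs must have been written out, after which two dummy records get logged via roughly a NEED -> DONE -> NEED2 -> DONE2 walk. Below is a small decision-only sketch, with an invented enum standing in for the kernel's l_covered_state values and none of the state transitions that happen elsewhere:

#include <stdbool.h>
#include <stdio.h>

/* Invented names; the kernel keeps an equivalent l_covered_state on the xlog. */
enum cover_state {
	COVER_IDLE,
	COVER_NEED,	/* first dummy record still required */
	COVER_DONE,
	COVER_NEED2,	/* second dummy record still required */
	COVER_DONE2,
};

/*
 * Decide whether another dummy record is needed to idle the log.  Nothing can
 * happen until the CIL and AIL are empty and all iclogs have been written out;
 * only then does a call advance NEED -> DONE (or NEED2 -> DONE2) and ask the
 * caller to log one more dummy transaction.
 */
static bool log_need_covered(enum cover_state *state, bool cil_empty,
			     bool ail_empty, bool iclogs_empty)
{
	if (!cil_empty)
		return false;
	if (*state != COVER_NEED && *state != COVER_NEED2)
		return false;
	if (!ail_empty || !iclogs_empty)
		return false;

	*state = (*state == COVER_NEED) ? COVER_DONE : COVER_DONE2;
	return true;
}

int main(void)
{
	enum cover_state s = COVER_NEED;

	printf("%d\n", log_need_covered(&s, true, false, true));	/* AIL busy: 0 */
	printf("%d\n", log_need_covered(&s, true, true, true));		/* covered once: 1 */
	return 0;
}
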
1095 * We may be holding the log iclog lock upon entering this routine.
1101 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1108 * To make sure we always have a valid LSN for the log tail we keep in xlog_assign_tail_lsn_locked()
1109 * track of the last LSN which was committed in log->l_last_sync_lsn, in xlog_assign_tail_lsn_locked()
1116 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1117 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1118 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1136 * Return the space in the log between the tail and the head. The head
1140 * in the log. This works for all places where this function is called
1147 * result is that we return the size of the log as the amount of space left.
1151 struct xlog *log, in xlog_space_left() argument
1161 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1164 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1174 * log as the amount of space left. in xlog_space_left()
1176 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1177 xfs_alert(log->l_mp, in xlog_space_left()
1180 xfs_alert(log->l_mp, in xlog_space_left()
1184 free_bytes = log->l_logsize; in xlog_space_left()
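
The xlog_space_left() hits above compute the free bytes between the on-disk tail and the reservation head, taking cycle wrap into account, and fall back to reporting the whole log as free when the head appears to be behind the tail (an "impossible" state that is logged loudly and then papered over so the filesystem can keep running). A standalone sketch of that computation, assuming an invented (cycle, bytes) position struct; the kernel's branch ordering differs slightly:

#include <stdint.h>
#include <stdio.h>

/* Head and tail of the log, each as a (cycle, byte offset) pair; invented struct. */
struct log_pos {
	int32_t	cycle;
	int32_t	bytes;
};

/*
 * Free bytes between the on-disk tail and the reservation head.  The head may
 * be at most one cycle ahead of the tail; a head that appears to be behind
 * the tail is treated as corruption, and (like the lines above) the whole log
 * size is returned so the filesystem can keep going and rewrite the bad state.
 */
static int32_t log_space_left(int32_t logsize, struct log_pos tail, struct log_pos head)
{
	if (tail.cycle == head.cycle && head.bytes >= tail.bytes)
		return logsize - (head.bytes - tail.bytes);	/* same lap */
	if (tail.cycle + 1 == head.cycle && head.bytes <= tail.bytes)
		return tail.bytes - head.bytes;			/* head lapped once */
	if (tail.cycle + 1 < head.cycle)
		return 0;					/* log is full (or worse) */

	/* head behind tail: report it and pretend the log is empty */
	fprintf(stderr, "log_space_left: head behind tail\n");
	return logsize;
}

int main(void)
{
	struct log_pos tail = { .cycle = 3, .bytes = 4096 };
	struct log_pos head = { .cycle = 4, .bytes = 1024 };

	printf("free=%d\n", log_space_left(65536, tail, head));	/* prints free=3072 */
	return 0;
}
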
1196 struct xlog *log = iclog->ic_log; in xlog_ioend_work() local
1209 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { in xlog_ioend_work()
1210 xfs_alert(log->l_mp, "log I/O error %d", error); in xlog_ioend_work()
1211 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_ioend_work()
1227 * Return size of each in-core log record buffer.
1237 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1244 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1245 log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1250 log->l_iclog_heads = in xlog_get_iclog_buffer_size()
1252 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; in xlog_get_iclog_buffer_size()
1265 * disk. If there is nothing dirty, then we might need to cover the log to
1272 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1274 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1279 * Dump a transaction into the log that contains no real change. in xfs_log_worker()
1280 * This is needed to stamp the current tail LSN into the log in xfs_log_worker()
1285 * will prevent log covering from making progress. Hence we in xfs_log_worker()
1286 * synchronously log the superblock instead to ensure the in xfs_log_worker()
1301 * This routine initializes some of the log structure for a given mount point.
1312 struct xlog *log; in xlog_alloc_log() local
1320 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1321 if (!log) { in xlog_alloc_log()
1322 xfs_warn(mp, "Log allocation failed: No memory!"); in xlog_alloc_log()
1326 log->l_mp = mp; in xlog_alloc_log()
1327 log->l_targ = log_target; in xlog_alloc_log()
1328 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1329 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1330 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1331 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1332 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1333 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1335 log->l_prev_block = -1; in xlog_alloc_log()
1336 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */ in xlog_alloc_log()
1337 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1338 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1339 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1341 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1342 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1348 xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)", in xlog_alloc_log()
1355 xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)", in xlog_alloc_log()
1360 /* for larger sector sizes, must have v2 or external log */ in xlog_alloc_log()
1361 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1364 "log sector size (0x%x) invalid for configuration.", in xlog_alloc_log()
1369 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1371 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1373 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1374 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1376 iclogp = &log->l_iclog; in xlog_alloc_log()
1384 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1385 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1387 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * in xlog_alloc_log()
1398 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask, in xlog_alloc_log()
1403 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1409 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1410 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1415 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1417 iclog->ic_log = log; in xlog_alloc_log()
1421 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1430 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1431 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1433 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", in xlog_alloc_log()
1436 if (!log->l_ioend_workqueue) in xlog_alloc_log()
1439 error = xlog_cil_init(log); in xlog_alloc_log()
1442 return log; in xlog_alloc_log()
1445 destroy_workqueue(log->l_ioend_workqueue); in xlog_alloc_log()
1447 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1451 if (prev_iclog == log->l_iclog) in xlog_alloc_log()
1455 kmem_free(log); in xlog_alloc_log()
1462 * ticket to close off a running log write. Return the lsn of the commit record.
1466 struct xlog *log, in xlog_commit_record() argument
1482 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_commit_record()
1485 error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS, in xlog_commit_record()
1488 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_commit_record()
1493 * Compute the LSN that we'd need to push the log tail towards in order to have
1494 * (a) enough on-disk log space to log the number of bytes specified, (b) at
1495 * least 25% of the log space free, and (c) at least 256 blocks free. If the
1496 * log free space already meets all three thresholds, this function returns
1501 struct xlog *log, in xlog_grant_push_threshold() argument
1512 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_threshold()
1514 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_threshold()
1519 * log to the maximum of what the caller needs, one quarter of the in xlog_grant_push_threshold()
1520 * log, and 256 blocks. in xlog_grant_push_threshold()
1523 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_threshold()
1528 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_threshold()
1531 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_threshold()
1532 threshold_block -= log->l_logBBsize; in xlog_grant_push_threshold()
1539 * log record known to be on disk. Use a snapshot of the last sync lsn in xlog_grant_push_threshold()
1542 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_threshold()
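
The xlog_grant_push_threshold() hits spell out the push policy from the comment above: keep max(need_bytes, a quarter of the log, 256 blocks) free, and compute the AIL push target by adding that many blocks to the current tail, wrapping the cycle if it runs past the end of the log. A rough sketch of that arithmetic follows; the kernel additionally caps the target at the last LSN known to be on disk, which is skipped here, and all names are invented.

#include <stdint.h>
#include <stdio.h>

#define BBSIZE		512				/* basic block size used by the log */
#define BTOBB(bytes)	(((bytes) + BBSIZE - 1) / BBSIZE)

struct log_lsn {
	int32_t	cycle;
	int32_t	block;
};

/*
 * Where the tail needs to be pushed so that max(need_bytes, a quarter of the
 * log, 256 blocks) becomes free.  free_blocks is the space free right now and
 * log_blocks is the total log size, both in basic blocks.
 */
static struct log_lsn grant_push_threshold(struct log_lsn tail, int32_t log_blocks,
					   int32_t free_blocks, int32_t need_bytes)
{
	int32_t free_threshold = BTOBB(need_bytes);
	struct log_lsn target = tail;

	if (free_threshold < log_blocks / 4)
		free_threshold = log_blocks / 4;
	if (free_threshold < 256)
		free_threshold = 256;
	if (free_blocks >= free_threshold)
		return tail;				/* enough space already: nothing to push */

	target.block += free_threshold;
	if (target.block >= log_blocks) {		/* target wraps past the end of the log */
		target.block -= log_blocks;
		target.cycle++;
	}
	return target;
}

int main(void)
{
	struct log_lsn tail = { .cycle = 7, .block = 30000 };
	struct log_lsn t = grant_push_threshold(tail, 32768, 1000, 8192);

	printf("push tail to cycle %d, block %d\n", t.cycle, t.block);	/* cycle 8, block 5424 */
	return 0;
}
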
1550 * Push the tail of the log if we need to do so to maintain the free log space
1552 * policy which pushes on an lsn which is further along in the log once we
1558 struct xlog *log, in xlog_grant_push_ail() argument
1563 threshold_lsn = xlog_grant_push_threshold(log, need_bytes); in xlog_grant_push_ail()
1564 if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1572 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
1580 struct xlog *log, in xlog_pack_data() argument
1600 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1611 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1617 * Calculate the checksum for a log buffer.
1624 struct xlog *log, in xlog_cksum() argument
1637 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1689 struct xlog *log, in xlog_write_iclog() argument
1695 ASSERT(bno < log->l_logBBsize); in xlog_write_iclog()
1703 * across the log IO to achieve that. in xlog_write_iclog()
1709 * the log state machine to propagate I/O errors instead of in xlog_write_iclog()
1720 bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); in xlog_write_iclog()
1721 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1728 * writeback throttle from throttling log writes behind background in xlog_write_iclog()
1737 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1744 * If this log buffer would straddle the end of the log we will have in xlog_write_iclog()
1747 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_write_iclog()
1750 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1756 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1764 * written to the start of the log. Watch out for the header magic
1769 struct xlog *log, in xlog_split_iclog() argument
1774 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); in xlog_split_iclog()
1788 struct xlog *log, in xlog_calc_iclog_size() argument
1795 use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_calc_iclog_size()
1796 log->l_mp->m_sb.sb_logsunit > 1; in xlog_calc_iclog_size()
1799 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
1801 /* Round out the log write size */ in xlog_calc_iclog_size()
1804 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_calc_iclog_size()
1813 ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit); in xlog_calc_iclog_size()
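
The xlog_calc_iclog_size() hits show how each iclog write is sized: header plus buffered data, rounded up to the log stripe unit when one is configured (v2 logs), otherwise to a basic block, with the padding returned so xlog_sync() can charge it to both grant heads as the lines a little further down do. A small sketch of the rounding, using invented helper names:

#include <stdint.h>
#include <stdio.h>

#define BBSIZE	512	/* basic block size */

/* Round count up to a multiple of unit. */
static uint32_t roundup_to(uint32_t count, uint32_t unit)
{
	return ((count + unit - 1) / unit) * unit;
}

/*
 * Size an iclog write: header bytes plus the data currently in the iclog,
 * rounded up to the stripe unit when one is configured, otherwise to a basic
 * block.  *roundoff gets the padding so the caller can account it against
 * the grant heads.
 */
static uint32_t calc_iclog_write_size(uint32_t hsize, uint32_t data_bytes,
				      uint32_t stripe_unit, uint32_t *roundoff)
{
	uint32_t count_init = hsize + data_bytes;
	uint32_t unit = stripe_unit > 1 ? stripe_unit : BBSIZE;
	uint32_t count = roundup_to(count_init, unit);

	*roundoff = count - count_init;
	return count;
}

int main(void)
{
	uint32_t roundoff;
	uint32_t count = calc_iclog_write_size(512, 9000, 32768, &roundoff);

	printf("write %u bytes, %u of padding\n", count, roundoff);	/* 32768, 23256 */
	return 0;
}
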
1820 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1822 * ptr in the log to point to the next available iclog. This allows further
1824 * Before an in-core log can be written out, the data section must be scanned
1836 * log will require grabbing the lock though.
1838 * The entire log manager uses a logical block numbering scheme. Only
1839 * xlog_write_iclog knows about the fact that the log may not start with
1844 struct xlog *log, in xlog_sync() argument
1855 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
1858 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1859 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1862 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1866 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) in xlog_sync()
1870 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1871 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1876 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1877 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
1882 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1885 * Intentionally corrupt the log record CRC based on the error injection in xlog_sync()
1886 * frequency, if defined. This facilitates testing log recovery in the in xlog_sync()
1887 * event of torn writes. Hence, set the IOABORT state to abort the log in xlog_sync()
1892 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
1895 xfs_warn(log->l_mp, in xlog_sync()
1896 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.", in xlog_sync()
1902 * Flush the data device before flushing the log to make sure all meta in xlog_sync()
1904 * stamping the new log tail LSN into the log buffer. For an external in xlog_sync()
1905 * log we need to issue the flush explicitly, and unfortunately in xlog_sync()
1906 * synchronously here; for an internal log we can simply use the block in xlog_sync()
1909 if (log->l_targ != log->l_mp->m_ddev_targp || split) { in xlog_sync()
1910 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1914 xlog_verify_iclog(log, iclog, count); in xlog_sync()
1915 xlog_write_iclog(log, iclog, bno, count, need_flush); in xlog_sync()
1919 * Deallocate a log structure
1923 struct xlog *log) in xlog_dealloc_log() argument
1928 xlog_cil_destroy(log); in xlog_dealloc_log()
1931 * Cycle all the iclogbuf locks to make sure all log IO completion in xlog_dealloc_log()
1934 iclog = log->l_iclog; in xlog_dealloc_log()
1935 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1941 iclog = log->l_iclog; in xlog_dealloc_log()
1942 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1949 log->l_mp->m_log = NULL; in xlog_dealloc_log()
1950 destroy_workqueue(log->l_ioend_workqueue); in xlog_dealloc_log()
1951 kmem_free(log); in xlog_dealloc_log()
1959 struct xlog *log, in xlog_state_finish_copy() argument
1964 lockdep_assert_held(&log->l_icloglock); in xlog_state_finish_copy()
2050 xfs_warn(mp, " log res = %d", tp->t_log_res); in xlog_print_trans()
2051 xfs_warn(mp, " log count = %d", tp->t_log_count); in xlog_print_trans()
2056 /* dump each log item */ in xlog_print_trans()
2062 xfs_warn(mp, "log item: "); in xlog_print_trans()
2072 /* dump each iovec for the log item */ in xlog_print_trans()
2089 * Calculate the potential space needed by the log vector. We may need a start
2105 /* we don't write ordered log vectors */ in xlog_write_calc_vec_length()
2139 struct xlog *log, in xlog_write_setup_ophdr() argument
2162 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2172 * Set up the parameters of the region copy into the log. This has
2173 * to handle region write split across multiple log buffers - this
2204 /* partial write of region, needs extra log op header reservation */ in xlog_write_setup_copy()
2213 /* account for new log op header */ in xlog_write_setup_copy()
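
The xlog_write_setup_copy() hits describe splitting one region across log buffers: if the remainder of the region fits the space left in the current iclog the copy completes there, otherwise only the available bytes are written, the op header is marked as a continuation, and an extra op header is charged to the ticket (the "account for new log op header" line above). Below is a sketch of just the split bookkeeping, with invented names and without the op-header flag handling:

#include <stdbool.h>
#include <stdio.h>

/* Result of planning one copy step of a region into the current log buffer. */
struct copy_plan {
	int	copy_off;	/* offset into the region to copy from */
	int	copy_len;	/* bytes to copy this time around */
	bool	continued;	/* region continues into the next log buffer */
};

/*
 * Plan the next chunk of a region write.  space_available is what is left in
 * the current in-core log buffer, region_len is the full region size, and
 * *bytes_consumed tracks how much of the region earlier iterations already
 * wrote.  A partial copy costs one extra op header of reservation, which the
 * real code subtracts from the ticket.
 */
static struct copy_plan plan_region_copy(int space_available, int region_len,
					 int *bytes_consumed)
{
	struct copy_plan plan;
	int still_to_copy = region_len - *bytes_consumed;

	plan.copy_off = *bytes_consumed;
	if (still_to_copy <= space_available) {
		plan.copy_len = still_to_copy;		/* region finishes in this buffer */
		plan.continued = false;
		*bytes_consumed = 0;
	} else {
		plan.copy_len = space_available;	/* partial copy, to be continued */
		plan.continued = true;
		*bytes_consumed += plan.copy_len;
	}
	return plan;
}

int main(void)
{
	int consumed = 0;
	struct copy_plan p1 = plan_region_copy(4096, 6000, &consumed);
	struct copy_plan p2 = plan_region_copy(8192, 6000, &consumed);

	printf("first: %d@%d cont=%d, second: %d@%d cont=%d\n",
	       p1.copy_len, p1.copy_off, p1.continued,
	       p2.copy_len, p2.copy_off, p2.continued);
	return 0;
}
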
2222 struct xlog *log, in xlog_write_copy_finish() argument
2239 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2240 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2251 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2252 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2257 xlog_state_switch_iclogs(log, iclog, 0); in xlog_write_copy_finish()
2263 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2271 error = xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2272 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2277 * Write some region out to in-core log
2291 * 2. Write log operation header (header per region)
2298 * 5. Release iclog for potential flush to on-disk log.
2308 * on all log operation writes which don't contain the end of the
2309 * region. The XLOG_END_TRANS bit is used for the in-core log
2318 struct xlog *log, in xlog_write() argument
2346 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2348 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2349 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2358 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2381 /* ordered log vectors have no regions to write */ in xlog_write()
2393 * Before we start formatting log vectors, we need to in xlog_write()
2403 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2416 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2421 * Unmount records just log an opheader, so can have in xlog_write()
2441 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2458 * count), then we also need to get more log space. If in xlog_write()
2482 spin_lock(&log->l_icloglock); in xlog_write()
2483 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2488 error = xlog_state_release_iclog(log, iclog); in xlog_write()
2490 spin_unlock(&log->l_icloglock); in xlog_write()
2533 struct xlog *log, in xlog_state_activate_iclogs() argument
2536 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs()
2547 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2582 struct xlog *log, in xlog_state_clean_iclog() argument
2589 xlog_state_activate_iclogs(log, &iclogs_changed); in xlog_state_clean_iclog()
2593 log->l_covered_state = xlog_covered_state(log->l_covered_state, in xlog_state_clean_iclog()
2600 struct xlog *log) in xlog_get_lowest_lsn() argument
2602 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn()
2613 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
2621 * tail of the log half way through a transaction as this may be the only
2622 * transaction in the log and moving the tail to point to the middle of it
2633 * amount of log space bound up in this committing transaction then the
2635 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2637 * no longer bound by the old log head location and can move forwards and make
2642 struct xlog *log, in xlog_state_set_callback() argument
2648 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_set_callback()
2654 atomic64_set(&log->l_last_sync_lsn, header_lsn); in xlog_state_set_callback()
2655 xlog_grant_push_ail(log, 0); in xlog_state_set_callback()
2665 struct xlog *log, in xlog_state_iodone_process_iclog() argument
2681 * Between marking a filesystem SHUTDOWN and stopping the log, in xlog_state_iodone_process_iclog()
2682 * we do flush all iclogs to disk (if there wasn't a log I/O in xlog_state_iodone_process_iclog()
2696 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_iodone_process_iclog()
2699 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2722 struct xlog *log, in xlog_state_do_iclog_callbacks() argument
2724 __releases(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2725 __acquires(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2727 spin_unlock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2744 spin_lock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2750 struct xlog *log) in xlog_state_do_callback() argument
2759 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2763 * log. Reset this starting point each time the log is in xlog_state_do_callback()
2769 first_iclog = log->l_iclog; in xlog_state_do_callback()
2770 iclog = log->l_iclog; in xlog_state_do_callback()
2776 if (xlog_state_iodone_process_iclog(log, iclog, in xlog_state_do_callback()
2791 xlog_state_do_iclog_callbacks(log, iclog); in xlog_state_do_callback()
2792 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_state_do_callback()
2795 xlog_state_clean_iclog(log, iclog); in xlog_state_do_callback()
2802 xfs_warn(log->l_mp, in xlog_state_do_callback()
2808 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE || in xlog_state_do_callback()
2809 log->l_iclog->ic_state == XLOG_STATE_IOERROR) in xlog_state_do_callback()
2810 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2812 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2821 * when we reach the end of the physical log, get turned into 2 separate
2827 * global state machine log lock.
2833 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2835 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2840 * split log writes, on the second, we shut down the file system and in xlog_state_done_syncing()
2843 if (!XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_done_syncing()
2854 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2855 xlog_state_do_callback(log); in xlog_state_done_syncing()
2859 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2869 * log's data space.
2870 * * in-core log pointer to which xlog_write() should write.
2871 * * boolean indicating this is a continued write to an in-core log.
2872 * If this is the last write, then the in-core log's offset field
2878 struct xlog *log, in xlog_state_get_iclog_space() argument
2890 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2891 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
2892 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2896 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2898 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2900 /* Wait for log writes to have flushed */ in xlog_state_get_iclog_space()
2901 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2916 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2918 log->l_iclog_hsize, in xlog_state_get_iclog_space()
2920 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2922 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2923 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2938 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2948 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
2949 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2966 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2971 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2986 struct xlog *log, in xfs_log_ticket_regrant() argument
2989 trace_xfs_log_ticket_regrant(log, ticket); in xfs_log_ticket_regrant()
2994 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
2996 xlog_grant_sub_space(log, &log->l_write_head.grant, in xfs_log_ticket_regrant()
3001 trace_xfs_log_ticket_regrant_sub(log, ticket); in xfs_log_ticket_regrant()
3005 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
3007 trace_xfs_log_ticket_regrant_exit(log, ticket); in xfs_log_ticket_regrant()
3032 struct xlog *log, in xfs_log_ticket_ungrant() argument
3037 trace_xfs_log_ticket_ungrant(log, ticket); in xfs_log_ticket_ungrant()
3042 trace_xfs_log_ticket_ungrant_sub(log, ticket); in xfs_log_ticket_ungrant()
3054 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xfs_log_ticket_ungrant()
3055 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xfs_log_ticket_ungrant()
3057 trace_xfs_log_ticket_ungrant_exit(log, ticket); in xfs_log_ticket_ungrant()
3059 xfs_log_space_wake(log->l_mp); in xfs_log_ticket_ungrant()
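
The xfs_log_ticket_ungrant() hits show what gets handed back when a ticket is released: the unused part of the current reservation plus, for permanent tickets, a full unit for every remaining count, subtracted from both the reserve and write grant heads before waiters are woken. A tiny sketch of that byte count, assuming an invented ticket struct:

#include <stdio.h>

/* Minimal stand-in for the ticket fields that matter here. */
struct log_ticket {
	int	curr_res;	/* unused bytes left in the current reservation */
	int	unit_res;	/* bytes per reservation unit */
	int	cnt;		/* unused reservation units (permanent tickets only) */
};

/*
 * Bytes to hand back to both grant heads when a ticket is ungranted: whatever
 * is left of the current reservation plus every unused unit on a permanent
 * ticket.
 */
static int ticket_ungrant_bytes(const struct log_ticket *tic)
{
	int bytes = tic->curr_res;

	if (tic->cnt > 0)
		bytes += tic->unit_res * tic->cnt;
	return bytes;
}

int main(void)
{
	struct log_ticket tic = { .curr_res = 1500, .unit_res = 8192, .cnt = 2 };

	printf("release %d bytes to each grant head\n", ticket_ungrant_bytes(&tic));
	return 0;
}
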
3069 struct xlog *log, in xlog_state_switch_iclogs() argument
3074 assert_spin_locked(&log->l_icloglock); in xlog_state_switch_iclogs()
3079 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3080 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3081 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3083 /* roll log?: ic_offset changed later */ in xlog_state_switch_iclogs()
3084 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3086 /* Round up to next log-sunit */ in xlog_state_switch_iclogs()
3087 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3088 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3089 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3090 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3093 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3097 * when the log wraps to the next cycle. This is to support the in xlog_state_switch_iclogs()
3101 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3102 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3104 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3105 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3106 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3108 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3109 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
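
The xlog_state_switch_iclogs() hits show how the head cursor advances when an iclog is switched out: remember the previous block and cycle, step the current block past the data plus header, round up to the stripe unit on v2 logs, and wrap into a new cycle at the end of the log, skipping the one cycle value that would collide with the record magic number. A sketch of the cursor arithmetic, with RESERVED_CYCLE as a stand-in for that magic value:

#include <stdint.h>
#include <stdio.h>

#define BBSIZE		512
#define BTOBB(bytes)	(((bytes) + BBSIZE - 1) / BBSIZE)
#define RESERVED_CYCLE	0x12345678	/* placeholder; the real log skips the cycle matching its record magic */

struct log_cursor {
	int32_t	curr_cycle;
	int32_t	curr_block;
};

static int32_t roundup_bb(int32_t block, int32_t sunit_bb)
{
	return ((block + sunit_bb - 1) / sunit_bb) * sunit_bb;
}

/*
 * Advance the head cursor past an iclog of data_bytes payload plus hsize
 * bytes of header, keeping stripe-unit alignment and wrapping the cycle at
 * the end of the log.  The reserved cycle value is skipped, as above.
 */
static void advance_log_cursor(struct log_cursor *c, int32_t log_bblocks,
			       int32_t data_bytes, int32_t hsize, int32_t sunit_bytes)
{
	c->curr_block += BTOBB(data_bytes) + BTOBB(hsize);
	if (sunit_bytes > 1)
		c->curr_block = roundup_bb(c->curr_block, BTOBB(sunit_bytes));

	if (c->curr_block >= log_bblocks) {
		c->curr_block -= log_bblocks;
		c->curr_cycle++;
		if (c->curr_cycle == RESERVED_CYCLE)
			c->curr_cycle++;
	}
}

int main(void)
{
	struct log_cursor c = { .curr_cycle = 5, .curr_block = 20400 };

	advance_log_cursor(&c, 20480, 32768, 512, 32768);
	printf("cycle=%d block=%d\n", c.curr_cycle, c.curr_block);	/* prints cycle=6 block=0 */
	return 0;
}
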
3113 * Write out all data in the in-core log as of this exact moment in time.
3115 * Data may be written to the in-core log during this call. However,
3144 struct xlog *log = mp->m_log; in xfs_log_force() local
3151 xlog_cil_force(log); in xfs_log_force()
3153 spin_lock(&log->l_icloglock); in xfs_log_force()
3154 iclog = log->l_iclog; in xfs_log_force()
3181 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3182 if (xlog_state_release_iclog(log, iclog)) in xfs_log_force()
3195 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3208 spin_unlock(&log->l_icloglock); in xfs_log_force()
3211 spin_unlock(&log->l_icloglock); in xfs_log_force()
3217 struct xlog *log, in xlog_force_lsn() argument
3225 spin_lock(&log->l_icloglock); in xlog_force_lsn()
3226 iclog = log->l_iclog; in xlog_force_lsn()
3232 if (iclog == log->l_iclog) in xlog_force_lsn()
3246 * refcnt so we can release the log (which drops the ref count). in xlog_force_lsn()
3256 &log->l_icloglock); in xlog_force_lsn()
3260 xlog_state_switch_iclogs(log, iclog, 0); in xlog_force_lsn()
3261 if (xlog_state_release_iclog(log, iclog)) in xlog_force_lsn()
3270 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3273 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3278 * Force the in-core log to disk for a specific LSN.
3280 * Find in-core log with lsn.
3282 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3288 * specific in-core log. When given in-core log finally completes its write
3298 struct xlog *log = mp->m_log; in xfs_log_force_seq() local
3306 lsn = xlog_cil_force_seq(log, seq); in xfs_log_force_seq()
3310 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); in xfs_log_force_seq()
3313 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); in xfs_log_force_seq()
3340 * Figure out the total log space unit (in bytes) that would be
3341 * required for a log ticket.
3348 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3353 * Permanent reservations have up to 'cnt'-1 active log operations in xfs_log_calc_unit_res()
3354 * in the log. A unit in this case is the amount of space for one in xfs_log_calc_unit_res()
3355 * of these log operations. Normal reservations have a cnt of 1 in xfs_log_calc_unit_res()
3359 * which occupy space in the on-disk log. in xfs_log_calc_unit_res()
3374 * Therefore the commit record is in its own Log Record. in xfs_log_calc_unit_res()
3396 * increase the space required enough to require more log and op in xfs_log_calc_unit_res()
3404 * Fundamentally, this means we must pass the entire log vector to in xfs_log_calc_unit_res()
3407 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3419 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3422 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3426 /* log su roundoff */ in xfs_log_calc_unit_res()
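
The xfs_log_calc_unit_res() hits walk through the overhead added on top of the caller's byte count: op headers for the regions and the start/commit records, one log record header for every iclog's worth of data the vector may span (plus a spare for splits), a record header for the commit record in its own log record, and worst-case stripe-unit roundoff. Below is a rough standalone sketch of that accounting; the header sizes are placeholders, and the real function includes a few more terms than shown.

#include <stdint.h>
#include <stdio.h>

#define BBSIZE	512

/* Placeholder sizes; the real op/record header sizes come from the XFS headers. */
#define EXAMPLE_OP_HDR_BYTES	16
#define EXAMPLE_REC_HDR_BYTES	512

static uint32_t howmany_u32(uint32_t n, uint32_t unit)
{
	return (n + unit - 1) / unit;
}

/*
 * Estimate the log space one reservation unit needs: the caller's payload,
 * op headers for its regions plus the start and commit records, one record
 * header per iclog the payload can span (plus one for a possible split),
 * and worst-case roundoff on both the data and the commit record writes.
 */
static uint32_t calc_unit_res(uint32_t payload_bytes, uint32_t nregions,
			      uint32_t iclog_size, uint32_t rec_hdr_bytes,
			      uint32_t stripe_unit)
{
	uint32_t unit = payload_bytes;
	uint32_t iclog_space = iclog_size - rec_hdr_bytes;
	uint32_t num_headers;

	unit += nregions * EXAMPLE_OP_HDR_BYTES;		/* one op header per region */
	unit += 2 * EXAMPLE_OP_HDR_BYTES;			/* start record + commit record */

	num_headers = howmany_u32(unit, iclog_space) + 1;	/* +1 for a split region */
	unit += num_headers * rec_hdr_bytes;

	unit += rec_hdr_bytes;					/* commit record in its own log record */
	if (stripe_unit > 1)
		unit += 2 * stripe_unit;			/* worst-case roundoff, data + commit */
	else
		unit += 2 * BBSIZE;
	return unit;
}

int main(void)
{
	/* e.g. a 100 KiB payload in 8 regions, 32 KiB iclogs, 512-byte record headers */
	printf("unit reservation ~= %u bytes\n",
	       calc_unit_res(100 * 1024, 8, 32 * 1024, EXAMPLE_REC_HDR_BYTES, 0));
	return 0;
}
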
3437 * Allocate and initialise a new log ticket.
3441 struct xlog *log, in xlog_ticket_alloc() argument
3452 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3475 * part of the log in case we trash the log structure.
3479 struct xlog *log, in xlog_verify_dest_ptr() argument
3485 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3486 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3487 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3492 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3508 struct xlog *log) in xlog_verify_grant_tail() argument
3513 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3514 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3517 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3518 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3520 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3524 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3525 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3527 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3535 struct xlog *log, in xlog_verify_tail_lsn() argument
3541 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3543 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3544 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3545 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3547 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3549 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3550 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3552 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3554 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3564 * 4. Check fields of each log operation header for:
3567 * C. Length in log record header is correct according to the
3570 * log, check the preceding blocks of the physical log to make sure all
3575 struct xlog *log, in xlog_verify_iclog() argument
3589 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3590 icptr = log->l_iclog; in xlog_verify_iclog()
3591 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3594 if (icptr != log->l_iclog) in xlog_verify_iclog()
3595 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3596 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3598 /* check log magic numbers */ in xlog_verify_iclog()
3600 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3606 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3636 xfs_warn(log->l_mp, in xlog_verify_iclog()
3667 struct xlog *log) in xlog_state_ioerror() argument
3671 iclog = log->l_iclog; in xlog_state_ioerror()
3675 * From now on, no log flushes will result. in xlog_state_ioerror()
3698 * c. those who're sleeping on log reservations, pinned objects and
3703 * to disk first. This needs to be done before the log is marked as shutdown,
3711 struct xlog *log; in xfs_log_force_umount() local
3714 log = mp->m_log; in xfs_log_force_umount()
3717 * If this happens during log recovery, don't worry about in xfs_log_force_umount()
3718 * locking; the log isn't open for business yet. in xfs_log_force_umount()
3720 if (!log || in xfs_log_force_umount()
3721 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3732 if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3733 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3738 * Flush all the completed transactions to disk before marking the log in xfs_log_force_umount()
3751 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3757 * Mark the log and the iclogs with IO error flags to prevent any in xfs_log_force_umount()
3758 * further log IO from being issued or completed. in xfs_log_force_umount()
3760 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
3761 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
3762 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3765 * We don't want anybody waiting for log reservations after this. That in xfs_log_force_umount()
3771 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
3772 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
3776 * as if the log writes were completed. The abort handling in the log in xfs_log_force_umount()
3780 spin_lock(&log->l_cilp->xc_push_lock); in xfs_log_force_umount()
3781 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
3782 spin_unlock(&log->l_cilp->xc_push_lock); in xfs_log_force_umount()
3783 xlog_state_do_callback(log); in xfs_log_force_umount()
3785 /* return non-zero if log IOERROR transition had already happened */ in xfs_log_force_umount()
3791 struct xlog *log) in xlog_iclogs_empty() argument
3795 iclog = log->l_iclog; in xlog_iclogs_empty()
3803 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
3816 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
3820 * norecovery mode skips mount-time log processing and unconditionally in xfs_log_check_lsn()
3838 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
3843 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
3844 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()
3854 struct xlog *log = mp->m_log; in xfs_log_in_recovery() local
3856 return log->l_flags & XLOG_ACTIVE_RECOVERY; in xfs_log_in_recovery()