Lines Matching refs:log
(Cross-reference hits for the identifier "log", apparently from the Linux kernel's XFS log code, fs/xfs/xfs_log.c. Each entry gives the source line number, the matching line, and its enclosing function; a simplified sketch of the grant-head arithmetic these functions share follows the listing.)

35 	struct xlog		*log,
39 struct xlog *log);
46 struct xlog *log,
54 struct xlog *log,
59 struct xlog *log,
63 struct xlog *log,
68 struct xlog *log,
72 struct xlog *log);
75 struct xlog *log,
80 struct xlog *log,
92 struct xlog *log);
96 struct xlog *log, in xlog_grant_sub_space() argument
110 space += log->l_logsize; in xlog_grant_sub_space()
122 struct xlog *log, in xlog_grant_add_space() argument
135 tmp = log->l_logsize - space; in xlog_grant_add_space()
172 struct xlog *log, in xlog_ticket_reservation() argument
176 if (head == &log->l_write_head) { in xlog_ticket_reservation()
189 struct xlog *log, in xlog_grant_head_wake() argument
220 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
223 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wake()
228 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
238 struct xlog *log, in xlog_grant_head_wait() argument
247 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
249 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
254 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
256 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
258 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
261 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
263 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
291 struct xlog *log, in xlog_grant_head_check() argument
299 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
307 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
308 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
311 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
313 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
319 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
358 struct xlog *log = mp->m_log; in xfs_log_regrant() local
362 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
375 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
383 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
385 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
390 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
391 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
392 xlog_verify_grant_tail(log); in xfs_log_regrant()
423 struct xlog *log = mp->m_log; in xfs_log_reserve() local
430 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
436 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent); in xfs_log_reserve()
439 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
442 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
444 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
449 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
450 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
451 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
452 xlog_verify_grant_tail(log); in xfs_log_reserve()
468 struct xlog *log, in __xlog_state_release_iclog() argument
471 lockdep_assert_held(&log->l_icloglock); in __xlog_state_release_iclog()
475 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in __xlog_state_release_iclog()
479 xlog_verify_tail_lsn(log, iclog, tail_lsn); in __xlog_state_release_iclog()
494 struct xlog *log, in xlog_state_release_iclog() argument
497 lockdep_assert_held(&log->l_icloglock); in xlog_state_release_iclog()
503 __xlog_state_release_iclog(log, iclog)) { in xlog_state_release_iclog()
504 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
505 xlog_sync(log, iclog); in xlog_state_release_iclog()
506 spin_lock(&log->l_icloglock); in xlog_state_release_iclog()
516 struct xlog *log = iclog->ic_log; in xfs_log_release_iclog() local
519 if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) { in xfs_log_release_iclog()
521 sync = __xlog_state_release_iclog(log, iclog); in xfs_log_release_iclog()
522 spin_unlock(&log->l_icloglock); in xfs_log_release_iclog()
526 xlog_sync(log, iclog); in xfs_log_release_iclog()
773 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog() local
775 if (!XLOG_FORCED_SHUTDOWN(log) && in xlog_wait_on_iclog()
778 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); in xlog_wait_on_iclog()
779 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
781 spin_unlock(&log->l_icloglock); in xlog_wait_on_iclog()
784 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_wait_on_iclog()
796 struct xlog *log, in xlog_write_unmount_record() argument
816 return xlog_write(log, &vec, ticket, lsn, NULL, flags, false); in xlog_write_unmount_record()
825 struct xlog *log) in xlog_unmount_write() argument
827 struct xfs_mount *mp = log->l_mp; in xlog_unmount_write()
838 error = xlog_write_unmount_record(log, tic, &lsn, flags); in xlog_unmount_write()
847 spin_lock(&log->l_icloglock); in xlog_unmount_write()
848 iclog = log->l_iclog; in xlog_unmount_write()
851 xlog_state_switch_iclogs(log, iclog, 0); in xlog_unmount_write()
855 error = xlog_state_release_iclog(log, iclog); in xlog_unmount_write()
859 trace_xfs_log_umount_write(log, tic); in xlog_unmount_write()
860 xfs_log_ticket_ungrant(log, tic); in xlog_unmount_write()
866 struct xlog *log) in xfs_log_unmount_verify_iclog() argument
868 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog()
873 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
887 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
894 xfs_readonly_buftarg(log->l_targ)) { in xfs_log_unmount_write()
901 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_unmount_write()
917 xfs_log_unmount_verify_iclog(log); in xfs_log_unmount_write()
918 xlog_unmount_write(log); in xfs_log_unmount_write()
998 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1001 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
1004 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1005 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1007 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1008 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1009 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1010 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1013 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1014 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1016 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1017 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1018 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1019 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1043 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1049 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1052 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1053 switch (log->l_covered_state) { in xfs_log_need_covered()
1060 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1062 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1066 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1067 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1069 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1075 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1086 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1101 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1102 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1103 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1136 struct xlog *log, in xlog_space_left() argument
1146 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1149 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1161 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1162 xfs_alert(log->l_mp, in xlog_space_left()
1165 xfs_alert(log->l_mp, in xlog_space_left()
1169 free_bytes = log->l_logsize; in xlog_space_left()
1181 struct xlog *log = iclog->ic_log; in xlog_ioend_work() local
1194 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { in xlog_ioend_work()
1195 xfs_alert(log->l_mp, "log I/O error %d", error); in xlog_ioend_work()
1196 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_ioend_work()
1222 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1229 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1230 log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1235 log->l_iclog_heads = in xlog_get_iclog_buffer_size()
1237 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; in xlog_get_iclog_buffer_size()
1257 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1259 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1297 struct xlog *log; in xlog_alloc_log() local
1305 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1306 if (!log) { in xlog_alloc_log()
1311 log->l_mp = mp; in xlog_alloc_log()
1312 log->l_targ = log_target; in xlog_alloc_log()
1313 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1314 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1315 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1316 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1317 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1318 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1320 log->l_prev_block = -1; in xlog_alloc_log()
1322 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1323 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1324 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1326 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1327 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1346 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1354 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1356 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1358 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1359 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1361 iclogp = &log->l_iclog; in xlog_alloc_log()
1369 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1370 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1372 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * in xlog_alloc_log()
1383 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask, in xlog_alloc_log()
1388 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1394 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1395 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1400 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1402 iclog->ic_log = log; in xlog_alloc_log()
1406 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1415 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1416 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1418 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", in xlog_alloc_log()
1421 if (!log->l_ioend_workqueue) in xlog_alloc_log()
1424 error = xlog_cil_init(log); in xlog_alloc_log()
1427 return log; in xlog_alloc_log()
1430 destroy_workqueue(log->l_ioend_workqueue); in xlog_alloc_log()
1432 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1436 if (prev_iclog == log->l_iclog) in xlog_alloc_log()
1440 kmem_free(log); in xlog_alloc_log()
1451 struct xlog *log, in xlog_commit_record() argument
1467 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_commit_record()
1470 error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS, in xlog_commit_record()
1473 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_commit_record()
1486 struct xlog *log, in xlog_grant_push_threshold() argument
1497 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_threshold()
1499 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_threshold()
1508 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_threshold()
1513 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_threshold()
1516 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_threshold()
1517 threshold_block -= log->l_logBBsize; in xlog_grant_push_threshold()
1527 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_threshold()
1543 struct xlog *log, in xlog_grant_push_ail() argument
1548 threshold_lsn = xlog_grant_push_threshold(log, need_bytes); in xlog_grant_push_ail()
1549 if (threshold_lsn == NULLCOMMITLSN || XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1557 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
1565 struct xlog *log, in xlog_pack_data() argument
1585 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1596 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1609 struct xlog *log, in xlog_cksum() argument
1622 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1674 struct xlog *log, in xlog_write_iclog() argument
1680 ASSERT(bno < log->l_logBBsize); in xlog_write_iclog()
1705 bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); in xlog_write_iclog()
1706 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1722 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1732 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_write_iclog()
1735 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1741 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1754 struct xlog *log, in xlog_split_iclog() argument
1759 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); in xlog_split_iclog()
1773 struct xlog *log, in xlog_calc_iclog_size() argument
1780 use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_calc_iclog_size()
1781 log->l_mp->m_sb.sb_logsunit > 1; in xlog_calc_iclog_size()
1784 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
1789 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_calc_iclog_size()
1798 ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit); in xlog_calc_iclog_size()
1829 struct xlog *log, in xlog_sync() argument
1840 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
1843 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1844 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1847 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1851 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) in xlog_sync()
1855 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1856 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1861 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1862 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
1867 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1877 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
1880 xfs_warn(log->l_mp, in xlog_sync()
1894 if (log->l_targ != log->l_mp->m_ddev_targp || split) { in xlog_sync()
1895 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1899 xlog_verify_iclog(log, iclog, count); in xlog_sync()
1900 xlog_write_iclog(log, iclog, bno, count, need_flush); in xlog_sync()
1908 struct xlog *log) in xlog_dealloc_log() argument
1913 xlog_cil_destroy(log); in xlog_dealloc_log()
1919 iclog = log->l_iclog; in xlog_dealloc_log()
1920 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1926 iclog = log->l_iclog; in xlog_dealloc_log()
1927 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1934 log->l_mp->m_log = NULL; in xlog_dealloc_log()
1935 destroy_workqueue(log->l_ioend_workqueue); in xlog_dealloc_log()
1936 kmem_free(log); in xlog_dealloc_log()
1944 struct xlog *log, in xlog_state_finish_copy() argument
1949 lockdep_assert_held(&log->l_icloglock); in xlog_state_finish_copy()
2124 struct xlog *log, in xlog_write_setup_ophdr() argument
2147 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2207 struct xlog *log, in xlog_write_copy_finish() argument
2224 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2225 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2236 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2237 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2242 xlog_state_switch_iclogs(log, iclog, 0); in xlog_write_copy_finish()
2248 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2256 error = xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2257 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2303 struct xlog *log, in xlog_write() argument
2331 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2333 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2334 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2343 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2388 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2401 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2426 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2467 spin_lock(&log->l_icloglock); in xlog_write()
2468 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2473 error = xlog_state_release_iclog(log, iclog); in xlog_write()
2475 spin_unlock(&log->l_icloglock); in xlog_write()
2518 struct xlog *log, in xlog_state_activate_iclogs() argument
2521 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs()
2532 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2567 struct xlog *log, in xlog_state_clean_iclog() argument
2574 xlog_state_activate_iclogs(log, &iclogs_changed); in xlog_state_clean_iclog()
2578 log->l_covered_state = xlog_covered_state(log->l_covered_state, in xlog_state_clean_iclog()
2585 struct xlog *log) in xlog_get_lowest_lsn() argument
2587 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn()
2598 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
2627 struct xlog *log, in xlog_state_set_callback() argument
2633 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_set_callback()
2639 atomic64_set(&log->l_last_sync_lsn, header_lsn); in xlog_state_set_callback()
2640 xlog_grant_push_ail(log, 0); in xlog_state_set_callback()
2650 struct xlog *log, in xlog_state_iodone_process_iclog() argument
2681 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_iodone_process_iclog()
2684 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2707 struct xlog *log, in xlog_state_do_iclog_callbacks() argument
2709 __releases(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2710 __acquires(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2712 spin_unlock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2729 spin_lock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2735 struct xlog *log) in xlog_state_do_callback() argument
2744 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2754 first_iclog = log->l_iclog; in xlog_state_do_callback()
2755 iclog = log->l_iclog; in xlog_state_do_callback()
2761 if (xlog_state_iodone_process_iclog(log, iclog, in xlog_state_do_callback()
2776 xlog_state_do_iclog_callbacks(log, iclog); in xlog_state_do_callback()
2777 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_state_do_callback()
2780 xlog_state_clean_iclog(log, iclog); in xlog_state_do_callback()
2787 xfs_warn(log->l_mp, in xlog_state_do_callback()
2793 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE || in xlog_state_do_callback()
2794 log->l_iclog->ic_state == XLOG_STATE_IOERROR) in xlog_state_do_callback()
2795 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2797 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2818 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2820 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2828 if (!XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_done_syncing()
2839 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2840 xlog_state_do_callback(log); in xlog_state_done_syncing()
2863 struct xlog *log, in xlog_state_get_iclog_space() argument
2875 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2876 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
2877 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2881 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2883 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2886 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2901 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2903 log->l_iclog_hsize, in xlog_state_get_iclog_space()
2905 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2907 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2908 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2923 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2933 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
2934 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2951 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2956 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2971 struct xlog *log, in xfs_log_ticket_regrant() argument
2974 trace_xfs_log_ticket_regrant(log, ticket); in xfs_log_ticket_regrant()
2979 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
2981 xlog_grant_sub_space(log, &log->l_write_head.grant, in xfs_log_ticket_regrant()
2986 trace_xfs_log_ticket_regrant_sub(log, ticket); in xfs_log_ticket_regrant()
2990 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
2992 trace_xfs_log_ticket_regrant_exit(log, ticket); in xfs_log_ticket_regrant()
3017 struct xlog *log, in xfs_log_ticket_ungrant() argument
3022 trace_xfs_log_ticket_ungrant(log, ticket); in xfs_log_ticket_ungrant()
3027 trace_xfs_log_ticket_ungrant_sub(log, ticket); in xfs_log_ticket_ungrant()
3039 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xfs_log_ticket_ungrant()
3040 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xfs_log_ticket_ungrant()
3042 trace_xfs_log_ticket_ungrant_exit(log, ticket); in xfs_log_ticket_ungrant()
3044 xfs_log_space_wake(log->l_mp); in xfs_log_ticket_ungrant()
3054 struct xlog *log, in xlog_state_switch_iclogs() argument
3059 assert_spin_locked(&log->l_icloglock); in xlog_state_switch_iclogs()
3064 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3065 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3066 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3069 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3072 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3073 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3074 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3075 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3078 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3086 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3087 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3089 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3090 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3091 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3093 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3094 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
3129 struct xlog *log = mp->m_log; in xfs_log_force() local
3136 xlog_cil_force(log); in xfs_log_force()
3138 spin_lock(&log->l_icloglock); in xfs_log_force()
3139 iclog = log->l_iclog; in xfs_log_force()
3166 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3167 if (xlog_state_release_iclog(log, iclog)) in xfs_log_force()
3180 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3193 spin_unlock(&log->l_icloglock); in xfs_log_force()
3196 spin_unlock(&log->l_icloglock); in xfs_log_force()
3208 struct xlog *log = mp->m_log; in __xfs_log_force_lsn() local
3211 spin_lock(&log->l_icloglock); in __xfs_log_force_lsn()
3212 iclog = log->l_iclog; in __xfs_log_force_lsn()
3218 if (iclog == log->l_iclog) in __xfs_log_force_lsn()
3244 &log->l_icloglock); in __xfs_log_force_lsn()
3248 xlog_state_switch_iclogs(log, iclog, 0); in __xfs_log_force_lsn()
3249 if (xlog_state_release_iclog(log, iclog)) in __xfs_log_force_lsn()
3258 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3261 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3332 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3391 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3403 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3406 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3425 struct xlog *log, in xlog_ticket_alloc() argument
3436 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3463 struct xlog *log, in xlog_verify_dest_ptr() argument
3469 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3470 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3471 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3476 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3492 struct xlog *log) in xlog_verify_grant_tail() argument
3497 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3498 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3501 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3502 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3504 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3508 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3509 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3511 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3519 struct xlog *log, in xlog_verify_tail_lsn() argument
3525 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3527 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3528 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3529 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3531 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3533 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3534 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3536 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3538 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3559 struct xlog *log, in xlog_verify_iclog() argument
3573 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3574 icptr = log->l_iclog; in xlog_verify_iclog()
3575 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3578 if (icptr != log->l_iclog) in xlog_verify_iclog()
3579 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3580 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3584 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3590 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3620 xfs_warn(log->l_mp, in xlog_verify_iclog()
3651 struct xlog *log) in xlog_state_ioerror() argument
3655 iclog = log->l_iclog; in xlog_state_ioerror()
3695 struct xlog *log; in xfs_log_force_umount() local
3698 log = mp->m_log; in xfs_log_force_umount()
3704 if (!log || in xfs_log_force_umount()
3705 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3716 if (logerror && log->l_iclog->ic_state == XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3717 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3735 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3744 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
3745 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
3746 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3755 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
3756 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
3764 spin_lock(&log->l_cilp->xc_push_lock); in xfs_log_force_umount()
3765 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
3766 spin_unlock(&log->l_cilp->xc_push_lock); in xfs_log_force_umount()
3767 xlog_state_do_callback(log); in xfs_log_force_umount()
3775 struct xlog *log) in xlog_iclogs_empty() argument
3779 iclog = log->l_iclog; in xlog_iclogs_empty()
3787 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
3800 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
3822 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
3827 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
3828 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()
3838 struct xlog *log = mp->m_log; in xfs_log_in_recovery() local
3840 return log->l_flags & XLOG_ACTIVE_RECOVERY; in xfs_log_in_recovery()
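
Many of the matches above cluster around the log's grant-head accounting: xlog_grant_sub_space()/xlog_grant_add_space() (listing lines 96-135) move a packed cycle/offset pair as reservation space is consumed and replenished, and xlog_space_left() (listing lines 1136-1169) turns the distance between a grant head and the log tail into free bytes. What follows is a minimal userspace sketch of that wraparound arithmetic, not the kernel code: the real functions pack both fields into a single atomic64_t and update it with a cmpxchg loop, and every name here (grant_head, grant_sub_space, grant_add_space, space_left, logsize) is illustrative only.

/*
 * grant.c - simplified model of the XFS grant-head wraparound math.
 * Illustrative only: the kernel packs cycle and offset into one
 * atomic64_t and updates it via cmpxchg; all names here are made up.
 */
#include <assert.h>
#include <stdio.h>

static const int logsize = 1024;	/* stand-in for log->l_logsize */

struct grant_head {
	int cycle;	/* number of times the head has lapped the log */
	int space;	/* byte offset of the head within the log */
};

/* Borrow as in xlog_grant_sub_space() (listing line 110): dropping
 * below offset 0 wraps to the end of the log and costs one cycle. */
static void grant_sub_space(struct grant_head *head, int bytes)
{
	head->space -= bytes;
	if (head->space < 0) {
		head->space += logsize;
		head->cycle--;
	}
}

/* Carry as in xlog_grant_add_space() (listing line 135): running past
 * the end of the log wraps to the start and gains one cycle. */
static void grant_add_space(struct grant_head *head, int bytes)
{
	int tmp = logsize - head->space;	/* room left in this cycle */

	if (tmp > bytes) {
		head->space += bytes;
	} else {
		head->space = bytes - tmp;
		head->cycle++;
	}
}

/* Common cases of xlog_space_left() (listing lines 1146-1169): free
 * space is the gap between head and tail, adjusted for cycle skew. */
static int space_left(const struct grant_head *head,
		      const struct grant_head *tail)
{
	if (tail->cycle == head->cycle)
		return logsize - (head->space - tail->space);
	if (tail->cycle + 1 == head->cycle)
		return tail->space - head->space;
	return 0;	/* head a full cycle (or more) ahead: log is full */
}

int main(void)
{
	struct grant_head tail = { .cycle = 1, .space = 200 };
	struct grant_head head = { .cycle = 1, .space = 1000 };

	assert(space_left(&head, &tail) == 224);

	grant_add_space(&head, 100);	/* crosses the end: wraps */
	assert(head.cycle == 2 && head.space == 76);
	assert(space_left(&head, &tail) == 124);

	grant_sub_space(&head, 100);	/* crosses offset 0: wraps back */
	assert(head.cycle == 1 && head.space == 1000);

	printf("cycle=%d space=%d, %d bytes free\n",
	       head.cycle, head.space, space_left(&head, &tail));
	return 0;
}

Compiled with any C compiler (e.g. cc -o grant grant.c), the asserts exercise both wrap directions. The kernel's versions differ mainly in being lock-free and in the sanity checks visible at listing lines 1161-1165, which fire if the head ever appears to fall behind the tail.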