Lines matching references to "log" (a cross-reference listing: each entry shows the file line number, the matching source line, and the enclosing function)

43 struct xlog *log,
56 struct xlog *log,
60 struct xlog *log,
64 struct xlog *log);
70 struct xlog *log,
75 struct xlog *log,
83 struct xlog *log,
87 struct xlog *log,
92 struct xlog *log,
97 struct xlog *log,
101 struct xlog *log,
105 struct xlog *log,
111 struct xlog *log,
115 struct xlog *log);
118 struct xlog *log,
124 struct xlog *log,
136 struct xlog *log);
140 struct xlog *log, in xlog_grant_sub_space() argument
154 space += log->l_logsize; in xlog_grant_sub_space()
166 struct xlog *log, in xlog_grant_add_space() argument
179 tmp = log->l_logsize - space; in xlog_grant_add_space()
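The two groups of hits above show the wrap-around arithmetic for a grant head: subtracting past the start of the log adds l_logsize back (and, in the full source, decrements the cycle count), while adding past the end does the reverse. A minimal user-space sketch of that arithmetic follows; grant_head_sketch and the function names are hypothetical stand-ins, not kernel types.

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the (cycle, bytes) pair packed into a grant head. */
    struct grant_head_sketch {
        int cycle;  /* how many times this head has lapped the log */
        int space;  /* byte offset of this head within the log */
    };

    /* Mirrors the "space += log->l_logsize" underflow fix-up seen above. */
    static void grant_sub_space(struct grant_head_sketch *h, int logsize, int bytes)
    {
        h->space -= bytes;
        if (h->space < 0) {
            h->space += logsize;
            h->cycle--;
        }
    }

    /* Mirrors the "tmp = log->l_logsize - space" overflow test seen above. */
    static void grant_add_space(struct grant_head_sketch *h, int logsize, int bytes)
    {
        int tmp = logsize - h->space;
        if (tmp > bytes) {
            h->space += bytes;
        } else {
            h->space = bytes - tmp;  /* wrapped past the end of the log */
            h->cycle++;
        }
    }

    int main(void)
    {
        struct grant_head_sketch h = { .cycle = 1, .space = 900 };
        grant_add_space(&h, 1024, 200);  /* wraps: now cycle 2, offset 76 */
        assert(h.cycle == 2 && h.space == 76);
        grant_sub_space(&h, 1024, 200);  /* wraps back: cycle 1, offset 900 */
        assert(h.cycle == 1 && h.space == 900);
        printf("cycle=%d space=%d\n", h.cycle, h.space);
        return 0;
    }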
216 struct xlog *log, in xlog_ticket_reservation() argument
220 if (head == &log->l_write_head) { in xlog_ticket_reservation()
233 struct xlog *log, in xlog_grant_head_wake() argument
241 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
246 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
255 struct xlog *log, in xlog_grant_head_wait() argument
264 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
266 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
271 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
273 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
275 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
278 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
280 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
308 struct xlog *log, in xlog_grant_head_check() argument
316 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
324 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
325 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
328 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
330 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
336 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
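The xlog_ticket_reservation() hit above (the head == &log->l_write_head test) distinguishes the two grant heads: against the write head a ticket is charged a single unit, while a permanent ticket queued on the reserve head is charged one unit per remaining count. A hedged sketch of that rule, with stand-alone types rather than the kernel's:

    #include <stdbool.h>

    struct ticket_sketch {
        int  unit_res;   /* bytes per transaction unit */
        int  cnt;        /* remaining count for a permanent reservation */
        bool permanent;  /* stand-in for the XLOG_TIC_PERM_RESERV flag */
    };

    static int ticket_reservation_sketch(const struct ticket_sketch *tic,
                                         bool is_write_head)
    {
        if (is_write_head || !tic->permanent)
            return tic->unit_res;
        return tic->unit_res * tic->cnt;  /* reserve head, permanent ticket */
    }

xlog_grant_head_check() then compares this figure against xlog_space_left() and, if there are already waiters or not enough free space, falls into xlog_grant_head_wait()'s sleep loop.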
375 struct xlog *log = mp->m_log; in xfs_log_regrant() local
379 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
392 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
400 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
402 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
407 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
408 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
409 xlog_verify_grant_tail(log); in xfs_log_regrant()
440 struct xlog *log = mp->m_log; in xfs_log_reserve() local
447 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
453 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, in xfs_log_reserve()
460 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
463 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
465 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
470 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
471 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
472 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
473 xlog_verify_grant_tail(log); in xfs_log_reserve()
516 struct xlog *log = mp->m_log; in xfs_log_done() local
519 if (XLOG_FORCED_SHUTDOWN(log) || in xfs_log_done()
525 (xlog_commit_record(log, ticket, iclog, &lsn)))) { in xfs_log_done()
532 trace_xfs_log_done_nonperm(log, ticket); in xfs_log_done()
538 xlog_ungrant_log_space(log, ticket); in xfs_log_done()
540 trace_xfs_log_done_perm(log, ticket); in xfs_log_done()
542 xlog_regrant_reserve_log_space(log, ticket); in xfs_log_done()
824 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
838 xfs_readonly_buftarg(log->l_mp->m_logdev_targp)) { in xfs_log_unmount_write()
844 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); in xfs_log_unmount_write()
847 first_iclog = iclog = log->l_iclog; in xfs_log_unmount_write()
856 if (! (XLOG_FORCED_SHUTDOWN(log))) { in xfs_log_unmount_write()
880 error = xlog_write(log, &vec, tic, &lsn, in xfs_log_unmount_write()
893 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
894 iclog = log->l_iclog; in xfs_log_unmount_write()
896 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
897 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
898 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
900 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
903 if (!XLOG_FORCED_SHUTDOWN(log)) { in xfs_log_unmount_write()
905 &log->l_icloglock); in xfs_log_unmount_write()
907 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
910 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
913 trace_xfs_log_umount_write(log, tic); in xfs_log_unmount_write()
914 xlog_ungrant_log_space(log, tic); in xfs_log_unmount_write()
931 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
932 iclog = log->l_iclog; in xfs_log_unmount_write()
935 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
936 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
937 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
939 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
946 &log->l_icloglock); in xfs_log_unmount_write()
948 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
1030 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1033 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
1036 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1037 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1039 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1040 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1041 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1042 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1045 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1046 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1048 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1049 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1050 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1051 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1075 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1081 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1084 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1085 switch (log->l_covered_state) { in xfs_log_need_covered()
1092 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1094 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1098 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1099 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1101 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1107 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1118 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1133 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1134 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1135 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1168 struct xlog *log, in xlog_space_left() argument
1178 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1181 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1193 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1194 xfs_alert(log->l_mp, in xlog_space_left()
1197 xfs_alert(log->l_mp, in xlog_space_left()
1201 free_bytes = log->l_logsize; in xlog_space_left()
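The xlog_space_left() hits sketch how free space is derived from two (cycle, offset) pairs: when head and tail are on the same cycle, the used region is head minus tail; when the head is exactly one cycle ahead, the free region is tail minus head; and the "head behind tail" alert above marks the corrupt case, where the full source alerts and falls back to reporting the whole log size. A compact stand-alone rendering, with hypothetical names (this sketch returns 0 for the corrupt case instead of alerting):

    /* All offsets in bytes; a cycle is one full lap of the log. */
    static int space_left_sketch(int logsize,
                                 int tail_cycle, int tail_bytes,
                                 int head_cycle, int head_bytes)
    {
        if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
            return logsize - (head_bytes - tail_bytes);  /* same lap */
        if (tail_cycle + 1 == head_cycle && head_bytes <= tail_bytes)
            return tail_bytes - head_bytes;              /* head one lap ahead */
        return 0;  /* over-run or corrupt state: report nothing usable */
    }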
1269 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1275 log->l_iclog_bufs = XLOG_MAX_ICLOGS; in xlog_get_iclog_buffer_size()
1277 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1283 size = log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1284 log->l_iclog_size_log = 0; in xlog_get_iclog_buffer_size()
1286 log->l_iclog_size_log++; in xlog_get_iclog_buffer_size()
1298 log->l_iclog_hsize = xhdrs << BBSHIFT; in xlog_get_iclog_buffer_size()
1299 log->l_iclog_heads = xhdrs; in xlog_get_iclog_buffer_size()
1302 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1303 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1309 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE; in xlog_get_iclog_buffer_size()
1310 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT; in xlog_get_iclog_buffer_size()
1313 log->l_iclog_hsize = BBSIZE; in xlog_get_iclog_buffer_size()
1314 log->l_iclog_heads = 1; in xlog_get_iclog_buffer_size()
1319 mp->m_logbufs = log->l_iclog_bufs; in xlog_get_iclog_buffer_size()
1321 mp->m_logbsize = log->l_iclog_size; in xlog_get_iclog_buffer_size()
1342 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1344 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1382 struct xlog *log; in xlog_alloc_log() local
1391 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1392 if (!log) { in xlog_alloc_log()
1397 log->l_mp = mp; in xlog_alloc_log()
1398 log->l_targ = log_target; in xlog_alloc_log()
1399 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1400 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1401 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1402 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1403 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1404 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1406 log->l_prev_block = -1; in xlog_alloc_log()
1408 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1409 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1410 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1412 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1413 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1432 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1440 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1442 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1451 BTOBB(log->l_iclog_size), XBF_NO_IOACCT); in xlog_alloc_log()
1466 log->l_xbuf = bp; in xlog_alloc_log()
1468 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1469 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1471 iclogp = &log->l_iclog; in xlog_alloc_log()
1479 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1480 for (i=0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1490 BTOBB(log->l_iclog_size), in xlog_alloc_log()
1504 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1510 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1511 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1516 iclog->ic_size = BBTOB(bp->b_length) - log->l_iclog_hsize; in xlog_alloc_log()
1518 iclog->ic_log = log; in xlog_alloc_log()
1522 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1529 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1530 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1532 error = xlog_cil_init(log); in xlog_alloc_log()
1535 return log; in xlog_alloc_log()
1538 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1544 spinlock_destroy(&log->l_icloglock); in xlog_alloc_log()
1545 xfs_buf_free(log->l_xbuf); in xlog_alloc_log()
1547 kmem_free(log); in xlog_alloc_log()
1559 struct xlog *log, in xlog_commit_record() argument
1564 struct xfs_mount *mp = log->l_mp; in xlog_commit_record()
1577 error = xlog_write(log, &vec, ticket, commitlsnp, iclog, in xlog_commit_record()
1593 struct xlog *log, in xlog_grant_push_ail() argument
1604 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_ail()
1606 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_ail()
1615 free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_ail()
1620 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_ail()
1623 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_ail()
1624 threshold_block -= log->l_logBBsize; in xlog_grant_push_ail()
1634 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_ail()
1643 if (!XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1644 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
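xlog_grant_push_ail() computes a free-space threshold below which the AIL is pushed toward a target LSN: the MAX(..., log->l_logBBsize >> 2) hit shows a floor of one quarter of the log, and the threshold_block fix-up shows the usual wrap into the next cycle. A sketch of the visible parts of the target computation (names hypothetical; the full source applies further floors not shown in these hits):

    /* Returns the push target as a (cycle, block) pair via out-params. */
    static void push_target_sketch(int log_blocks, int need_blocks,
                                   int tail_cycle, int tail_block,
                                   int *out_cycle, int *out_block)
    {
        int threshold = need_blocks;
        if (threshold < log_blocks / 4)   /* mirrors MAX(..., logBBsize >> 2) */
            threshold = log_blocks / 4;

        *out_cycle = tail_cycle;
        *out_block = tail_block + threshold;
        if (*out_block >= log_blocks) {   /* mirrors the threshold_block fix-up */
            *out_block -= log_blocks;
            (*out_cycle)++;
        }
    }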
1652 struct xlog *log, in xlog_pack_data() argument
1672 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1683 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1696 struct xlog *log, in xlog_cksum() argument
1709 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1795 struct xlog *log, in xlog_sync() argument
1805 int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb); in xlog_sync()
1808 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1812 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_sync()
1815 if (v2 && log->l_mp->m_sb.sb_logsunit > 1) { in xlog_sync()
1817 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_sync()
1823 ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 && in xlog_sync()
1824 roundoff < log->l_mp->m_sb.sb_logsunit) in xlog_sync()
1826 (log->l_mp->m_sb.sb_logsunit <= 1 && in xlog_sync()
1830 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1831 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1834 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1845 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1848 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1851 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp))); in xlog_sync()
1852 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)); in xlog_sync()
1876 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1886 if (log->l_badcrc_factor && in xlog_sync()
1887 (prandom_u32() % log->l_badcrc_factor == 0)) { in xlog_sync()
1890 xfs_warn(log->l_mp, in xlog_sync()
1901 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { in xlog_sync()
1913 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp) in xlog_sync()
1914 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1919 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1920 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1922 xlog_verify_iclog(log, iclog, count, true); in xlog_sync()
1925 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
1944 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) in xlog_sync()
1947 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); in xlog_sync()
1948 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); in xlog_sync()
1951 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); in xlog_sync()
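The xlog_sync() hits show the write being padded before it goes to disk: on v2 logs with a stripe unit greater than one, the byte count is rounded up to the stripe unit (the XLOG_LSUNITTOB/XLOG_BTOLSUNIT hit), and the padding ("roundoff") is credited back to both grant heads so the reservation books stay balanced. A sketch of that rounding, with BBSIZE_SKETCH standing in for the 512-byte basic block:

    #define BBSIZE_SKETCH 512   /* stand-in for the basic block size */

    /* Round count_init up to the alignment the log requires, return the pad. */
    static int roundoff_sketch(int count_init, int stripe_unit)
    {
        int align = (stripe_unit > 1) ? stripe_unit : BBSIZE_SKETCH;
        int count = ((count_init + align - 1) / align) * align;
        return count - count_init;  /* later re-added to both grant heads */
    }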
1966 struct xlog *log) in xlog_dealloc_log() argument
1971 xlog_cil_destroy(log); in xlog_dealloc_log()
1977 iclog = log->l_iclog; in xlog_dealloc_log()
1978 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1989 xfs_buf_lock(log->l_xbuf); in xlog_dealloc_log()
1990 xfs_buf_unlock(log->l_xbuf); in xlog_dealloc_log()
1991 xfs_buf_set_empty(log->l_xbuf, BTOBB(log->l_iclog_size)); in xlog_dealloc_log()
1992 xfs_buf_free(log->l_xbuf); in xlog_dealloc_log()
1994 iclog = log->l_iclog; in xlog_dealloc_log()
1995 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
2001 spinlock_destroy(&log->l_icloglock); in xlog_dealloc_log()
2003 log->l_mp->m_log = NULL; in xlog_dealloc_log()
2004 kmem_free(log); in xlog_dealloc_log()
2013 struct xlog *log, in xlog_state_finish_copy() argument
2018 spin_lock(&log->l_icloglock); in xlog_state_finish_copy()
2023 spin_unlock(&log->l_icloglock); in xlog_state_finish_copy()
2158 struct xlog *log, in xlog_write_setup_ophdr() argument
2181 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2241 struct xlog *log, in xlog_write_copy_finish() argument
2256 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2259 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2267 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2271 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2272 xlog_state_want_sync(log, iclog); in xlog_write_copy_finish()
2273 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2276 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2326 struct xlog *log, in xlog_write() argument
2365 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2374 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2416 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2429 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2449 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2490 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2492 return xlog_state_release_iclog(log, iclog); in xlog_write()
2517 struct xlog *log) in xlog_state_clean_log() argument
2522 iclog = log->l_iclog; in xlog_state_clean_log()
2557 } while (iclog != log->l_iclog); in xlog_state_clean_log()
2568 switch (log->l_covered_state) { in xlog_state_clean_log()
2572 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2577 log->l_covered_state = XLOG_STATE_COVER_NEED2; in xlog_state_clean_log()
2579 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2584 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_state_clean_log()
2586 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_log()
2597 struct xlog *log) in xlog_get_lowest_lsn() argument
2602 lsn_log = log->l_iclog; in xlog_get_lowest_lsn()
2613 } while (lsn_log != log->l_iclog); in xlog_get_lowest_lsn()
2620 struct xlog *log, in xlog_state_do_callback() argument
2637 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2638 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2652 first_iclog = log->l_iclog; in xlog_state_do_callback()
2653 iclog = log->l_iclog; in xlog_state_do_callback()
2707 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_do_callback()
2736 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_do_callback()
2739 atomic64_set(&log->l_last_sync_lsn, in xlog_state_do_callback()
2745 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2773 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2783 xlog_state_clean_log(log); in xlog_state_do_callback()
2794 xfs_warn(log->l_mp, in xlog_state_do_callback()
2814 first_iclog = iclog = log->l_iclog; in xlog_state_do_callback()
2836 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) in xlog_state_do_callback()
2838 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2841 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2863 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2865 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2881 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2893 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2894 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ in xlog_state_done_syncing()
2918 struct xlog *log, in xlog_state_get_iclog_space() argument
2931 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2932 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
2933 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2937 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2939 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2942 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2957 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2959 log->l_iclog_hsize, in xlog_state_get_iclog_space()
2961 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2963 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2964 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2977 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2988 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2989 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
2993 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3009 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3014 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3029 struct xlog *log, in xlog_regrant_reserve_log_space() argument
3032 trace_xfs_log_regrant_reserve_enter(log, ticket); in xlog_regrant_reserve_log_space()
3037 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3039 xlog_grant_sub_space(log, &log->l_write_head.grant, in xlog_regrant_reserve_log_space()
3044 trace_xfs_log_regrant_reserve_sub(log, ticket); in xlog_regrant_reserve_log_space()
3050 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3053 trace_xfs_log_regrant_reserve_exit(log, ticket); in xlog_regrant_reserve_log_space()
3076 struct xlog *log, in xlog_ungrant_log_space() argument
3084 trace_xfs_log_ungrant_enter(log, ticket); in xlog_ungrant_log_space()
3085 trace_xfs_log_ungrant_sub(log, ticket); in xlog_ungrant_log_space()
3097 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xlog_ungrant_log_space()
3098 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xlog_ungrant_log_space()
3100 trace_xfs_log_ungrant_exit(log, ticket); in xlog_ungrant_log_space()
3102 xfs_log_space_wake(log->l_mp); in xlog_ungrant_log_space()
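Taken together, the xlog_regrant_reserve_log_space() and xlog_ungrant_log_space() hits show the two ends of a permanent reservation's life: a regrant subtracts what was consumed from both heads and re-adds one unit to the reserve head, while the final ungrant releases the current reservation plus a full unit for every remaining count, then wakes any waiters via xfs_log_space_wake(). A sketch of the ungrant byte count, under those assumptions and with a stand-alone type:

    struct perm_ticket_sketch {
        int curr_res;  /* bytes still reserved but unused */
        int unit_res;  /* bytes per transaction unit */
        int cnt;       /* units still outstanding; > 0 only for permanent tickets */
    };

    static int ungrant_bytes_sketch(const struct perm_ticket_sketch *tic)
    {
        int bytes = tic->curr_res;
        if (tic->cnt > 0)
            bytes += tic->unit_res * tic->cnt;  /* release the unused units too */
        return bytes;  /* subtracted from both grant heads, then waiters woken */
    }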
3116 struct xlog *log, in xlog_state_release_iclog() argument
3125 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) in xlog_state_release_iclog()
3129 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3137 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
3141 xlog_verify_tail_lsn(log, iclog, tail_lsn); in xlog_state_release_iclog()
3144 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3154 return xlog_sync(log, iclog); in xlog_state_release_iclog()
3168 struct xlog *log, in xlog_state_switch_iclogs() argument
3176 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3177 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3178 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3181 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3184 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3185 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3186 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3187 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3190 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3198 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3199 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3201 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3202 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3203 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3205 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3206 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
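xlog_state_switch_iclogs() advances the in-core log head: the current block moves past the data just written plus the header, is rounded up to the stripe unit on v2 logs, and wraps with a cycle increment; the cycle then skips one value so a cycle number can never collide with the on-disk header magic (the l_curr_cycle == XLOG_HEADER_MAGIC_NUM check above). A sketch of that advance, where CYCLE_SKIP_SENTINEL is a stand-in for XLOG_HEADER_MAGIC_NUM:

    #define CYCLE_SKIP_SENTINEL 0xFEEDBABEu  /* stand-in for XLOG_HEADER_MAGIC_NUM */

    static void switch_head_sketch(unsigned int *cycle, int *block,
                                   int log_blocks, int used_blocks, int sunit_bb)
    {
        *block += used_blocks;                       /* data + header blocks */
        if (sunit_bb > 1)                            /* v2 log: round to stripe unit */
            *block = ((*block + sunit_bb - 1) / sunit_bb) * sunit_bb;
        if (*block >= log_blocks) {
            *block -= log_blocks;                    /* wrap to the start */
            (*cycle)++;
            if (*cycle == CYCLE_SKIP_SENTINEL)       /* never collide with magic */
                (*cycle)++;
        }
    }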
3242 struct xlog *log = mp->m_log; in _xfs_log_force() local
3248 xlog_cil_force(log); in _xfs_log_force()
3250 spin_lock(&log->l_icloglock); in _xfs_log_force()
3252 iclog = log->l_iclog; in _xfs_log_force()
3254 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3289 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force()
3290 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3292 if (xlog_state_release_iclog(log, iclog)) in _xfs_log_force()
3297 spin_lock(&log->l_icloglock); in _xfs_log_force()
3309 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force()
3328 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3332 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in _xfs_log_force()
3343 spin_unlock(&log->l_icloglock); in _xfs_log_force()
3384 struct xlog *log = mp->m_log; in _xfs_log_force_lsn() local
3392 lsn = xlog_cil_force_lsn(log, lsn); in _xfs_log_force_lsn()
3397 spin_lock(&log->l_icloglock); in _xfs_log_force_lsn()
3398 iclog = log->l_iclog; in _xfs_log_force_lsn()
3400 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3411 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3442 &log->l_icloglock); in _xfs_log_force_lsn()
3447 xlog_state_switch_iclogs(log, iclog, 0); in _xfs_log_force_lsn()
3448 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3449 if (xlog_state_release_iclog(log, iclog)) in _xfs_log_force_lsn()
3453 spin_lock(&log->l_icloglock); in _xfs_log_force_lsn()
3464 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3468 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in _xfs_log_force_lsn()
3477 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3481 } while (iclog != log->l_iclog); in _xfs_log_force_lsn()
3483 spin_unlock(&log->l_icloglock); in _xfs_log_force_lsn()
3508 struct xlog *log, in xlog_state_want_sync() argument
3511 assert_spin_locked(&log->l_icloglock); in xlog_state_want_sync()
3514 xlog_state_switch_iclogs(log, iclog, 0); in xlog_state_want_sync()
3559 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3618 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3630 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3633 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3652 struct xlog *log, in xlog_ticket_alloc() argument
3666 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3701 struct xlog *log, in xlog_verify_dest_ptr() argument
3707 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3708 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3709 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3714 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3730 struct xlog *log) in xlog_verify_grant_tail() argument
3735 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3736 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3739 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3740 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3742 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3746 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3747 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3749 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3757 struct xlog *log, in xlog_verify_tail_lsn() argument
3763 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3765 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3766 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3767 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3769 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3771 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3772 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3774 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3776 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3797 struct xlog *log, in xlog_verify_iclog() argument
3812 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3813 icptr = log->l_iclog; in xlog_verify_iclog()
3814 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3817 if (icptr != log->l_iclog) in xlog_verify_iclog()
3818 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3819 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3823 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3829 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3859 xfs_warn(log->l_mp, in xlog_verify_iclog()
3890 struct xlog *log) in xlog_state_ioerror() argument
3894 iclog = log->l_iclog; in xlog_state_ioerror()
3934 struct xlog *log; in xfs_log_force_umount() local
3937 log = mp->m_log; in xfs_log_force_umount()
3943 if (!log || in xfs_log_force_umount()
3944 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3955 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3956 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3974 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3983 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
3984 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
3985 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3994 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
3995 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
4003 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
4004 xlog_state_do_callback(log, XFS_LI_ABORTED, NULL); in xfs_log_force_umount()
4010 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
4011 iclog = log->l_iclog; in xfs_log_force_umount()
4015 } while (iclog != log->l_iclog); in xfs_log_force_umount()
4016 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
4025 struct xlog *log) in xlog_iclogs_empty() argument
4029 iclog = log->l_iclog; in xlog_iclogs_empty()
4037 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
4050 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
4072 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
4077 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
4078 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()