
Lines Matching refs:log (fs/xfs/xfs_log.c)

29 struct xlog *log,
42 struct xlog *log,
46 struct xlog *log);
54 struct xlog *log,
62 struct xlog *log,
66 struct xlog *log,
71 struct xlog *log,
76 struct xlog *log,
80 struct xlog *log,
84 struct xlog *log,
90 struct xlog *log,
94 struct xlog *log);
97 struct xlog *log,
102 struct xlog *log,
114 struct xlog *log);
118 struct xlog *log, in xlog_grant_sub_space() argument
132 space += log->l_logsize; in xlog_grant_sub_space()
144 struct xlog *log, in xlog_grant_add_space() argument
157 tmp = log->l_logsize - space; in xlog_grant_add_space()
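
The two wraparound hits above (space += log->l_logsize at 132, tmp = log->l_logsize - space at 157) are the heart of the grant-head byte accounting: each head is a (cycle, bytes-into-log) pair, and reservation bytes are subtracted or added modulo the log size, decrementing or incrementing the cycle on wrap. Below is a minimal user-space sketch of that arithmetic; struct grant_head and the function names are illustrative stand-ins, not the kernel's packed-atomic64 representation.

#include <stdio.h>

/* Hypothetical stand-in for the packed cycle/bytes grant head. */
struct grant_head {
	int cycle;   /* how many times the head has lapped the log */
	int bytes;   /* byte offset of the head within the log */
};

/* Subtract reservation bytes; wrap backwards across the log start. */
static void grant_sub_space(struct grant_head *head, int logsize, int bytes)
{
	head->bytes -= bytes;
	if (head->bytes < 0) {
		head->bytes += logsize;   /* cf. hit at line 132 */
		head->cycle--;
	}
}

/* Add reservation bytes; wrap forwards across the log end. */
static void grant_add_space(struct grant_head *head, int logsize, int bytes)
{
	int tmp = logsize - head->bytes;  /* cf. hit at line 157 */

	if (tmp > bytes) {
		head->bytes += bytes;
	} else {
		head->cycle++;
		head->bytes = bytes - tmp;
	}
}

int main(void)
{
	struct grant_head head = { .cycle = 1, .bytes = 900 };
	int logsize = 1024;

	grant_add_space(&head, logsize, 200);   /* wraps: cycle 2, bytes 76 */
	printf("cycle %d bytes %d\n", head.cycle, head.bytes);
	grant_sub_space(&head, logsize, 100);   /* wraps back: cycle 1, bytes 1000 */
	printf("cycle %d bytes %d\n", head.cycle, head.bytes);
	return 0;
}

In the kernel the pair is packed into a single atomic64_t and updated with a compare-and-exchange loop so no lock is needed; the sketch keeps plain fields to show only the wrap logic.
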
194 struct xlog *log, in xlog_ticket_reservation() argument
198 if (head == &log->l_write_head) { in xlog_ticket_reservation()
211 struct xlog *log, in xlog_grant_head_wake() argument
242 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
245 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wake()
250 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
260 struct xlog *log, in xlog_grant_head_wait() argument
269 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
271 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
276 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
278 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
280 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
283 if (XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_head_wait()
285 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
313 struct xlog *log, in xlog_grant_head_check() argument
321 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xlog_grant_head_check()
329 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
330 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
333 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
335 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
341 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
380 struct xlog *log = mp->m_log; in xfs_log_regrant() local
384 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_regrant()
397 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
405 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
407 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
412 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
413 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
414 xlog_verify_grant_tail(log); in xfs_log_regrant()
445 struct xlog *log = mp->m_log; in xfs_log_reserve() local
452 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_reserve()
458 tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0); in xfs_log_reserve()
461 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
464 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
466 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
471 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
472 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
473 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
474 xlog_verify_grant_tail(log); in xfs_log_reserve()
517 struct xlog *log = mp->m_log; in xfs_log_done() local
520 if (XLOG_FORCED_SHUTDOWN(log) || in xfs_log_done()
526 (xlog_commit_record(log, ticket, iclog, &lsn)))) { in xfs_log_done()
533 trace_xfs_log_done_nonperm(log, ticket); in xfs_log_done()
539 xlog_ungrant_log_space(log, ticket); in xfs_log_done()
541 trace_xfs_log_done_perm(log, ticket); in xfs_log_done()
543 xlog_regrant_reserve_log_space(log, ticket); in xfs_log_done()
829 struct xlog *log = mp->m_log; in xfs_log_write_unmount_record() local
856 error = xlog_write(log, &vec, tic, &lsn, NULL, flags); in xfs_log_write_unmount_record()
865 spin_lock(&log->l_icloglock); in xfs_log_write_unmount_record()
866 iclog = log->l_iclog; in xfs_log_write_unmount_record()
868 xlog_state_want_sync(log, iclog); in xfs_log_write_unmount_record()
869 spin_unlock(&log->l_icloglock); in xfs_log_write_unmount_record()
870 error = xlog_state_release_iclog(log, iclog); in xfs_log_write_unmount_record()
872 spin_lock(&log->l_icloglock); in xfs_log_write_unmount_record()
875 if (!XLOG_FORCED_SHUTDOWN(log)) { in xfs_log_write_unmount_record()
876 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xfs_log_write_unmount_record()
882 spin_unlock(&log->l_icloglock); in xfs_log_write_unmount_record()
887 trace_xfs_log_umount_write(log, tic); in xfs_log_write_unmount_record()
888 xlog_ungrant_log_space(log, tic); in xfs_log_write_unmount_record()
904 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
916 xfs_readonly_buftarg(log->l_targ)) { in xfs_log_unmount_write()
922 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log))); in xfs_log_unmount_write()
925 first_iclog = iclog = log->l_iclog; in xfs_log_unmount_write()
934 if (! (XLOG_FORCED_SHUTDOWN(log))) { in xfs_log_unmount_write()
950 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
951 iclog = log->l_iclog; in xfs_log_unmount_write()
954 xlog_state_want_sync(log, iclog); in xfs_log_unmount_write()
955 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
956 error = xlog_state_release_iclog(log, iclog); in xfs_log_unmount_write()
958 spin_lock(&log->l_icloglock); in xfs_log_unmount_write()
965 &log->l_icloglock); in xfs_log_unmount_write()
967 spin_unlock(&log->l_icloglock); in xfs_log_unmount_write()
1051 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1054 if (XLOG_FORCED_SHUTDOWN(log)) in xfs_log_space_wake()
1057 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1058 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1060 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1061 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1062 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1063 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1066 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1067 ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY)); in xfs_log_space_wake()
1069 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1070 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1071 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1072 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1096 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1102 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1105 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1106 switch (log->l_covered_state) { in xfs_log_need_covered()
1113 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1115 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1119 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1120 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1122 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1128 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1139 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1154 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1155 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1156 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
1189 struct xlog *log, in xlog_space_left() argument
1199 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1202 free_bytes = log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1214 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1215 xfs_alert(log->l_mp, in xlog_space_left()
1218 xfs_alert(log->l_mp, in xlog_space_left()
1222 free_bytes = log->l_logsize; in xlog_space_left()
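
The xlog_space_left() hits (1199-1222) outline how free log space is derived from the head and tail positions, each a (cycle, bytes) pair: same cycle means the head is at or past the tail within one lap, one cycle apart means the head has wrapped once, and a head behind the tail is accounting corruption (the "head behind tail" alert at 1214). A hedged sketch of that case split, with hypothetical plain-int parameters in place of the kernel's packed LSN and grant-head encodings:

#include <stdio.h>

/*
 * Free bytes between a circular log's tail and head, each given as a
 * (cycle, bytes-into-log) pair. Sketch of the case analysis suggested
 * by the xlog_space_left() hits; names and types are illustrative.
 */
static int log_space_left(int logsize,
			  int tail_cycle, int tail_bytes,
			  int head_cycle, int head_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes) {
		/* Head ahead of the tail on the same lap (cf. line 1202). */
		return logsize - (head_bytes - tail_bytes);
	} else if (tail_cycle + 1 < head_cycle) {
		/* Head more than one lap ahead: the log is full. */
		return 0;
	} else if (tail_cycle < head_cycle) {
		/* Head has wrapped exactly once past the log end. */
		return tail_bytes - head_bytes;
	}

	/*
	 * Head behind the tail: corruption. The kernel alerts (cf. line
	 * 1214) and pessimistically reports the whole log free (cf. line
	 * 1222) so callers don't hang while the bug is made loud.
	 */
	fprintf(stderr, "log_space_left: head behind tail\n");
	return logsize;
}

int main(void)
{
	/* Head lapped once: tail at cycle 1/900, head at cycle 2/100. */
	printf("%d\n", log_space_left(1024, 1, 900, 2, 100)); /* 800 */
	return 0;
}
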
1234 struct xlog *log = iclog->ic_log; in xlog_ioend_work() local
1248 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { in xlog_ioend_work()
1249 xfs_alert(log->l_mp, "log I/O error %d", error); in xlog_ioend_work()
1250 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_ioend_work()
1284 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1291 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1292 log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1297 log->l_iclog_heads = in xlog_get_iclog_buffer_size()
1299 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; in xlog_get_iclog_buffer_size()
1319 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1321 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1359 struct xlog *log; in xlog_alloc_log() local
1367 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1368 if (!log) { in xlog_alloc_log()
1373 log->l_mp = mp; in xlog_alloc_log()
1374 log->l_targ = log_target; in xlog_alloc_log()
1375 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1376 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1377 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1378 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1379 log->l_flags |= XLOG_ACTIVE_RECOVERY; in xlog_alloc_log()
1380 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1382 log->l_prev_block = -1; in xlog_alloc_log()
1384 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1385 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1386 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1388 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1389 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1408 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1416 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1418 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1420 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1421 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1423 iclogp = &log->l_iclog; in xlog_alloc_log()
1431 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1432 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1434 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * in xlog_alloc_log()
1445 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask, in xlog_alloc_log()
1450 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1456 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1); in xlog_alloc_log()
1457 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1462 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1464 iclog->ic_log = log; in xlog_alloc_log()
1468 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1477 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1478 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1480 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", in xlog_alloc_log()
1483 if (!log->l_ioend_workqueue) in xlog_alloc_log()
1486 error = xlog_cil_init(log); in xlog_alloc_log()
1489 return log; in xlog_alloc_log()
1492 destroy_workqueue(log->l_ioend_workqueue); in xlog_alloc_log()
1494 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1498 if (prev_iclog == log->l_iclog) in xlog_alloc_log()
1502 kmem_free(log); in xlog_alloc_log()
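
The xlog_alloc_log() hits (1423-1478) trace a classic pointer-to-pointer ring build: iclogp starts at &log->l_iclog, each new buffer is hooked in through *iclogp, and after the loop the last next pointer is bent back to the first element ("complete ring") while the first element's prev is patched to point at the last. A minimal sketch of that construction, with hypothetical node/ring names standing in for xlog_in_core and l_iclog:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for xlog_in_core: one ring node. */
struct node {
	int id;
	struct node *next;
	struct node *prev;
};

int main(void)
{
	struct node *ring = NULL;     /* plays the role of log->l_iclog */
	struct node **nodep = &ring;  /* cursor, cf. iclogp at line 1423 */
	struct node *prev = NULL;
	int i, nbufs = 4;

	for (i = 0; i < nbufs; i++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			abort();
		n->id = i;
		*nodep = n;          /* hook into the chain */
		n->prev = prev;
		prev = n;
		nodep = &n->next;    /* advance the cursor */
	}
	*nodep = ring;               /* complete ring (cf. line 1477) */
	ring->prev = prev;           /* re-write 1st prev ptr (cf. 1478) */

	/* Walk exactly once around, as the teardown loops at 1494-1498 do. */
	struct node *n = ring;
	do {
		printf("node %d\n", n->id);
		n = n->next;
	} while (n != ring);

	return 0;
}

The pointer-to-pointer cursor is what lets the loop body treat the first node and every later node identically, with no special case for an empty list.
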
1514 struct xlog *log, in xlog_commit_record() argument
1519 struct xfs_mount *mp = log->l_mp; in xlog_commit_record()
1532 error = xlog_write(log, &vec, ticket, commitlsnp, iclog, in xlog_commit_record()
1548 struct xlog *log, in xlog_grant_push_ail() argument
1559 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_ail()
1561 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_ail()
1570 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_ail()
1575 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_ail()
1578 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_ail()
1579 threshold_block -= log->l_logBBsize; in xlog_grant_push_ail()
1589 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_ail()
1598 if (!XLOG_FORCED_SHUTDOWN(log)) in xlog_grant_push_ail()
1599 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
1607 struct xlog *log, in xlog_pack_data() argument
1627 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_pack_data()
1638 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1651 struct xlog *log, in xlog_cksum() argument
1664 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) { in xlog_cksum()
1715 struct xlog *log, in xlog_write_iclog() argument
1721 ASSERT(bno < log->l_logBBsize); in xlog_write_iclog()
1748 bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); in xlog_write_iclog()
1749 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1764 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_write_iclog()
1767 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1773 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1786 struct xlog *log, in xlog_split_iclog() argument
1791 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); in xlog_split_iclog()
1805 struct xlog *log, in xlog_calc_iclog_size() argument
1812 use_lsunit = xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_calc_iclog_size()
1813 log->l_mp->m_sb.sb_logsunit > 1; in xlog_calc_iclog_size()
1816 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
1821 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init)); in xlog_calc_iclog_size()
1830 ASSERT(*roundoff < log->l_mp->m_sb.sb_logsunit); in xlog_calc_iclog_size()
1861 struct xlog *log, in xlog_sync() argument
1872 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
1875 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
1876 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
1879 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1883 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) in xlog_sync()
1887 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
1888 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
1893 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_sync()
1894 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
1899 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1909 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
1912 xfs_warn(log->l_mp, in xlog_sync()
1926 if (log->l_targ != log->l_mp->m_ddev_targp || split) { in xlog_sync()
1927 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp); in xlog_sync()
1931 xlog_verify_iclog(log, iclog, count); in xlog_sync()
1932 xlog_write_iclog(log, iclog, bno, count, need_flush); in xlog_sync()
1940 struct xlog *log) in xlog_dealloc_log() argument
1945 xlog_cil_destroy(log); in xlog_dealloc_log()
1951 iclog = log->l_iclog; in xlog_dealloc_log()
1952 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1958 iclog = log->l_iclog; in xlog_dealloc_log()
1959 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
1966 log->l_mp->m_log = NULL; in xlog_dealloc_log()
1967 destroy_workqueue(log->l_ioend_workqueue); in xlog_dealloc_log()
1968 kmem_free(log); in xlog_dealloc_log()
1977 struct xlog *log, in xlog_state_finish_copy() argument
1982 spin_lock(&log->l_icloglock); in xlog_state_finish_copy()
1987 spin_unlock(&log->l_icloglock); in xlog_state_finish_copy()
2175 struct xlog *log, in xlog_write_setup_ophdr() argument
2198 xfs_warn(log->l_mp, in xlog_write_setup_ophdr()
2258 struct xlog *log, in xlog_write_copy_finish() argument
2273 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2276 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2284 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2288 spin_lock(&log->l_icloglock); in xlog_write_copy_finish()
2289 xlog_state_want_sync(log, iclog); in xlog_write_copy_finish()
2290 spin_unlock(&log->l_icloglock); in xlog_write_copy_finish()
2293 return xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2343 struct xlog *log, in xlog_write() argument
2382 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2384 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2385 xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2395 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2437 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags); in xlog_write()
2450 xlog_verify_dest_ptr(log, ptr); in xlog_write()
2470 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2511 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2513 return xlog_state_release_iclog(log, iclog); in xlog_write()
2546 struct xlog *log, in xlog_state_clean_iclog() argument
2557 iclog = log->l_iclog; in xlog_state_clean_iclog()
2592 } while (iclog != log->l_iclog); in xlog_state_clean_iclog()
2609 switch (log->l_covered_state) { in xlog_state_clean_iclog()
2613 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_iclog()
2618 log->l_covered_state = XLOG_STATE_COVER_NEED2; in xlog_state_clean_iclog()
2620 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_iclog()
2625 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_state_clean_iclog()
2627 log->l_covered_state = XLOG_STATE_COVER_NEED; in xlog_state_clean_iclog()
2638 struct xlog *log) in xlog_get_lowest_lsn() argument
2640 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn()
2650 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
2679 struct xlog *log, in xlog_state_set_callback() argument
2685 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_set_callback()
2691 atomic64_set(&log->l_last_sync_lsn, header_lsn); in xlog_state_set_callback()
2692 xlog_grant_push_ail(log, 0); in xlog_state_set_callback()
2702 struct xlog *log, in xlog_state_iodone_process_iclog() argument
2752 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_iodone_process_iclog()
2756 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2772 struct xlog *log, in xlog_state_do_iclog_callbacks() argument
2776 spin_unlock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2793 spin_lock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2811 struct xlog *log) in xlog_state_callback_check_state() argument
2813 struct xlog_in_core *first_iclog = log->l_iclog; in xlog_state_callback_check_state()
2841 struct xlog *log, in xlog_state_do_callback() argument
2853 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2863 first_iclog = log->l_iclog; in xlog_state_do_callback()
2864 iclog = log->l_iclog; in xlog_state_do_callback()
2870 if (xlog_state_iodone_process_iclog(log, iclog, in xlog_state_do_callback()
2885 xlog_state_do_iclog_callbacks(log, iclog, aborted); in xlog_state_do_callback()
2887 xlog_state_clean_iclog(log, iclog); in xlog_state_do_callback()
2896 xfs_warn(log->l_mp, in xlog_state_do_callback()
2903 xlog_state_callback_check_state(log); in xlog_state_do_callback()
2905 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) in xlog_state_do_callback()
2906 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2908 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2930 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2932 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2953 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2954 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */ in xlog_state_done_syncing()
2978 struct xlog *log, in xlog_state_get_iclog_space() argument
2991 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2992 if (XLOG_FORCED_SHUTDOWN(log)) { in xlog_state_get_iclog_space()
2993 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2997 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2999 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
3002 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
3017 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
3019 log->l_iclog_hsize, in xlog_state_get_iclog_space()
3021 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
3023 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
3024 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
3037 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3048 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3049 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
3053 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3069 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3074 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3089 struct xlog *log, in xlog_regrant_reserve_log_space() argument
3092 trace_xfs_log_regrant_reserve_enter(log, ticket); in xlog_regrant_reserve_log_space()
3097 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3099 xlog_grant_sub_space(log, &log->l_write_head.grant, in xlog_regrant_reserve_log_space()
3104 trace_xfs_log_regrant_reserve_sub(log, ticket); in xlog_regrant_reserve_log_space()
3110 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xlog_regrant_reserve_log_space()
3113 trace_xfs_log_regrant_reserve_exit(log, ticket); in xlog_regrant_reserve_log_space()
3136 struct xlog *log, in xlog_ungrant_log_space() argument
3144 trace_xfs_log_ungrant_enter(log, ticket); in xlog_ungrant_log_space()
3145 trace_xfs_log_ungrant_sub(log, ticket); in xlog_ungrant_log_space()
3157 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xlog_ungrant_log_space()
3158 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xlog_ungrant_log_space()
3160 trace_xfs_log_ungrant_exit(log, ticket); in xlog_ungrant_log_space()
3162 xfs_log_space_wake(log->l_mp); in xlog_ungrant_log_space()
3176 struct xlog *log, in xlog_state_release_iclog() argument
3185 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) in xlog_state_release_iclog()
3189 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3197 xfs_lsn_t tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
3201 xlog_verify_tail_lsn(log, iclog, tail_lsn); in xlog_state_release_iclog()
3204 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
3214 xlog_sync(log, iclog); in xlog_state_release_iclog()
3228 struct xlog *log, in xlog_state_switch_iclogs() argument
3236 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3237 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3238 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3241 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3244 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) && in xlog_state_switch_iclogs()
3245 log->l_mp->m_sb.sb_logsunit > 1) { in xlog_state_switch_iclogs()
3246 uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit); in xlog_state_switch_iclogs()
3247 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3250 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3258 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3259 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3261 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3262 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3263 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3265 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3266 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
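
The xlog_state_switch_iclogs() hits (3236-3266) show the head position advancing in basic blocks, optionally rounding up to the stripe unit, then wrapping: once l_curr_block passes the log size it is reduced by one log length, the cycle counter bumps, and a cycle equal to XLOG_HEADER_MAGIC_NUM is skipped so an on-disk cycle field can never be mistaken for a header magic value. A hedged sketch of that advance/wrap step; HEADER_MAGIC_CYCLE and the other names are stand-ins:

#include <stdio.h>

#define HEADER_MAGIC_CYCLE 0xFEEDbabe  /* stand-in for XLOG_HEADER_MAGIC_NUM */

struct logpos {
	unsigned int cycle;  /* lap counter */
	int block;           /* basic-block offset into the log */
};

/* Round x up to the next multiple of m (m > 0). */
static int round_up_to(int x, int m)
{
	return ((x + m - 1) / m) * m;
}

/*
 * Advance the write head by 'blocks', rounding to a stripe unit and
 * wrapping at the end of the log; sketch of the switch_iclogs hits.
 */
static void advance_head(struct logpos *pos, int log_bblocks,
			 int blocks, int sunit_bb)
{
	pos->block += blocks;
	if (sunit_bb > 1)
		pos->block = round_up_to(pos->block, sunit_bb);

	if (pos->block >= log_bblocks) {
		pos->block -= log_bblocks;          /* cf. line 3258 */
		pos->cycle++;                       /* cf. line 3261 */
		if (pos->cycle == HEADER_MAGIC_CYCLE)
			pos->cycle++;               /* cf. line 3263 */
	}
}

int main(void)
{
	struct logpos pos = { .cycle = 1, .block = 2040 };

	advance_head(&pos, 2048, 16, 8);  /* wraps: cycle 2, block 8 */
	printf("cycle %u block %d\n", pos.cycle, pos.block);
	return 0;
}
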
3301 struct xlog *log = mp->m_log; in xfs_log_force() local
3308 xlog_cil_force(log); in xfs_log_force()
3310 spin_lock(&log->l_icloglock); in xfs_log_force()
3311 iclog = log->l_iclog; in xfs_log_force()
3341 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3342 spin_unlock(&log->l_icloglock); in xfs_log_force()
3344 if (xlog_state_release_iclog(log, iclog)) in xfs_log_force()
3347 spin_lock(&log->l_icloglock); in xfs_log_force()
3359 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3375 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xfs_log_force()
3381 spin_unlock(&log->l_icloglock); in xfs_log_force()
3384 spin_unlock(&log->l_icloglock); in xfs_log_force()
3396 struct xlog *log = mp->m_log; in __xfs_log_force_lsn() local
3399 spin_lock(&log->l_icloglock); in __xfs_log_force_lsn()
3400 iclog = log->l_iclog; in __xfs_log_force_lsn()
3406 if (iclog == log->l_iclog) in __xfs_log_force_lsn()
3437 &log->l_icloglock); in __xfs_log_force_lsn()
3441 xlog_state_switch_iclogs(log, iclog, 0); in __xfs_log_force_lsn()
3442 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3443 if (xlog_state_release_iclog(log, iclog)) in __xfs_log_force_lsn()
3447 spin_lock(&log->l_icloglock); in __xfs_log_force_lsn()
3458 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in __xfs_log_force_lsn()
3464 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3467 spin_unlock(&log->l_icloglock); in __xfs_log_force_lsn()
3514 struct xlog *log, in xlog_state_want_sync() argument
3517 assert_spin_locked(&log->l_icloglock); in xlog_state_want_sync()
3520 xlog_state_switch_iclogs(log, iclog, 0); in xlog_state_want_sync()
3565 struct xlog *log = mp->m_log; in xfs_log_calc_unit_res() local
3624 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xfs_log_calc_unit_res()
3636 unit_bytes += log->l_iclog_hsize * num_headers; in xfs_log_calc_unit_res()
3639 unit_bytes += log->l_iclog_hsize; in xfs_log_calc_unit_res()
3658 struct xlog *log, in xlog_ticket_alloc() argument
3672 unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); in xlog_ticket_alloc()
3707 struct xlog *log, in xlog_verify_dest_ptr() argument
3713 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_verify_dest_ptr()
3714 if (ptr >= log->l_iclog_bak[i] && in xlog_verify_dest_ptr()
3715 ptr <= log->l_iclog_bak[i] + log->l_iclog_size) in xlog_verify_dest_ptr()
3720 xfs_emerg(log->l_mp, "%s: invalid ptr", __func__); in xlog_verify_dest_ptr()
3736 struct xlog *log) in xlog_verify_grant_tail() argument
3741 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3742 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3745 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3746 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3748 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3752 !(log->l_flags & XLOG_TAIL_WARN)) { in xlog_verify_grant_tail()
3753 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3755 log->l_flags |= XLOG_TAIL_WARN; in xlog_verify_grant_tail()
3763 struct xlog *log, in xlog_verify_tail_lsn() argument
3769 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3771 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3772 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3773 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3775 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3777 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3778 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3780 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3782 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3803 struct xlog *log, in xlog_verify_iclog() argument
3817 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3818 icptr = log->l_iclog; in xlog_verify_iclog()
3819 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3822 if (icptr != log->l_iclog) in xlog_verify_iclog()
3823 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3824 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3828 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3834 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3864 xfs_warn(log->l_mp, in xlog_verify_iclog()
3895 struct xlog *log) in xlog_state_ioerror() argument
3899 iclog = log->l_iclog; in xlog_state_ioerror()
3939 struct xlog *log; in xfs_log_force_umount() local
3942 log = mp->m_log; in xfs_log_force_umount()
3948 if (!log || in xfs_log_force_umount()
3949 log->l_flags & XLOG_ACTIVE_RECOVERY) { in xfs_log_force_umount()
3960 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) { in xfs_log_force_umount()
3961 ASSERT(XLOG_FORCED_SHUTDOWN(log)); in xfs_log_force_umount()
3979 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
3988 log->l_flags |= XLOG_IO_ERROR; in xfs_log_force_umount()
3989 retval = xlog_state_ioerror(log); in xfs_log_force_umount()
3990 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
3999 xlog_grant_head_wake_all(&log->l_reserve_head); in xfs_log_force_umount()
4000 xlog_grant_head_wake_all(&log->l_write_head); in xfs_log_force_umount()
4008 spin_lock(&log->l_cilp->xc_push_lock); in xfs_log_force_umount()
4009 wake_up_all(&log->l_cilp->xc_commit_wait); in xfs_log_force_umount()
4010 spin_unlock(&log->l_cilp->xc_push_lock); in xfs_log_force_umount()
4011 xlog_state_do_callback(log, true, NULL); in xfs_log_force_umount()
4017 spin_lock(&log->l_icloglock); in xfs_log_force_umount()
4018 iclog = log->l_iclog; in xfs_log_force_umount()
4022 } while (iclog != log->l_iclog); in xfs_log_force_umount()
4023 spin_unlock(&log->l_icloglock); in xfs_log_force_umount()
4032 struct xlog *log) in xlog_iclogs_empty() argument
4036 iclog = log->l_iclog; in xlog_iclogs_empty()
4044 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
4057 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
4079 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
4084 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
4085 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()
4095 struct xlog *log = mp->m_log; in xfs_log_in_recovery() local
4097 return log->l_flags & XLOG_ACTIVE_RECOVERY; in xfs_log_in_recovery()