Lines matching references to 'log'

35 	struct xlog		*log,
39 struct xlog *log);
45 struct xlog *log);
48 struct xlog *log,
55 struct xlog *log,
59 struct xlog *log,
65 struct xlog *log);
68 struct xlog *log,
73 struct xlog *log,
83 struct xlog *log);
146 struct xlog *log, in xlog_grant_sub_space() argument
160 space += log->l_logsize; in xlog_grant_sub_space()
172 struct xlog *log, in xlog_grant_add_space() argument
185 tmp = log->l_logsize - space; in xlog_grant_add_space()
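The two grant-head helpers above (xlog_grant_sub_space() and xlog_grant_add_space()) keep a cycle count and a byte offset packed into a single 64-bit word so the head can be updated atomically, wrapping the offset around the physical log size. A minimal userspace model of that wrap arithmetic follows; the packing layout matches the kernel's, but LOGSIZE and the demo values are illustrative:

/*
 * Userspace sketch of the grant-head accounting in
 * xlog_grant_sub_space()/xlog_grant_add_space(): a cycle count and a
 * byte offset packed into one 64-bit word, updated with wrap-around.
 */
#include <stdint.h>
#include <stdio.h>

#define LOGSIZE	(32u * 1024 * 1024)	/* illustrative log size in bytes */

static uint64_t pack(int32_t cycle, int32_t bytes)
{
	return ((uint64_t)cycle << 32) | (uint32_t)bytes;
}

static void crack(uint64_t head, int32_t *cycle, int32_t *bytes)
{
	*cycle = (int32_t)(head >> 32);
	*bytes = (int32_t)(head & 0xffffffff);
}

/* borrow a full log size when the subtraction underflows */
static uint64_t grant_sub_space(uint64_t head, int bytes)
{
	int32_t cycle, space;

	crack(head, &cycle, &space);
	space -= bytes;
	if (space < 0) {
		space += LOGSIZE;	/* the "space += log->l_logsize" above */
		cycle--;
	}
	return pack(cycle, space);
}

/* "tmp = l_logsize - space" decides whether the addition wraps forward */
static uint64_t grant_add_space(uint64_t head, int bytes)
{
	int32_t cycle, space, tmp;

	crack(head, &cycle, &space);
	tmp = LOGSIZE - space;
	if (tmp > bytes) {
		space += bytes;
	} else {
		space = bytes - tmp;
		cycle++;
	}
	return pack(cycle, space);
}

int main(void)
{
	uint64_t head = pack(1, LOGSIZE - 512);
	int32_t cycle, bytes;

	head = grant_add_space(head, 4096);	/* wraps into the next cycle */
	crack(head, &cycle, &bytes);
	printf("cycle=%d bytes=%d\n", cycle, bytes);	/* cycle=2 bytes=3584 */
	return 0;
}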
222 struct xlog *log, in xlog_ticket_reservation() argument
226 if (head == &log->l_write_head) { in xlog_ticket_reservation()
239 struct xlog *log, in xlog_grant_head_wake() argument
270 need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_wake()
273 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wake()
278 trace_xfs_log_grant_wake_up(log, tic); in xlog_grant_head_wake()
288 struct xlog *log, in xlog_grant_head_wait() argument
297 if (xlog_is_shutdown(log)) in xlog_grant_head_wait()
299 xlog_grant_push_ail(log, need_bytes); in xlog_grant_head_wait()
304 XFS_STATS_INC(log->l_mp, xs_sleep_logspace); in xlog_grant_head_wait()
306 trace_xfs_log_grant_sleep(log, tic); in xlog_grant_head_wait()
308 trace_xfs_log_grant_wake(log, tic); in xlog_grant_head_wait()
311 if (xlog_is_shutdown(log)) in xlog_grant_head_wait()
313 } while (xlog_space_left(log, &head->grant) < need_bytes); in xlog_grant_head_wait()
341 struct xlog *log, in xlog_grant_head_check() argument
349 ASSERT(!xlog_in_recovery(log)); in xlog_grant_head_check()
357 *need_bytes = xlog_ticket_reservation(log, head, tic); in xlog_grant_head_check()
358 free_bytes = xlog_space_left(log, &head->grant); in xlog_grant_head_check()
361 if (!xlog_grant_head_wake(log, head, &free_bytes) || in xlog_grant_head_check()
363 error = xlog_grant_head_wait(log, head, tic, in xlog_grant_head_check()
369 error = xlog_grant_head_wait(log, head, tic, *need_bytes); in xlog_grant_head_check()
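xlog_grant_head_wait() above sleeps until xlog_space_left() reports enough room, rechecking after every wakeup and bailing out with -EIO on shutdown. A rough pthread rendering of that sleep/wake protocol; the kernel uses its own waitqueues under head->lock, so every name here is illustrative:

/*
 * Sketch only: condition-variable model of the grant head wait loop.
 * The real code sleeps on a per-head waitqueue and pushes the AIL
 * before sleeping; both are elided here.
 */
#include <pthread.h>
#include <stdbool.h>

struct grant_head_sketch {
	pthread_mutex_t	lock;
	pthread_cond_t	wait;
	long		free_bytes;
	bool		shutdown;
};

static int grant_head_wait(struct grant_head_sketch *head, long need_bytes)
{
	bool shut;

	pthread_mutex_lock(&head->lock);
	while (head->free_bytes < need_bytes && !head->shutdown)
		pthread_cond_wait(&head->wait, &head->lock);	/* lock dropped while asleep */
	shut = head->shutdown;
	pthread_mutex_unlock(&head->lock);
	return shut ? -5 : 0;		/* -EIO on shutdown, as in the kernel */
}

/* the release side returns space and wakes all waiters */
static void grant_head_wake(struct grant_head_sketch *head, long freed)
{
	pthread_mutex_lock(&head->lock);
	head->free_bytes += freed;
	pthread_cond_broadcast(&head->wait);
	pthread_mutex_unlock(&head->lock);
}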
405 struct xlog *log = mp->m_log; in xfs_log_regrant() local
409 if (xlog_is_shutdown(log)) in xfs_log_regrant()
422 xlog_grant_push_ail(log, tic->t_unit_res); in xfs_log_regrant()
428 trace_xfs_log_regrant(log, tic); in xfs_log_regrant()
430 error = xlog_grant_head_check(log, &log->l_write_head, tic, in xfs_log_regrant()
435 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_regrant()
436 trace_xfs_log_regrant_exit(log, tic); in xfs_log_regrant()
437 xlog_verify_grant_tail(log); in xfs_log_regrant()
467 struct xlog *log = mp->m_log; in xfs_log_reserve() local
472 if (xlog_is_shutdown(log)) in xfs_log_reserve()
478 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent); in xfs_log_reserve()
481 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt in xfs_log_reserve()
484 trace_xfs_log_reserve(log, tic); in xfs_log_reserve()
486 error = xlog_grant_head_check(log, &log->l_reserve_head, tic, in xfs_log_reserve()
491 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes); in xfs_log_reserve()
492 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes); in xfs_log_reserve()
493 trace_xfs_log_reserve_exit(log, tic); in xfs_log_reserve()
494 xlog_verify_grant_tail(log); in xfs_log_reserve()
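The exit paths above show the key asymmetry between the two entry points: xfs_log_reserve() adds need_bytes to both the reserve and write grant heads, while xfs_log_regrant() only touches the write head, since a permanent transaction's standing reservation is already counted. Sketched with the grant_add_space() helper from the earlier model:

/*
 * Sketch only: reuses grant_add_space() from the model above.  A fresh
 * reservation charges both heads; a regrant charges the write head.
 */
#include <stdint.h>

struct sketch_log {
	uint64_t reserve_head;	/* space promised to transactions */
	uint64_t write_head;	/* space consumed by log writes */
};

static void sketch_reserve(struct sketch_log *log, int need_bytes)
{
	log->reserve_head = grant_add_space(log->reserve_head, need_bytes);
	log->write_head = grant_add_space(log->write_head, need_bytes);
}

static void sketch_regrant(struct sketch_log *log, int need_bytes)
{
	/* the standing reservation is untouched; only the write head moves */
	log->write_head = grant_add_space(log->write_head, need_bytes);
}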
524 struct xlog *log) in xlog_state_shutdown_callbacks() argument
529 iclog = log->l_iclog; in xlog_state_shutdown_callbacks()
536 spin_unlock(&log->l_icloglock); in xlog_state_shutdown_callbacks()
540 spin_lock(&log->l_icloglock); in xlog_state_shutdown_callbacks()
543 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_shutdown_callbacks()
545 wake_up_all(&log->l_flush_wait); in xlog_state_shutdown_callbacks()
570 struct xlog *log, in xlog_state_release_iclog() argument
577 lockdep_assert_held(&log->l_icloglock); in xlog_state_release_iclog()
589 tail_lsn = xlog_assign_tail_lsn(log->l_mp); in xlog_state_release_iclog()
595 if (xlog_is_shutdown(log)) { in xlog_state_release_iclog()
602 xlog_state_shutdown_callbacks(log); in xlog_state_release_iclog()
615 xlog_verify_tail_lsn(log, iclog); in xlog_state_release_iclog()
618 spin_unlock(&log->l_icloglock); in xlog_state_release_iclog()
619 xlog_sync(log, iclog, ticket); in xlog_state_release_iclog()
620 spin_lock(&log->l_icloglock); in xlog_state_release_iclog()
641 struct xlog *log; in xfs_log_mount() local
658 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks); in xfs_log_mount()
659 if (IS_ERR(log)) { in xfs_log_mount()
660 error = PTR_ERR(log); in xfs_log_mount()
663 mp->m_log = log; in xfs_log_mount()
728 log->l_ailp = mp->m_ail; in xfs_log_mount()
741 error = xlog_recover(log); in xfs_log_mount()
747 xlog_recover_cancel(log); in xfs_log_mount()
752 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj, in xfs_log_mount()
758 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xfs_log_mount()
765 xlog_cil_init_post_recovery(log); in xfs_log_mount()
772 xlog_dealloc_log(log); in xfs_log_mount()
791 struct xlog *log = mp->m_log; in xfs_log_mount_finish() local
828 if (xlog_recovery_needed(log)) in xfs_log_mount_finish()
829 error = xlog_recover_finish(log); in xfs_log_mount_finish()
842 if (xlog_recovery_needed(log)) { in xfs_log_mount_finish()
854 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate); in xfs_log_mount_finish()
859 ASSERT(!error || xlog_is_shutdown(log)); in xfs_log_mount_finish()
896 xlog_wait_iclog_completion(struct xlog *log) in xlog_wait_iclog_completion() argument
899 struct xlog_in_core *iclog = log->l_iclog; in xlog_wait_iclog_completion()
901 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_wait_iclog_completion()
919 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog() local
922 if (!xlog_is_shutdown(log) && in xlog_wait_on_iclog()
925 XFS_STATS_INC(log->l_mp, xs_log_force_sleep); in xlog_wait_on_iclog()
926 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
928 spin_unlock(&log->l_icloglock); in xlog_wait_on_iclog()
931 if (xlog_is_shutdown(log)) in xlog_wait_on_iclog()
943 struct xlog *log, in xlog_write_unmount_record() argument
978 return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len); in xlog_write_unmount_record()
987 struct xlog *log) in xlog_unmount_write() argument
989 struct xfs_mount *mp = log->l_mp; in xlog_unmount_write()
998 error = xlog_write_unmount_record(log, tic); in xlog_unmount_write()
1007 spin_lock(&log->l_icloglock); in xlog_unmount_write()
1008 iclog = log->l_iclog; in xlog_unmount_write()
1013 trace_xfs_log_umount_write(log, tic); in xlog_unmount_write()
1014 xfs_log_ticket_ungrant(log, tic); in xlog_unmount_write()
1020 struct xlog *log) in xfs_log_unmount_verify_iclog() argument
1022 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog()
1027 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
1041 struct xlog *log = mp->m_log; in xfs_log_unmount_write() local
1048 if (xlog_is_shutdown(log)) in xfs_log_unmount_write()
1064 xfs_log_unmount_verify_iclog(log); in xfs_log_unmount_write()
1065 xlog_unmount_write(log); in xfs_log_unmount_write()
1176 struct xlog *log = mp->m_log; in xfs_log_space_wake() local
1179 if (xlog_is_shutdown(log)) in xfs_log_space_wake()
1182 if (!list_empty_careful(&log->l_write_head.waiters)) { in xfs_log_space_wake()
1183 ASSERT(!xlog_in_recovery(log)); in xfs_log_space_wake()
1185 spin_lock(&log->l_write_head.lock); in xfs_log_space_wake()
1186 free_bytes = xlog_space_left(log, &log->l_write_head.grant); in xfs_log_space_wake()
1187 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes); in xfs_log_space_wake()
1188 spin_unlock(&log->l_write_head.lock); in xfs_log_space_wake()
1191 if (!list_empty_careful(&log->l_reserve_head.waiters)) { in xfs_log_space_wake()
1192 ASSERT(!xlog_in_recovery(log)); in xfs_log_space_wake()
1194 spin_lock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1195 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xfs_log_space_wake()
1196 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes); in xfs_log_space_wake()
1197 spin_unlock(&log->l_reserve_head.lock); in xfs_log_space_wake()
1222 struct xlog *log = mp->m_log; in xfs_log_need_covered() local
1225 if (!xlog_cil_empty(log)) in xfs_log_need_covered()
1228 spin_lock(&log->l_icloglock); in xfs_log_need_covered()
1229 switch (log->l_covered_state) { in xfs_log_need_covered()
1236 if (xfs_ail_min_lsn(log->l_ailp)) in xfs_log_need_covered()
1238 if (!xlog_iclogs_empty(log)) in xfs_log_need_covered()
1242 if (log->l_covered_state == XLOG_STATE_COVER_NEED) in xfs_log_need_covered()
1243 log->l_covered_state = XLOG_STATE_COVER_DONE; in xfs_log_need_covered()
1245 log->l_covered_state = XLOG_STATE_COVER_DONE2; in xfs_log_need_covered()
1251 spin_unlock(&log->l_icloglock); in xfs_log_need_covered()
1316 struct xlog *log = mp->m_log; in xlog_assign_tail_lsn_locked() local
1331 tail_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_assign_tail_lsn_locked()
1332 trace_xfs_log_assign_tail_lsn(log, tail_lsn); in xlog_assign_tail_lsn_locked()
1333 atomic64_set(&log->l_tail_lsn, tail_lsn); in xlog_assign_tail_lsn_locked()
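The tail-LSN code above manipulates LSNs as single atomic64 values because an LSN packs a 32-bit cycle count over a 32-bit block number. Standalone versions of the assign/crack/compare helpers that pattern relies on (names are sketch-local):

#include <stdint.h>

typedef uint64_t lsn_sketch_t;

static lsn_sketch_t assign_lsn(uint32_t cycle, uint32_t block)
{
	return ((uint64_t)cycle << 32) | block;
}

static uint32_t lsn_cycle(lsn_sketch_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t lsn_block(lsn_sketch_t lsn) { return (uint32_t)lsn; }

/* compare like XFS_LSN_CMP(): cycle first, then block */
static int lsn_cmp(lsn_sketch_t a, lsn_sketch_t b)
{
	if (lsn_cycle(a) != lsn_cycle(b))
		return lsn_cycle(a) < lsn_cycle(b) ? -1 : 1;
	if (lsn_block(a) != lsn_block(b))
		return lsn_block(a) < lsn_block(b) ? -1 : 1;
	return 0;
}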
1369 struct xlog *log, in xlog_space_left() argument
1378 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes); in xlog_space_left()
1381 return log->l_logsize - (head_bytes - tail_bytes); in xlog_space_left()
1386 if (xlog_is_shutdown(log)) in xlog_space_left()
1387 return log->l_logsize; in xlog_space_left()
1398 xfs_alert(log->l_mp, "xlog_space_left: head behind tail"); in xlog_space_left()
1399 xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d", in xlog_space_left()
1401 xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d", in xlog_space_left()
1404 return log->l_logsize; in xlog_space_left()
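xlog_space_left() above computes free space as the distance from the grant head back to the tail. A simplified model of the case analysis, with the shutdown short-circuit and the "head behind tail" alert collapsed into the final return:

static int space_left(int logsize, int tail_cycle, int tail_bytes,
		      int head_cycle, int head_bytes)
{
	if (head_cycle == tail_cycle)
		/* head and tail in the same cycle: one subtraction */
		return logsize - (head_bytes - tail_bytes);
	if (head_cycle > tail_cycle + 1)
		return 0;		/* corrupt state: treat the log as full */
	if (head_cycle == tail_cycle + 1)
		/* head has wrapped once; it must not have passed the tail */
		return tail_bytes - head_bytes;
	return logsize;			/* head behind tail: error case */
}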
1414 struct xlog *log = iclog->ic_log; in xlog_ioend_work() local
1427 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) { in xlog_ioend_work()
1428 xfs_alert(log->l_mp, "log I/O error %d", error); in xlog_ioend_work()
1429 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_ioend_work()
1455 struct xlog *log) in xlog_get_iclog_buffer_size() argument
1462 log->l_iclog_bufs = mp->m_logbufs; in xlog_get_iclog_buffer_size()
1463 log->l_iclog_size = mp->m_logbsize; in xlog_get_iclog_buffer_size()
1468 log->l_iclog_heads = in xlog_get_iclog_buffer_size()
1470 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT; in xlog_get_iclog_buffer_size()
1489 struct xlog *log) in xlog_clear_incompat() argument
1491 struct xfs_mount *mp = log->l_mp; in xlog_clear_incompat()
1497 if (log->l_covered_state != XLOG_STATE_COVER_DONE2) in xlog_clear_incompat()
1500 if (!down_write_trylock(&log->l_incompat_users)) in xlog_clear_incompat()
1504 up_write(&log->l_incompat_users); in xlog_clear_incompat()
1516 struct xlog *log = container_of(to_delayed_work(work), in xfs_log_worker() local
1518 struct xfs_mount *mp = log->l_mp; in xfs_log_worker()
1533 xlog_clear_incompat(log); in xfs_log_worker()
1557 struct xlog *log; in xlog_alloc_log() local
1565 log = kmem_zalloc(sizeof(struct xlog), KM_MAYFAIL); in xlog_alloc_log()
1566 if (!log) { in xlog_alloc_log()
1571 log->l_mp = mp; in xlog_alloc_log()
1572 log->l_targ = log_target; in xlog_alloc_log()
1573 log->l_logsize = BBTOB(num_bblks); in xlog_alloc_log()
1574 log->l_logBBstart = blk_offset; in xlog_alloc_log()
1575 log->l_logBBsize = num_bblks; in xlog_alloc_log()
1576 log->l_covered_state = XLOG_STATE_COVER_IDLE; in xlog_alloc_log()
1577 set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate); in xlog_alloc_log()
1578 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker); in xlog_alloc_log()
1580 log->l_prev_block = -1; in xlog_alloc_log()
1582 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0); in xlog_alloc_log()
1583 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0); in xlog_alloc_log()
1584 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */ in xlog_alloc_log()
1587 log->l_iclog_roundoff = mp->m_sb.sb_logsunit; in xlog_alloc_log()
1589 log->l_iclog_roundoff = BBSIZE; in xlog_alloc_log()
1591 xlog_grant_head_init(&log->l_reserve_head); in xlog_alloc_log()
1592 xlog_grant_head_init(&log->l_write_head); in xlog_alloc_log()
1611 if (log2_size && log->l_logBBstart > 0 && in xlog_alloc_log()
1619 log->l_sectBBsize = 1 << log2_size; in xlog_alloc_log()
1621 init_rwsem(&log->l_incompat_users); in xlog_alloc_log()
1623 xlog_get_iclog_buffer_size(mp, log); in xlog_alloc_log()
1625 spin_lock_init(&log->l_icloglock); in xlog_alloc_log()
1626 init_waitqueue_head(&log->l_flush_wait); in xlog_alloc_log()
1628 iclogp = &log->l_iclog; in xlog_alloc_log()
1636 ASSERT(log->l_iclog_size >= 4096); in xlog_alloc_log()
1637 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_alloc_log()
1638 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) * in xlog_alloc_log()
1649 iclog->ic_data = kvzalloc(log->l_iclog_size, in xlog_alloc_log()
1657 xfs_has_logv2(log->l_mp) ? 2 : 1); in xlog_alloc_log()
1658 head->h_size = cpu_to_be32(log->l_iclog_size); in xlog_alloc_log()
1663 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1665 iclog->ic_log = log; in xlog_alloc_log()
1668 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1677 *iclogp = log->l_iclog; /* complete ring */ in xlog_alloc_log()
1678 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */ in xlog_alloc_log()
1680 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s", in xlog_alloc_log()
1684 if (!log->l_ioend_workqueue) in xlog_alloc_log()
1687 error = xlog_cil_init(log); in xlog_alloc_log()
1690 return log; in xlog_alloc_log()
1693 destroy_workqueue(log->l_ioend_workqueue); in xlog_alloc_log()
1695 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1699 if (prev_iclog == log->l_iclog) in xlog_alloc_log()
1703 kmem_free(log); in xlog_alloc_log()
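The allocation loop in xlog_alloc_log() links the iclogs into a ring: each buffer is appended through a forward pointer, and the comments near the end ("complete ring", "re-write 1st prev ptr") close the circle. A minimal standalone version of just the ring construction, assuming nbufs >= 1 and eliding the buffer payloads and error unwinding:

#include <stdlib.h>

struct iclog_sketch {
	struct iclog_sketch *next;
	struct iclog_sketch *prev;
};

static struct iclog_sketch *alloc_ring(int nbufs)
{
	struct iclog_sketch *first = NULL, *prev = NULL, **nextp = &first;

	for (int i = 0; i < nbufs; i++) {
		struct iclog_sketch *ic = calloc(1, sizeof(*ic));

		if (!ic)
			return NULL;	/* a real version would unwind here */
		*nextp = ic;		/* append to the forward chain */
		ic->prev = prev;
		prev = ic;
		nextp = &ic->next;
	}
	*nextp = first;			/* complete the ring */
	first->prev = prev;		/* re-write first prev pointer */
	return first;
}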
1717 struct xlog *log, in xlog_grant_push_threshold() argument
1728 ASSERT(BTOBB(need_bytes) < log->l_logBBsize); in xlog_grant_push_threshold()
1730 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant); in xlog_grant_push_threshold()
1739 free_threshold = max(free_threshold, (log->l_logBBsize >> 2)); in xlog_grant_push_threshold()
1744 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle, in xlog_grant_push_threshold()
1747 if (threshold_block >= log->l_logBBsize) { in xlog_grant_push_threshold()
1748 threshold_block -= log->l_logBBsize; in xlog_grant_push_threshold()
1758 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn); in xlog_grant_push_threshold()
1774 struct xlog *log, in xlog_grant_push_ail() argument
1779 threshold_lsn = xlog_grant_push_threshold(log, need_bytes); in xlog_grant_push_ail()
1780 if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log)) in xlog_grant_push_ail()
1788 xfs_ail_push(log->l_ailp, threshold_lsn); in xlog_grant_push_ail()
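xlog_grant_push_threshold() above picks a push target: the largest of the caller's need, a quarter of the log, and 256 basic blocks, placed that far past the current tail with cycle wrap. A sketch of that computation; the real code additionally clamps the result so it never passes the last LSN synced to disk:

#include <stdint.h>

/* units are 512-byte basic blocks; returns 0 for "nothing to push" */
static uint64_t push_threshold(int logbbsize, int tail_cycle, int tail_block,
			       int need_blocks, int free_blocks)
{
	int threshold = need_blocks;

	if (threshold < logbbsize >> 2)
		threshold = logbbsize >> 2;	/* at least a quarter of the log */
	if (threshold < 256)
		threshold = 256;
	if (free_blocks >= threshold)
		return 0;			/* NULLCOMMITLSN equivalent */

	tail_block += threshold;
	if (tail_block >= logbbsize) {
		tail_block -= logbbsize;	/* threshold wraps the device */
		tail_cycle++;
	}
	return ((uint64_t)tail_cycle << 32) | (uint32_t)tail_block;
}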
1796 struct xlog *log, in xlog_pack_data() argument
1816 if (xfs_has_logv2(log->l_mp)) { in xlog_pack_data()
1827 for (i = 1; i < log->l_iclog_heads; i++) in xlog_pack_data()
1840 struct xlog *log, in xlog_cksum() argument
1853 if (xfs_has_logv2(log->l_mp)) { in xlog_cksum()
1905 struct xlog *log, in xlog_write_iclog() argument
1910 ASSERT(bno < log->l_logBBsize); in xlog_write_iclog()
1922 if (xlog_is_shutdown(log)) { in xlog_write_iclog()
1941 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, in xlog_write_iclog()
1944 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1961 if (log->l_targ != log->l_mp->m_ddev_targp && in xlog_write_iclog()
1962 blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) { in xlog_write_iclog()
1963 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1973 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write_iclog()
1983 if (bno + BTOBB(count) > log->l_logBBsize) { in xlog_write_iclog()
1986 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1992 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
2005 struct xlog *log, in xlog_split_iclog() argument
2010 unsigned int split_offset = BBTOB(log->l_logBBsize - bno); in xlog_split_iclog()
2024 struct xlog *log, in xlog_calc_iclog_size() argument
2031 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
2032 count = roundup(count_init, log->l_iclog_roundoff); in xlog_calc_iclog_size()
2037 ASSERT(*roundoff < log->l_iclog_roundoff); in xlog_calc_iclog_size()
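xlog_calc_iclog_size() rounds the header plus pending data up to the log's roundoff granularity (the stripe unit, or a single basic block) and reports the padding added. As a standalone function:

static int calc_iclog_size(int hsize, int offset, int roundoff_unit,
			   int *roundoff)
{
	int count_init = hsize + offset;
	/* open-coded roundup() to the roundoff granularity */
	int count = (count_init + roundoff_unit - 1)
			/ roundoff_unit * roundoff_unit;

	*roundoff = count - count_init;		/* padding added by rounding */
	return count;
}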
2066 struct xlog *log, in xlog_sync() argument
2078 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
2088 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff); in xlog_sync()
2089 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff); in xlog_sync()
2093 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
2097 if (xfs_has_logv2(log->l_mp)) in xlog_sync()
2101 XFS_STATS_INC(log->l_mp, xs_log_writes); in xlog_sync()
2102 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count)); in xlog_sync()
2107 if (bno + BTOBB(count) > log->l_logBBsize) in xlog_sync()
2108 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
2111 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
2121 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) { in xlog_sync()
2124 xfs_warn(log->l_mp, in xlog_sync()
2129 xlog_verify_iclog(log, iclog, count); in xlog_sync()
2130 xlog_write_iclog(log, iclog, bno, count); in xlog_sync()
2138 struct xlog *log) in xlog_dealloc_log() argument
2148 xlog_cil_destroy(log); in xlog_dealloc_log()
2150 iclog = log->l_iclog; in xlog_dealloc_log()
2151 for (i = 0; i < log->l_iclog_bufs; i++) { in xlog_dealloc_log()
2158 log->l_mp->m_log = NULL; in xlog_dealloc_log()
2159 destroy_workqueue(log->l_ioend_workqueue); in xlog_dealloc_log()
2160 kmem_free(log); in xlog_dealloc_log()
2168 struct xlog *log, in xlog_state_finish_copy() argument
2173 lockdep_assert_held(&log->l_icloglock); in xlog_state_finish_copy()
2309 struct xlog *log = iclog->ic_log; in xlog_write_get_more_iclog_space() local
2312 spin_lock(&log->l_icloglock); in xlog_write_get_more_iclog_space()
2314 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_get_more_iclog_space()
2315 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write_get_more_iclog_space()
2316 spin_unlock(&log->l_icloglock); in xlog_write_get_more_iclog_space()
2320 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write_get_more_iclog_space()
2509 struct xlog *log, in xlog_write() argument
2524 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_write()
2526 xlog_print_tic_res(log->l_mp, ticket); in xlog_write()
2527 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); in xlog_write()
2530 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2575 spin_lock(&log->l_icloglock); in xlog_write()
2576 xlog_state_finish_copy(log, iclog, record_cnt, 0); in xlog_write()
2577 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write()
2578 spin_unlock(&log->l_icloglock); in xlog_write()
2623 struct xlog *log, in xlog_state_activate_iclogs() argument
2626 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs()
2637 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2676 struct xlog *log, in xlog_state_clean_iclog() argument
2685 xlog_state_activate_iclogs(log, &iclogs_changed); in xlog_state_clean_iclog()
2689 log->l_covered_state = xlog_covered_state(log->l_covered_state, in xlog_state_clean_iclog()
2696 struct xlog *log) in xlog_get_lowest_lsn() argument
2698 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn()
2709 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
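xlog_get_lowest_lsn() walks the iclog ring once and returns the smallest header LSN among iclogs still in flight. A simplified rendering, treating an LSN of zero as "idle":

#include <stdint.h>

struct iclog_lsn_sketch {
	struct iclog_lsn_sketch *next;
	uint64_t lsn;		/* 0 when the iclog is not in flight */
};

static uint64_t lowest_lsn(struct iclog_lsn_sketch *ring)
{
	struct iclog_lsn_sketch *ic = ring;
	uint64_t lowest = 0;

	do {
		if (ic->lsn && (!lowest || ic->lsn < lowest))
			lowest = ic->lsn;
	} while ((ic = ic->next) != ring);	/* full trip around the ring */
	return lowest;
}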
2738 struct xlog *log, in xlog_state_set_callback() argument
2745 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn), in xlog_state_set_callback()
2751 atomic64_set(&log->l_last_sync_lsn, header_lsn); in xlog_state_set_callback()
2752 xlog_grant_push_ail(log, 0); in xlog_state_set_callback()
2762 struct xlog *log, in xlog_state_iodone_process_iclog() argument
2783 lowest_lsn = xlog_get_lowest_lsn(log); in xlog_state_iodone_process_iclog()
2786 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2807 struct xlog *log) in xlog_state_do_iclog_callbacks() argument
2808 __releases(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2809 __acquires(&log->l_icloglock) in xlog_state_do_iclog_callbacks()
2811 struct xlog_in_core *first_iclog = log->l_iclog; in xlog_state_do_iclog_callbacks()
2818 if (xlog_state_iodone_process_iclog(log, iclog)) in xlog_state_do_iclog_callbacks()
2825 spin_unlock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2832 spin_lock(&log->l_icloglock); in xlog_state_do_iclog_callbacks()
2833 xlog_state_clean_iclog(log, iclog); in xlog_state_do_iclog_callbacks()
2847 struct xlog *log) in xlog_state_do_callback() argument
2852 spin_lock(&log->l_icloglock); in xlog_state_do_callback()
2853 while (xlog_state_do_iclog_callbacks(log)) { in xlog_state_do_callback()
2854 if (xlog_is_shutdown(log)) in xlog_state_do_callback()
2860 xfs_warn(log->l_mp, in xlog_state_do_callback()
2866 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_state_do_callback()
2867 wake_up_all(&log->l_flush_wait); in xlog_state_do_callback()
2869 spin_unlock(&log->l_icloglock); in xlog_state_do_callback()
2883 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing() local
2885 spin_lock(&log->l_icloglock); in xlog_state_done_syncing()
2894 if (!xlog_is_shutdown(log)) { in xlog_state_done_syncing()
2905 spin_unlock(&log->l_icloglock); in xlog_state_done_syncing()
2906 xlog_state_do_callback(log); in xlog_state_done_syncing()
2929 struct xlog *log, in xlog_state_get_iclog_space() argument
2940 spin_lock(&log->l_icloglock); in xlog_state_get_iclog_space()
2941 if (xlog_is_shutdown(log)) { in xlog_state_get_iclog_space()
2942 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
2946 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2948 XFS_STATS_INC(log->l_mp, xs_log_noiclogs); in xlog_state_get_iclog_space()
2951 xlog_wait(&log->l_flush_wait, &log->l_icloglock); in xlog_state_get_iclog_space()
2968 ticket->t_curr_res -= log->l_iclog_hsize; in xlog_state_get_iclog_space()
2969 head->h_cycle = cpu_to_be32(log->l_curr_cycle); in xlog_state_get_iclog_space()
2971 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block)); in xlog_state_get_iclog_space()
2972 ASSERT(log->l_curr_block >= 0); in xlog_state_get_iclog_space()
2987 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2997 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_state_get_iclog_space()
2998 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3013 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3017 spin_unlock(&log->l_icloglock); in xlog_state_get_iclog_space()
3032 struct xlog *log, in xfs_log_ticket_regrant() argument
3035 trace_xfs_log_ticket_regrant(log, ticket); in xfs_log_ticket_regrant()
3040 xlog_grant_sub_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
3042 xlog_grant_sub_space(log, &log->l_write_head.grant, in xfs_log_ticket_regrant()
3046 trace_xfs_log_ticket_regrant_sub(log, ticket); in xfs_log_ticket_regrant()
3050 xlog_grant_add_space(log, &log->l_reserve_head.grant, in xfs_log_ticket_regrant()
3052 trace_xfs_log_ticket_regrant_exit(log, ticket); in xfs_log_ticket_regrant()
3076 struct xlog *log, in xfs_log_ticket_ungrant() argument
3081 trace_xfs_log_ticket_ungrant(log, ticket); in xfs_log_ticket_ungrant()
3086 trace_xfs_log_ticket_ungrant_sub(log, ticket); in xfs_log_ticket_ungrant()
3098 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes); in xfs_log_ticket_ungrant()
3099 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes); in xfs_log_ticket_ungrant()
3101 trace_xfs_log_ticket_ungrant_exit(log, ticket); in xfs_log_ticket_ungrant()
3103 xfs_log_space_wake(log->l_mp); in xfs_log_ticket_ungrant()
3113 struct xlog *log, in xlog_state_switch_iclogs() argument
3118 assert_spin_locked(&log->l_icloglock); in xlog_state_switch_iclogs()
3124 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3125 log->l_prev_block = log->l_curr_block; in xlog_state_switch_iclogs()
3126 log->l_prev_cycle = log->l_curr_cycle; in xlog_state_switch_iclogs()
3129 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize); in xlog_state_switch_iclogs()
3132 if (log->l_iclog_roundoff > BBSIZE) { in xlog_state_switch_iclogs()
3133 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff); in xlog_state_switch_iclogs()
3134 log->l_curr_block = roundup(log->l_curr_block, sunit_bb); in xlog_state_switch_iclogs()
3137 if (log->l_curr_block >= log->l_logBBsize) { in xlog_state_switch_iclogs()
3145 log->l_curr_block -= log->l_logBBsize; in xlog_state_switch_iclogs()
3146 ASSERT(log->l_curr_block >= 0); in xlog_state_switch_iclogs()
3148 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3149 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM) in xlog_state_switch_iclogs()
3150 log->l_curr_cycle++; in xlog_state_switch_iclogs()
3152 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3153 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
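xlog_state_switch_iclogs() above advances the write head past the record just filled, rounds to the stripe unit when one is configured, and on wrap increments the cycle, skipping the value that would collide with the on-disk header magic (0xFEEDbabe). Modeled in isolation, with counts in 512-byte basic blocks:

#include <stdint.h>

#define SKETCH_HEADER_MAGIC	0xFEEDbabe	/* XLOG_HEADER_MAGIC_NUM */

static void switch_head(int *curr_cycle, int *curr_block,
			int logbbsize, int sunit_bb, int written_bb)
{
	*curr_block += written_bb;		/* data plus header blocks */
	if (sunit_bb > 1)			/* round to stripe unit */
		*curr_block = (*curr_block + sunit_bb - 1)
				/ sunit_bb * sunit_bb;
	if (*curr_block >= logbbsize) {
		*curr_block -= logbbsize;
		(*curr_cycle)++;
		if ((uint32_t)*curr_cycle == SKETCH_HEADER_MAGIC)
			(*curr_cycle)++;	/* never reuse the magic value */
	}
}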
3217 struct xlog *log = mp->m_log; in xfs_log_force() local
3223 xlog_cil_force(log); in xfs_log_force()
3225 spin_lock(&log->l_icloglock); in xfs_log_force()
3226 if (xlog_is_shutdown(log)) in xfs_log_force()
3229 iclog = log->l_iclog; in xfs_log_force()
3260 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3276 spin_unlock(&log->l_icloglock); in xfs_log_force()
3279 spin_unlock(&log->l_icloglock); in xfs_log_force()
3299 struct xlog *log, in xlog_force_lsn() argument
3308 spin_lock(&log->l_icloglock); in xlog_force_lsn()
3309 if (xlog_is_shutdown(log)) in xlog_force_lsn()
3312 iclog = log->l_iclog; in xlog_force_lsn()
3316 if (iclog == log->l_iclog) in xlog_force_lsn()
3341 &log->l_icloglock); in xlog_force_lsn()
3374 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3377 spin_unlock(&log->l_icloglock); in xlog_force_lsn()
3397 struct xlog *log = mp->m_log; in xfs_log_force_seq() local
3405 lsn = xlog_cil_force_seq(log, seq); in xfs_log_force_seq()
3409 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false); in xfs_log_force_seq()
3412 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true); in xfs_log_force_seq()
3444 struct xlog *log, in xlog_calc_unit_res() argument
3506 iclog_space = log->l_iclog_size - log->l_iclog_hsize; in xlog_calc_unit_res()
3518 unit_bytes += log->l_iclog_hsize * num_headers; in xlog_calc_unit_res()
3521 unit_bytes += log->l_iclog_hsize; in xlog_calc_unit_res()
3524 unit_bytes += 2 * log->l_iclog_roundoff; in xlog_calc_unit_res()
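The tail of xlog_calc_unit_res() is visible above: beyond the caller's bytes, a ticket pays for the record headers its data can span, one extra header for the commit record, and two rounds of stripe-unit padding. A heavily simplified version that keeps only those terms (the real function also charges per-region op headers and other overheads):

static int calc_unit_res(int unit_bytes, int iclog_size, int iclog_hsize,
			 int roundoff_unit)
{
	int iclog_space = iclog_size - iclog_hsize;
	/* open-coded howmany(): records the data can spill across */
	int num_headers = (unit_bytes + iclog_space - 1) / iclog_space;

	unit_bytes += iclog_hsize * num_headers;	/* per-record headers */
	unit_bytes += iclog_hsize;			/* commit record */
	unit_bytes += 2 * roundoff_unit;		/* worst-case padding */
	return unit_bytes;
}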
3544 struct xlog *log, in xlog_ticket_alloc() argument
3554 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs); in xlog_ticket_alloc()
3584 struct xlog *log) in xlog_verify_grant_tail() argument
3589 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space); in xlog_verify_grant_tail()
3590 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks); in xlog_verify_grant_tail()
3593 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { in xlog_verify_grant_tail()
3594 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3599 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) { in xlog_verify_grant_tail()
3600 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES, in xlog_verify_grant_tail()
3609 struct xlog *log, in xlog_verify_tail_lsn() argument
3615 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) { in xlog_verify_tail_lsn()
3617 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn)); in xlog_verify_tail_lsn()
3618 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3619 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3621 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle); in xlog_verify_tail_lsn()
3623 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) in xlog_verify_tail_lsn()
3624 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__); in xlog_verify_tail_lsn()
3626 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block; in xlog_verify_tail_lsn()
3628 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__); in xlog_verify_tail_lsn()
3649 struct xlog *log, in xlog_verify_iclog() argument
3663 spin_lock(&log->l_icloglock); in xlog_verify_iclog()
3664 icptr = log->l_iclog; in xlog_verify_iclog()
3665 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next) in xlog_verify_iclog()
3668 if (icptr != log->l_iclog) in xlog_verify_iclog()
3669 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__); in xlog_verify_iclog()
3670 spin_unlock(&log->l_icloglock); in xlog_verify_iclog()
3674 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__); in xlog_verify_iclog()
3680 xfs_emerg(log->l_mp, "%s: unexpected magic num", in xlog_verify_iclog()
3710 xfs_warn(log->l_mp, in xlog_verify_iclog()
3757 struct xlog *log, in xlog_force_shutdown() argument
3762 if (!log) in xlog_force_shutdown()
3780 if (!log_error && !xlog_in_recovery(log)) in xlog_force_shutdown()
3781 xfs_log_force(log->l_mp, XFS_LOG_SYNC); in xlog_force_shutdown()
3794 spin_lock(&log->l_icloglock); in xlog_force_shutdown()
3795 if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) { in xlog_force_shutdown()
3796 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3799 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3805 if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) { in xlog_force_shutdown()
3806 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR, in xlog_force_shutdown()
3809 xfs_alert(log->l_mp, in xlog_force_shutdown()
3822 xlog_grant_head_wake_all(&log->l_reserve_head); in xlog_force_shutdown()
3823 xlog_grant_head_wake_all(&log->l_write_head); in xlog_force_shutdown()
3831 spin_lock(&log->l_cilp->xc_push_lock); in xlog_force_shutdown()
3832 wake_up_all(&log->l_cilp->xc_start_wait); in xlog_force_shutdown()
3833 wake_up_all(&log->l_cilp->xc_commit_wait); in xlog_force_shutdown()
3834 spin_unlock(&log->l_cilp->xc_push_lock); in xlog_force_shutdown()
3836 spin_lock(&log->l_icloglock); in xlog_force_shutdown()
3837 xlog_state_shutdown_callbacks(log); in xlog_force_shutdown()
3838 spin_unlock(&log->l_icloglock); in xlog_force_shutdown()
3840 wake_up_var(&log->l_opstate); in xlog_force_shutdown()
3846 struct xlog *log) in xlog_iclogs_empty() argument
3850 iclog = log->l_iclog; in xlog_iclogs_empty()
3858 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
3871 struct xlog *log = mp->m_log; in xfs_log_check_lsn() local
3893 spin_lock(&log->l_icloglock); in xfs_log_check_lsn()
3898 log->l_curr_cycle, log->l_curr_block); in xfs_log_check_lsn()
3899 spin_unlock(&log->l_icloglock); in xfs_log_check_lsn()
3912 struct xlog *log) in xlog_use_incompat_feat() argument
3914 down_read(&log->l_incompat_users); in xlog_use_incompat_feat()
3920 struct xlog *log) in xlog_drop_incompat_feat() argument
3922 up_read(&log->l_incompat_users); in xlog_drop_incompat_feat()