Lines matching references to iclog in the XFS log code (fs/xfs/xfs_log.c); each entry gives the source line number, the matching line, and the enclosing function, with "argument" or "local" marking where iclog is declared rather than used.
43 struct xlog_in_core *iclog);
50 struct xlog_in_core **iclog,
60 struct xlog_in_core *iclog,
69 struct xlog_in_core *iclog,
74 struct xlog_in_core *iclog);
526 struct xlog_in_core *iclog; in xlog_state_shutdown_callbacks() local
529 iclog = log->l_iclog; in xlog_state_shutdown_callbacks()
531 if (atomic_read(&iclog->ic_refcnt)) { in xlog_state_shutdown_callbacks()
535 list_splice_init(&iclog->ic_callbacks, &cb_list); in xlog_state_shutdown_callbacks()
541 wake_up_all(&iclog->ic_write_wait); in xlog_state_shutdown_callbacks()
542 wake_up_all(&iclog->ic_force_wait); in xlog_state_shutdown_callbacks()
543 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_shutdown_callbacks()
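The hits at source lines 526-543 are the shutdown path walking the circular ring of in-core log buffers exactly once, splicing each buffer's pending callbacks onto a private list and waking any writers or forcers still sleeping on it. A minimal user-space sketch of that do/while ring walk, with a simplified stand-in struct rather than the kernel's xlog_in_core:

#include <stdio.h>

/* Simplified stand-in for struct xlog_in_core: the in-core log buffers
 * form a circular, singly linked ring. */
struct iclog {
	int		id;
	struct iclog	*ic_next;
};

/* Visit every buffer in the ring exactly once, starting from the head. */
static void walk_iclog_ring(struct iclog *head)
{
	struct iclog *iclog = head;

	do {
		/* per-buffer work goes here: splice callbacks, wake waiters */
		printf("visiting iclog %d\n", iclog->id);
	} while ((iclog = iclog->ic_next) != head);
}

int main(void)
{
	struct iclog a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

	a.ic_next = &b;
	b.ic_next = &c;
	c.ic_next = &a;		/* close the ring */
	walk_iclog_ring(&a);
	return 0;
}

The loop terminates when it arrives back at the ring head (log->l_iclog in the listing above), so a single pointer comparison bounds the walk.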
571 struct xlog_in_core *iclog, in xlog_state_release_iclog() argument
579 trace_xlog_iclog_release(iclog, _RET_IP_); in xlog_state_release_iclog()
586 if ((iclog->ic_state == XLOG_STATE_WANT_SYNC || in xlog_state_release_iclog()
587 (iclog->ic_flags & XLOG_ICL_NEED_FUA)) && in xlog_state_release_iclog()
588 !iclog->ic_header.h_tail_lsn) { in xlog_state_release_iclog()
590 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); in xlog_state_release_iclog()
593 last_ref = atomic_dec_and_test(&iclog->ic_refcnt); in xlog_state_release_iclog()
609 if (iclog->ic_state != XLOG_STATE_WANT_SYNC) { in xlog_state_release_iclog()
610 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xlog_state_release_iclog()
614 iclog->ic_state = XLOG_STATE_SYNCING; in xlog_state_release_iclog()
615 xlog_verify_tail_lsn(log, iclog); in xlog_state_release_iclog()
616 trace_xlog_iclog_syncing(iclog, _RET_IP_); in xlog_state_release_iclog()
619 xlog_sync(log, iclog, ticket); in xlog_state_release_iclog()
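Source lines 571-619 show the release path: record the tail LSN if the buffer is headed for sync, drop one reference, and only the dropper of the final reference moves the buffer from WANT_SYNC to SYNCING and hands it to xlog_sync(). A simplified, self-contained model of that last-reference handoff (hypothetical types; C11 atomics standing in for the kernel's atomic_t):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum iclog_state { ICLOG_ACTIVE, ICLOG_WANT_SYNC, ICLOG_SYNCING };

struct iclog {
	atomic_int		refcnt;
	enum iclog_state	state;
};

/* Drop one reference; only the final release moves the buffer to SYNCING,
 * which is where the real code calls xlog_sync() to start the write. */
static bool release_iclog(struct iclog *ic)
{
	if (atomic_fetch_sub(&ic->refcnt, 1) != 1)
		return false;			/* someone else still holds a reference */

	if (ic->state != ICLOG_WANT_SYNC) {
		assert(ic->state == ICLOG_ACTIVE);
		return false;			/* nothing queued for sync yet */
	}
	ic->state = ICLOG_SYNCING;
	printf("last reference dropped: submitting buffer for I/O\n");
	return true;
}

int main(void)
{
	struct iclog ic = { .state = ICLOG_WANT_SYNC };

	atomic_store(&ic.refcnt, 2);
	release_iclog(&ic);	/* first drop: write not started */
	release_iclog(&ic);	/* last drop: buffer moves to SYNCING */
	return 0;
}

Keeping the state change on the last-reference path is what lets concurrent writers pin the buffer with a reference count bump and know it will not be submitted underneath them.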
882 struct xlog_in_core *iclog) in xlog_force_iclog() argument
884 atomic_inc(&iclog->ic_refcnt); in xlog_force_iclog()
885 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; in xlog_force_iclog()
886 if (iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_force_iclog()
887 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0); in xlog_force_iclog()
888 return xlog_state_release_iclog(iclog->ic_log, iclog, NULL); in xlog_force_iclog()
899 struct xlog_in_core *iclog = log->l_iclog; in xlog_wait_iclog_completion() local
902 down(&iclog->ic_sema); in xlog_wait_iclog_completion()
903 up(&iclog->ic_sema); in xlog_wait_iclog_completion()
904 iclog = iclog->ic_next; in xlog_wait_iclog_completion()
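Lines 899-904 (xlog_wait_iclog_completion) lean on the per-buffer semaphore that the submit path holds across the write and the completion handler releases (lines 1921 and 1441 in this listing): taking it and immediately putting it back therefore blocks until any in-flight write on that buffer has finished. The same idiom with POSIX semaphores, purely illustrative and single-threaded here:

#include <semaphore.h>
#include <stdio.h>

static sem_t ic_sema;

static void submit_write(void)
{
	sem_wait(&ic_sema);	/* held for as long as the write is in flight */
}

static void io_complete(void)
{
	sem_post(&ic_sema);	/* released when the I/O finishes */
}

static void wait_for_iclog_completion(void)
{
	sem_wait(&ic_sema);	/* blocks while a write is outstanding */
	sem_post(&ic_sema);	/* we only wanted to wait, not to hold it */
}

int main(void)
{
	sem_init(&ic_sema, 0, 1);	/* one semaphore per buffer, initially free */
	submit_write();
	io_complete();			/* in the kernel this runs from the completion work item */
	wait_for_iclog_completion();
	puts("no write in flight");
	sem_destroy(&ic_sema);
	return 0;
}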
916 struct xlog_in_core *iclog) in xlog_wait_on_iclog() argument
917 __releases(iclog->ic_log->l_icloglock) in xlog_wait_on_iclog()
919 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog()
921 trace_xlog_iclog_wait_on(iclog, _RET_IP_); in xlog_wait_on_iclog()
923 iclog->ic_state != XLOG_STATE_ACTIVE && in xlog_wait_on_iclog()
924 iclog->ic_state != XLOG_STATE_DIRTY) { in xlog_wait_on_iclog()
926 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
990 struct xlog_in_core *iclog; in xlog_unmount_write() local
1008 iclog = log->l_iclog; in xlog_unmount_write()
1009 error = xlog_force_iclog(iclog); in xlog_unmount_write()
1010 xlog_wait_on_iclog(iclog); in xlog_unmount_write()
1022 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog() local
1025 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xfs_log_unmount_verify_iclog()
1026 ASSERT(iclog->ic_offset == 0); in xfs_log_unmount_verify_iclog()
1027 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
1412 struct xlog_in_core *iclog = in xlog_ioend_work() local
1414 struct xlog *log = iclog->ic_log; in xlog_ioend_work()
1417 error = blk_status_to_errno(iclog->ic_bio.bi_status); in xlog_ioend_work()
1420 if (iclog->ic_fail_crc) in xlog_ioend_work()
1432 xlog_state_done_syncing(iclog); in xlog_ioend_work()
1433 bio_uninit(&iclog->ic_bio); in xlog_ioend_work()
1441 up(&iclog->ic_sema); in xlog_ioend_work()
1560 xlog_in_core_t *iclog, *prev_iclog=NULL; in xlog_alloc_log() local
1641 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL); in xlog_alloc_log()
1642 if (!iclog) in xlog_alloc_log()
1645 *iclogp = iclog; in xlog_alloc_log()
1646 iclog->ic_prev = prev_iclog; in xlog_alloc_log()
1647 prev_iclog = iclog; in xlog_alloc_log()
1649 iclog->ic_data = kvzalloc(log->l_iclog_size, in xlog_alloc_log()
1651 if (!iclog->ic_data) in xlog_alloc_log()
1653 head = &iclog->ic_header; in xlog_alloc_log()
1663 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1664 iclog->ic_state = XLOG_STATE_ACTIVE; in xlog_alloc_log()
1665 iclog->ic_log = log; in xlog_alloc_log()
1666 atomic_set(&iclog->ic_refcnt, 0); in xlog_alloc_log()
1667 INIT_LIST_HEAD(&iclog->ic_callbacks); in xlog_alloc_log()
1668 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1670 init_waitqueue_head(&iclog->ic_force_wait); in xlog_alloc_log()
1671 init_waitqueue_head(&iclog->ic_write_wait); in xlog_alloc_log()
1672 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work); in xlog_alloc_log()
1673 sema_init(&iclog->ic_sema, 1); in xlog_alloc_log()
1675 iclogp = &iclog->ic_next; in xlog_alloc_log()
1695 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1696 prev_iclog = iclog->ic_next; in xlog_alloc_log()
1697 kmem_free(iclog->ic_data); in xlog_alloc_log()
1698 kmem_free(iclog); in xlog_alloc_log()
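The block at lines 1560-1698 is xlog_alloc_log() building the ring: each pass allocates an iclog plus its data buffer, links it to its predecessor, and on any allocation failure the partially built chain is torn down (the loop at lines 1695-1698). A user-space sketch of that allocate-and-unwind pattern, with plain calloc/free and invented field names standing in for the kernel allocators:

#include <stdlib.h>

struct iclog {
	void		*ic_data;	/* payload buffer for one log record */
	struct iclog	*ic_prev;
	struct iclog	*ic_next;
};

/* Build a ring of 'count' buffers of 'size' bytes each; on any allocation
 * failure, free everything built so far and return NULL. */
static struct iclog *alloc_iclog_ring(int count, size_t size)
{
	struct iclog *head = NULL, *prev = NULL, *iclog;
	struct iclog **iclogp = &head;
	int i;

	for (i = 0; i < count; i++) {
		iclog = calloc(1, sizeof(*iclog));
		if (!iclog)
			goto out_free;
		*iclogp = iclog;
		iclog->ic_prev = prev;
		prev = iclog;

		iclog->ic_data = calloc(1, size);
		if (!iclog->ic_data)
			goto out_free;
		iclogp = &iclog->ic_next;
	}
	*iclogp = head;			/* close the ring */
	head->ic_prev = prev;
	return head;

out_free:
	/* free whatever was built before the failure; the chain is not yet
	 * circular here, so a NULL ic_next ends the walk */
	for (iclog = head; iclog; iclog = prev) {
		prev = iclog->ic_next;
		free(iclog->ic_data);
		free(iclog);
	}
	return NULL;
}

int main(void)
{
	struct iclog *log = alloc_iclog_ring(8, 32768);
	struct iclog *iclog, *next;
	int i;

	if (!log)
		return 1;
	for (i = 0, iclog = log; i < 8; i++, iclog = next) {
		next = iclog->ic_next;
		free(iclog->ic_data);
		free(iclog);
	}
	return 0;
}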
1797 struct xlog_in_core *iclog, in xlog_pack_data() argument
1801 int size = iclog->ic_offset + roundoff; in xlog_pack_data()
1805 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); in xlog_pack_data()
1807 dp = iclog->ic_datap; in xlog_pack_data()
1811 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; in xlog_pack_data()
1817 xlog_in_core_2_t *xhdr = iclog->ic_data; in xlog_pack_data()
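Lines 1797-1817 are xlog_pack_data(): before a record is written, the first 32-bit word of every 512-byte sector of the payload is saved into h_cycle_data[] in the record header and overwritten with the current cycle number, which is how recovery detects a torn write (every sector of a complete record must start with the same cycle). A host-side sketch of that save-and-stamp step, ignoring endianness and the extended headers used for large buffers (the xhdr hit at line 1817):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE	512
#define MAX_SECTORS	64

struct rec_header {
	uint32_t	cycle;			/* current cycle number of the log */
	uint32_t	cycle_data[MAX_SECTORS];/* saved first word of each sector */
};

/* Save the first word of each sector and stamp the cycle number in its
 * place; a complete record then has a consistent cycle in every sector. */
static void pack_data(struct rec_header *hdr, unsigned char *data, size_t len)
{
	size_t i, sectors = len / SECTOR_SIZE;

	for (i = 0; i < sectors; i++) {
		unsigned char *sector = data + i * SECTOR_SIZE;

		memcpy(&hdr->cycle_data[i], sector, sizeof(uint32_t));
		memcpy(sector, &hdr->cycle, sizeof(uint32_t));
	}
}

int main(void)
{
	static unsigned char payload[4 * SECTOR_SIZE];
	struct rec_header hdr = { .cycle = 7 };

	memset(payload, 0xab, sizeof(payload));
	pack_data(&hdr, payload, sizeof(payload));
	printf("saved word of sector 0: %#x\n", (unsigned)hdr.cycle_data[0]);
	return 0;
}

The step is reversible: recovery restores the saved words from h_cycle_data[] once it has validated the cycle numbers.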
1876 struct xlog_in_core *iclog = bio->bi_private; in xlog_bio_end_io() local
1878 queue_work(iclog->ic_log->l_ioend_workqueue, in xlog_bio_end_io()
1879 &iclog->ic_end_io_work); in xlog_bio_end_io()
1906 struct xlog_in_core *iclog, in xlog_write_iclog() argument
1911 trace_xlog_iclog_write(iclog, _RET_IP_); in xlog_write_iclog()
1921 down(&iclog->ic_sema); in xlog_write_iclog()
1930 xlog_state_done_syncing(iclog); in xlog_write_iclog()
1931 up(&iclog->ic_sema); in xlog_write_iclog()
1941 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec, in xlog_write_iclog()
1944 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1945 iclog->ic_bio.bi_end_io = xlog_bio_end_io; in xlog_write_iclog()
1946 iclog->ic_bio.bi_private = iclog; in xlog_write_iclog()
1948 if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) { in xlog_write_iclog()
1949 iclog->ic_bio.bi_opf |= REQ_PREFLUSH; in xlog_write_iclog()
1967 if (iclog->ic_flags & XLOG_ICL_NEED_FUA) in xlog_write_iclog()
1968 iclog->ic_bio.bi_opf |= REQ_FUA; in xlog_write_iclog()
1970 iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); in xlog_write_iclog()
1972 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { in xlog_write_iclog()
1976 if (is_vmalloc_addr(iclog->ic_data)) in xlog_write_iclog()
1977 flush_kernel_vmap_range(iclog->ic_data, count); in xlog_write_iclog()
1986 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1988 bio_chain(split, &iclog->ic_bio); in xlog_write_iclog()
1992 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1995 submit_bio(&iclog->ic_bio); in xlog_write_iclog()
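Lines 1906-1995 are the I/O submission path: take the buffer's semaphore, build a bio at the right sector offset in the log device, apply REQ_PREFLUSH/REQ_FUA if the buffer was flagged (lines 1948-1968), and, because the on-disk log is circular, split the write when it would run past the physical end (lines 1986-1992). A tiny model of just that wrap-around arithmetic, with made-up parameter names:

#include <stdint.h>
#include <stdio.h>

/* The on-disk log is a circular region of 'log_size' 512-byte blocks.  A
 * write of 'count' blocks starting at block 'bno' must be split into two
 * pieces when it runs past the physical end of the log. */
static void split_log_write(uint64_t log_size, uint64_t bno, uint64_t count)
{
	if (bno + count <= log_size) {
		printf("single write: %llu blocks at block %llu\n",
		       (unsigned long long)count, (unsigned long long)bno);
		return;
	}
	printf("split write: %llu blocks at block %llu, then %llu blocks at block 0\n",
	       (unsigned long long)(log_size - bno),
	       (unsigned long long)bno,
	       (unsigned long long)(count - (log_size - bno)));
}

int main(void)
{
	split_log_write(1024, 100, 64);		/* fits: one write */
	split_log_write(1024, 1000, 64);	/* wraps: two writes */
	return 0;
}

In the kernel the two pieces are produced with bio_split() and chained back to the original bio, so a single completion covers both halves.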
2025 struct xlog_in_core *iclog, in xlog_calc_iclog_size() argument
2031 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
2067 struct xlog_in_core *iclog, in xlog_sync() argument
2075 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); in xlog_sync()
2076 trace_xlog_iclog_sync(iclog, _RET_IP_); in xlog_sync()
2078 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
2093 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
2096 size = iclog->ic_offset; in xlog_sync()
2099 iclog->ic_header.h_len = cpu_to_be32(size); in xlog_sync()
2104 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); in xlog_sync()
2108 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
2111 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
2112 iclog->ic_datap, size); in xlog_sync()
2122 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); in xlog_sync()
2123 iclog->ic_fail_crc = true; in xlog_sync()
2126 be64_to_cpu(iclog->ic_header.h_lsn)); in xlog_sync()
2129 xlog_verify_iclog(log, iclog, count); in xlog_sync()
2130 xlog_write_iclog(log, iclog, bno, count); in xlog_sync()
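Lines 2067-2130 are xlog_sync(): the write size is rounded up, the cycle words are packed, the record length and CRC are filled into the header (with a debug hook at line 2122 that deliberately corrupts the CRC for error injection), and the buffer goes to xlog_write_iclog(). The checksum covers the header, excluding its own h_crc field, plus the payload; a self-contained model of that shape, using a toy checksum where the kernel uses crc32c:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_header {
	uint32_t	len;	/* payload bytes covered by the checksum */
	uint32_t	crc;	/* checksum of header (crc field zeroed) + payload */
};

/* Stand-in checksum; the real implementation is crc32c. */
static uint32_t cksum(uint32_t seed, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	while (len--)
		seed = seed * 31 + *p++;
	return seed;
}

/* Checksum the record the way the log does: the header with the crc field
 * itself treated as zero, then the payload. */
static uint32_t record_cksum(const struct rec_header *hdr,
			     const void *data, size_t len)
{
	struct rec_header tmp = *hdr;
	uint32_t crc;

	tmp.crc = 0;
	crc = cksum(0, &tmp, sizeof(tmp));
	return cksum(crc, data, len);
}

int main(void)
{
	char payload[] = "log record payload";
	struct rec_header hdr = { .len = sizeof(payload) };

	hdr.crc = record_cksum(&hdr, payload, sizeof(payload));
	printf("h_crc = %#x\n", (unsigned)hdr.crc);

	/* recovery recomputes the checksum and compares it to h_crc */
	printf("valid = %d\n", hdr.crc == record_cksum(&hdr, payload, sizeof(payload)));
	return 0;
}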
2140 xlog_in_core_t *iclog, *next_iclog; in xlog_dealloc_log() local
2150 iclog = log->l_iclog; in xlog_dealloc_log()
2152 next_iclog = iclog->ic_next; in xlog_dealloc_log()
2153 kmem_free(iclog->ic_data); in xlog_dealloc_log()
2154 kmem_free(iclog); in xlog_dealloc_log()
2155 iclog = next_iclog; in xlog_dealloc_log()
2169 struct xlog_in_core *iclog, in xlog_state_finish_copy() argument
2175 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); in xlog_state_finish_copy()
2176 iclog->ic_offset += copy_bytes; in xlog_state_finish_copy()
2247 struct xlog_in_core *iclog, in xlog_write_iovec() argument
2255 ASSERT(*log_offset < iclog->ic_log->l_iclog_size); in xlog_write_iovec()
2259 memcpy(iclog->ic_datap + *log_offset, data, write_len); in xlog_write_iovec()
2274 struct xlog_in_core *iclog, in xlog_write_full() argument
2282 ASSERT(*log_offset + *len <= iclog->ic_size || in xlog_write_full()
2283 iclog->ic_state == XLOG_STATE_WANT_SYNC); in xlog_write_full()
2294 xlog_write_iovec(iclog, log_offset, reg->i_addr, in xlog_write_full()
2308 struct xlog_in_core *iclog = *iclogp; in xlog_write_get_more_iclog_space() local
2309 struct xlog *log = iclog->ic_log; in xlog_write_get_more_iclog_space()
2313 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC); in xlog_write_get_more_iclog_space()
2314 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_get_more_iclog_space()
2315 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write_get_more_iclog_space()
2320 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write_get_more_iclog_space()
2326 *iclogp = iclog; in xlog_write_get_more_iclog_space()
2346 struct xlog_in_core *iclog = *iclogp; in xlog_write_partial() local
2368 if (iclog->ic_size - *log_offset <= in xlog_write_partial()
2371 &iclog, log_offset, *len, record_cnt, in xlog_write_partial()
2378 rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset); in xlog_write_partial()
2385 xlog_write_iovec(iclog, log_offset, reg->i_addr, in xlog_write_partial()
2422 &iclog, log_offset, in xlog_write_partial()
2428 ophdr = iclog->ic_datap + *log_offset; in xlog_write_partial()
2444 if (rlen <= iclog->ic_size - *log_offset) in xlog_write_partial()
2449 rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset); in xlog_write_partial()
2452 xlog_write_iovec(iclog, log_offset, in xlog_write_partial()
2463 *iclogp = iclog; in xlog_write_partial()
2516 struct xlog_in_core *iclog = NULL; in xlog_write() local
2530 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2535 ASSERT(log_offset <= iclog->ic_size - 1); in xlog_write()
2543 xlog_cil_set_ctx_write_state(ctx, iclog); in xlog_write()
2551 lv->lv_bytes > iclog->ic_size - log_offset) { in xlog_write()
2552 error = xlog_write_partial(lv, ticket, &iclog, in xlog_write()
2563 xlog_write_full(lv, ticket, iclog, &log_offset, in xlog_write()
2576 xlog_state_finish_copy(log, iclog, record_cnt, 0); in xlog_write()
2577 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_write()
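Lines 2516-2577 are the heart of xlog_write(): reserve space in the current in-core buffer, copy each log vector either in one go (xlog_write_full) or piecewise across buffer boundaries (xlog_write_partial, which finishes and releases the full buffer and grabs the next one), then release the last buffer touched. A compact stand-alone model of copying a byte stream into fixed-size buffers and switching when one fills, which is the shape of that loop (all names here are invented):

#include <stdio.h>
#include <string.h>

#define ICLOG_SIZE	16

static char	iclogs[4][ICLOG_SIZE];	/* stand-in for the iclog ring */
static int	cur;			/* current buffer */
static int	offset;			/* write offset within it */

/* Copy 'len' bytes into log buffer space, moving to the next buffer
 * whenever the current one fills; the real code releases the full iclog
 * for I/O at that point and may split one region across buffers. */
static void log_write(const char *data, int len)
{
	while (len) {
		int space = ICLOG_SIZE - offset;
		int copy = len < space ? len : space;

		memcpy(&iclogs[cur][offset], data, copy);
		offset += copy;
		data += copy;
		len -= copy;
		if (offset == ICLOG_SIZE) {	/* buffer full: switch */
			printf("iclog %d full, releasing for I/O\n", cur);
			cur = (cur + 1) % 4;
			offset = 0;
		}
	}
}

int main(void)
{
	log_write("transaction-one-", 16);
	log_write("transaction-two-data", 20);
	printf("ended in iclog %d at offset %d\n", cur, offset);
	return 0;
}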
2585 struct xlog_in_core *iclog, in xlog_state_activate_iclog() argument
2588 ASSERT(list_empty_careful(&iclog->ic_callbacks)); in xlog_state_activate_iclog()
2589 trace_xlog_iclog_activate(iclog, _RET_IP_); in xlog_state_activate_iclog()
2598 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { in xlog_state_activate_iclog()
2608 iclog->ic_state = XLOG_STATE_ACTIVE; in xlog_state_activate_iclog()
2609 iclog->ic_offset = 0; in xlog_state_activate_iclog()
2610 iclog->ic_header.h_num_logops = 0; in xlog_state_activate_iclog()
2611 memset(iclog->ic_header.h_cycle_data, 0, in xlog_state_activate_iclog()
2612 sizeof(iclog->ic_header.h_cycle_data)); in xlog_state_activate_iclog()
2613 iclog->ic_header.h_lsn = 0; in xlog_state_activate_iclog()
2614 iclog->ic_header.h_tail_lsn = 0; in xlog_state_activate_iclog()
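Lines 2585-2614 recycle a buffer once its callbacks have run: state back to ACTIVE, offset zeroed, and the per-record header fields (logop count, cycle words, LSNs) wiped so nothing stale leaks into the next record written through it. A minimal sketch of that reset, with simplified types:

#include <stdint.h>
#include <string.h>

enum iclog_state { ACTIVE, WANT_SYNC, SYNCING, DONE_SYNC, CALLBACK, DIRTY };

struct rec_header {
	uint32_t	num_logops;
	uint32_t	cycle_data[64];
	uint64_t	lsn;
	uint64_t	tail_lsn;
};

struct iclog {
	enum iclog_state	state;
	int			offset;
	struct rec_header	header;
};

/* Recycle a buffer whose callbacks have completed: make it writable again
 * and wipe the per-record header fields. */
static void activate_iclog(struct iclog *ic)
{
	ic->state = ACTIVE;
	ic->offset = 0;
	ic->header.num_logops = 0;
	memset(ic->header.cycle_data, 0, sizeof(ic->header.cycle_data));
	ic->header.lsn = 0;
	ic->header.tail_lsn = 0;
}

int main(void)
{
	struct iclog ic = { .state = DIRTY, .offset = 100 };

	activate_iclog(&ic);
	return ic.state == ACTIVE ? 0 : 1;
}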
2626 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs() local
2629 if (iclog->ic_state == XLOG_STATE_DIRTY) in xlog_state_activate_iclogs()
2630 xlog_state_activate_iclog(iclog, iclogs_changed); in xlog_state_activate_iclogs()
2635 else if (iclog->ic_state != XLOG_STATE_ACTIVE) in xlog_state_activate_iclogs()
2637 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2698 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn() local
2702 if (iclog->ic_state == XLOG_STATE_ACTIVE || in xlog_get_lowest_lsn()
2703 iclog->ic_state == XLOG_STATE_DIRTY) in xlog_get_lowest_lsn()
2706 lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_get_lowest_lsn()
2709 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
2739 struct xlog_in_core *iclog, in xlog_state_set_callback() argument
2742 trace_xlog_iclog_callback(iclog, _RET_IP_); in xlog_state_set_callback()
2743 iclog->ic_state = XLOG_STATE_CALLBACK; in xlog_state_set_callback()
2748 if (list_empty_careful(&iclog->ic_callbacks)) in xlog_state_set_callback()
2763 struct xlog_in_core *iclog) in xlog_state_iodone_process_iclog() argument
2768 switch (iclog->ic_state) { in xlog_state_iodone_process_iclog()
2782 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_state_iodone_process_iclog()
2786 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2812 struct xlog_in_core *iclog = first_iclog; in xlog_state_do_iclog_callbacks() local
2818 if (xlog_state_iodone_process_iclog(log, iclog)) in xlog_state_do_iclog_callbacks()
2820 if (iclog->ic_state != XLOG_STATE_CALLBACK) { in xlog_state_do_iclog_callbacks()
2821 iclog = iclog->ic_next; in xlog_state_do_iclog_callbacks()
2824 list_splice_init(&iclog->ic_callbacks, &cb_list); in xlog_state_do_iclog_callbacks()
2827 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_); in xlog_state_do_iclog_callbacks()
2829 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_); in xlog_state_do_iclog_callbacks()
2833 xlog_state_clean_iclog(log, iclog); in xlog_state_do_iclog_callbacks()
2834 iclog = iclog->ic_next; in xlog_state_do_iclog_callbacks()
2835 } while (iclog != first_iclog); in xlog_state_do_iclog_callbacks()
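Lines 2812-2835 are the completion-side callback loop: walk the ring from the oldest buffer and, for each one that has reached the CALLBACK state, splice its pending callbacks onto a local list (line 2824), run them with the log lock dropped, then clean the buffer and advance. A single-threaded sketch of the detach-then-run pattern (hand-rolled list instead of the kernel's list_head, locking omitted):

#include <stdio.h>
#include <stddef.h>

struct callback {
	void			(*fn)(void *arg);
	void			*arg;
	struct callback		*next;
};

struct iclog {
	struct callback	*callbacks;	/* pending completion callbacks */
	struct iclog	*ic_next;
};

static void say(void *arg) { puts(arg); }

/* Detach the pending callbacks first (the kernel does this under
 * l_icloglock with list_splice_init), then run them with no log state
 * held, so a callback may itself queue more work against the log. */
static void run_iclog_callbacks(struct iclog *ic)
{
	struct callback *cb = ic->callbacks;

	ic->callbacks = NULL;		/* splice onto a private list */
	while (cb) {
		struct callback *next = cb->next;

		cb->fn(cb->arg);	/* run outside the iclog */
		cb = next;
	}
}

int main(void)
{
	struct callback b = { say, "second", NULL };
	struct callback a = { say, "first", &b };
	struct iclog ic = { .callbacks = &a };

	run_iclog_callbacks(&ic);
	return 0;
}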
2881 struct xlog_in_core *iclog) in xlog_state_done_syncing() argument
2883 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing()
2886 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); in xlog_state_done_syncing()
2887 trace_xlog_iclog_sync_done(iclog, _RET_IP_); in xlog_state_done_syncing()
2895 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); in xlog_state_done_syncing()
2896 iclog->ic_state = XLOG_STATE_DONE_SYNC; in xlog_state_done_syncing()
2904 wake_up_all(&iclog->ic_write_wait); in xlog_state_done_syncing()
2937 xlog_in_core_t *iclog; in xlog_state_get_iclog_space() local
2946 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2947 if (iclog->ic_state != XLOG_STATE_ACTIVE) { in xlog_state_get_iclog_space()
2955 head = &iclog->ic_header; in xlog_state_get_iclog_space()
2957 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ in xlog_state_get_iclog_space()
2958 log_offset = iclog->ic_offset; in xlog_state_get_iclog_space()
2960 trace_xlog_iclog_get_space(iclog, _RET_IP_); in xlog_state_get_iclog_space()
2984 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { in xlog_state_get_iclog_space()
2987 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2996 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) in xlog_state_get_iclog_space()
2997 error = xlog_state_release_iclog(log, iclog, ticket); in xlog_state_get_iclog_space()
3010 if (len <= iclog->ic_size - iclog->ic_offset) in xlog_state_get_iclog_space()
3011 iclog->ic_offset += len; in xlog_state_get_iclog_space()
3013 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
3014 *iclogp = iclog; in xlog_state_get_iclog_space()
3016 ASSERT(iclog->ic_offset <= iclog->ic_size); in xlog_state_get_iclog_space()
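Lines 2937-3016 are xlog_state_get_iclog_space(): a writer takes the head buffer if it is ACTIVE, bumps ic_refcnt so the buffer cannot be synced underneath the copy, and reserves room at ic_offset; if less than two op headers' worth of space remain the buffer is switched to WANT_SYNC, and if the request does not fully fit the caller still gets the offset and writes the remainder into the next buffer. A small model of just that reservation decision (no locking or refcounting, invented names):

#include <stdio.h>
#include <stdbool.h>

#define OP_HEADER_SIZE	12

struct iclog {
	int	size;		/* usable bytes in this buffer */
	int	offset;		/* bytes already reserved */
	bool	want_sync;	/* no further reservations allowed */
};

/* Try to reserve 'len' bytes; returns the write offset, or -1 if the
 * caller must move on to the next buffer.  Mirrors the two decisions in
 * the listing above: switch when almost full, and only advance the offset
 * when the whole request fits. */
static int reserve_space(struct iclog *ic, int len)
{
	int log_offset;

	if (ic->want_sync)
		return -1;
	if (ic->size - ic->offset < 2 * OP_HEADER_SIZE) {
		ic->want_sync = true;		/* too full to be useful */
		return -1;
	}
	log_offset = ic->offset;
	if (len <= ic->size - ic->offset)
		ic->offset += len;
	else
		ic->want_sync = true;		/* caller writes what fits; rest goes to the next buffer */
	return log_offset;
}

int main(void)
{
	struct iclog ic = { .size = 256 };

	printf("offset %d\n", reserve_space(&ic, 100));	/* 0 */
	printf("offset %d\n", reserve_space(&ic, 100));	/* 100 */
	printf("offset %d\n", reserve_space(&ic, 100));	/* 200: partial fit, buffer switches */
	printf("offset %d\n", reserve_space(&ic, 10));	/* -1: buffer wants sync */
	return 0;
}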
3114 struct xlog_in_core *iclog, in xlog_state_switch_iclogs() argument
3117 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xlog_state_switch_iclogs()
3119 trace_xlog_iclog_switch(iclog, _RET_IP_); in xlog_state_switch_iclogs()
3122 eventual_size = iclog->ic_offset; in xlog_state_switch_iclogs()
3123 iclog->ic_state = XLOG_STATE_WANT_SYNC; in xlog_state_switch_iclogs()
3124 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3152 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3153 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
3165 struct xlog_in_core *iclog, in xlog_force_and_check_iclog() argument
3168 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_force_and_check_iclog()
3172 error = xlog_force_iclog(iclog); in xlog_force_and_check_iclog()
3180 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) in xlog_force_and_check_iclog()
3218 struct xlog_in_core *iclog; in xfs_log_force() local
3229 iclog = log->l_iclog; in xfs_log_force()
3230 trace_xlog_iclog_force(iclog, _RET_IP_); in xfs_log_force()
3232 if (iclog->ic_state == XLOG_STATE_DIRTY || in xfs_log_force()
3233 (iclog->ic_state == XLOG_STATE_ACTIVE && in xfs_log_force()
3234 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { in xfs_log_force()
3243 iclog = iclog->ic_prev; in xfs_log_force()
3244 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { in xfs_log_force()
3245 if (atomic_read(&iclog->ic_refcnt) == 0) { in xfs_log_force()
3249 if (xlog_force_and_check_iclog(iclog, &completed)) in xfs_log_force()
3260 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3270 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) in xfs_log_force()
3271 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; in xfs_log_force()
3274 return xlog_wait_on_iclog(iclog); in xfs_log_force()
3305 struct xlog_in_core *iclog; in xlog_force_lsn() local
3312 iclog = log->l_iclog; in xlog_force_lsn()
3313 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { in xlog_force_lsn()
3314 trace_xlog_iclog_force_lsn(iclog, _RET_IP_); in xlog_force_lsn()
3315 iclog = iclog->ic_next; in xlog_force_lsn()
3316 if (iclog == log->l_iclog) in xlog_force_lsn()
3320 switch (iclog->ic_state) { in xlog_force_lsn()
3338 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || in xlog_force_lsn()
3339 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { in xlog_force_lsn()
3340 xlog_wait(&iclog->ic_prev->ic_write_wait, in xlog_force_lsn()
3344 if (xlog_force_and_check_iclog(iclog, &completed)) in xlog_force_lsn()
3359 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA; in xlog_force_lsn()
3372 return xlog_wait_on_iclog(iclog); in xlog_force_lsn()
3610 struct xlog_in_core *iclog) in xlog_verify_tail_lsn() argument
3612 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn); in xlog_verify_tail_lsn()
3618 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3627 if (blocks < BTOBB(iclog->ic_offset) + 1) in xlog_verify_tail_lsn()
3650 struct xlog_in_core *iclog, in xlog_verify_iclog() argument
3673 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) in xlog_verify_iclog()
3676 base_ptr = ptr = &iclog->ic_header; in xlog_verify_iclog()
3677 p = &iclog->ic_header; in xlog_verify_iclog()
3685 len = be32_to_cpu(iclog->ic_header.h_num_logops); in xlog_verify_iclog()
3686 base_ptr = ptr = iclog->ic_datap; in xlog_verify_iclog()
3688 xhdr = iclog->ic_data; in xlog_verify_iclog()
3698 idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap); in xlog_verify_iclog()
3706 iclog->ic_header.h_cycle_data[idx]); in xlog_verify_iclog()
3722 idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap); in xlog_verify_iclog()
3728 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); in xlog_verify_iclog()
3848 xlog_in_core_t *iclog; in xlog_iclogs_empty() local
3850 iclog = log->l_iclog; in xlog_iclogs_empty()
3855 if (iclog->ic_header.h_num_logops) in xlog_iclogs_empty()
3857 iclog = iclog->ic_next; in xlog_iclogs_empty()
3858 } while (iclog != log->l_iclog); in xlog_iclogs_empty()