Lines matching refs: iclog (cross-reference listing for the Linux kernel XFS log, fs/xfs/xfs_log.c). Each entry gives the source line number, the matched line, and the enclosing function; "argument" and "local" note how the symbol is declared at that site.
43 struct xlog_in_core *iclog);
48 struct xlog_in_core **iclog,
55 struct xlog_in_core *iclog,
64 struct xlog_in_core *iclog);
76 struct xlog_in_core *iclog,
81 struct xlog_in_core *iclog,
488 struct xlog_in_core *iclog) in __xlog_state_release_iclog() argument
492 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) { in __xlog_state_release_iclog()
496 iclog->ic_state = XLOG_STATE_SYNCING; in __xlog_state_release_iclog()
497 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn); in __xlog_state_release_iclog()
498 xlog_verify_tail_lsn(log, iclog, tail_lsn); in __xlog_state_release_iclog()
503 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in __xlog_state_release_iclog()
514 struct xlog_in_core *iclog) in xlog_state_release_iclog() argument
518 if (iclog->ic_state == XLOG_STATE_IOERROR) in xlog_state_release_iclog()
521 if (atomic_dec_and_test(&iclog->ic_refcnt) && in xlog_state_release_iclog()
522 __xlog_state_release_iclog(log, iclog)) { in xlog_state_release_iclog()
524 xlog_sync(log, iclog); in xlog_state_release_iclog()
533 struct xlog_in_core *iclog) in xfs_log_release_iclog() argument
535 struct xlog *log = iclog->ic_log; in xfs_log_release_iclog()
538 if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) { in xfs_log_release_iclog()
539 if (iclog->ic_state != XLOG_STATE_IOERROR) in xfs_log_release_iclog()
540 sync = __xlog_state_release_iclog(log, iclog); in xfs_log_release_iclog()
545 xlog_sync(log, iclog); in xfs_log_release_iclog()
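
Lines 488-545 above show the iclog release path: a reference count dropping to zero, checked under l_icloglock, moves a WANT_SYNC iclog to SYNCING, and only then is the write submitted via xlog_sync(), outside the lock. Below is a minimal userspace sketch of that pattern in C11; stdatomic and a pthread mutex stand in for ic_refcnt and l_icloglock, and all names are illustrative stand-ins, not the XFS code.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    enum ic_state { IC_ACTIVE, IC_WANT_SYNC, IC_SYNCING, IC_IOERROR };

    struct iclog {
        atomic_int    refcnt;       /* models ic_refcnt */
        enum ic_state state;        /* models ic_state  */
    };

    static pthread_mutex_t icloglock = PTHREAD_MUTEX_INITIALIZER;

    static void do_sync(struct iclog *ic)   /* stands in for xlog_sync() */
    {
        printf("submitting iclog write\n");
    }

    /* Caller holds icloglock, as in __xlog_state_release_iclog(): returns
     * true when the iclog moved WANT_SYNC -> SYNCING and the caller must
     * submit the write after dropping the lock. */
    static bool release_locked(struct iclog *ic)
    {
        if (ic->state == IC_WANT_SYNC) {
            ic->state = IC_SYNCING;
            return true;
        }
        return false;    /* still ACTIVE: nothing to write yet */
    }

    /* Models xfs_log_release_iclog(). Simplification: the kernel's
     * atomic_dec_and_lock() takes the lock atomically with the 1 -> 0
     * transition; fetch_sub-then-lock is only safe here because this
     * sketch has no concurrent re-increment of the refcount. */
    static void release(struct iclog *ic)
    {
        bool sync = false;

        if (atomic_fetch_sub(&ic->refcnt, 1) == 1) {
            pthread_mutex_lock(&icloglock);
            if (ic->state != IC_IOERROR)
                sync = release_locked(ic);
            pthread_mutex_unlock(&icloglock);
            if (sync)
                do_sync(ic);    /* I/O is submitted outside the lock */
        }
    }

    int main(void)
    {
        struct iclog ic = { .state = IC_WANT_SYNC };
        atomic_init(&ic.refcnt, 1);
        release(&ic);    /* last reference: prints the submit message */
        return 0;
    }
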
792 struct xlog_in_core *iclog) in xlog_wait_on_iclog() argument
793 __releases(iclog->ic_log->l_icloglock) in xlog_wait_on_iclog()
795 struct xlog *log = iclog->ic_log; in xlog_wait_on_iclog()
798 iclog->ic_state != XLOG_STATE_ACTIVE && in xlog_wait_on_iclog()
799 iclog->ic_state != XLOG_STATE_DIRTY) { in xlog_wait_on_iclog()
801 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock); in xlog_wait_on_iclog()
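
The helper at lines 792-801 is a plain condition wait: sleep on ic_force_wait until the iclog has cycled back to ACTIVE or DIRTY (the kernel version also bails out on shutdown, elided here). A pthread analogue, purely as an assumed userspace model and written as a loop, the standard pthread idiom:

    #include <pthread.h>

    enum ic_state { IC_ACTIVE, IC_DIRTY, IC_SYNCING };

    struct iclog {
        enum ic_state   state;
        pthread_mutex_t icloglock;    /* models log->l_icloglock     */
        pthread_cond_t  force_wait;   /* models iclog->ic_force_wait */
    };

    /* Called with icloglock held; returns with it released, matching the
     * __releases(iclog->ic_log->l_icloglock) annotation at line 793.
     * pthread_cond_wait() drops and retakes the mutex around the sleep,
     * just as xlog_wait() drops the spinlock. */
    static void wait_on_iclog(struct iclog *ic)
    {
        while (ic->state != IC_ACTIVE && ic->state != IC_DIRTY)
            pthread_cond_wait(&ic->force_wait, &ic->icloglock);
        pthread_mutex_unlock(&ic->icloglock);
    }

    int main(void)
    {
        struct iclog ic = { .state = IC_ACTIVE };

        pthread_mutex_init(&ic.icloglock, NULL);
        pthread_cond_init(&ic.force_wait, NULL);
        pthread_mutex_lock(&ic.icloglock);
        wait_on_iclog(&ic);   /* already ACTIVE: returns at once, unlocked */
        return 0;
    }
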
850 struct xlog_in_core *iclog; in xlog_unmount_write() local
870 iclog = log->l_iclog; in xlog_unmount_write()
871 atomic_inc(&iclog->ic_refcnt); in xlog_unmount_write()
872 if (iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_unmount_write()
873 xlog_state_switch_iclogs(log, iclog, 0); in xlog_unmount_write()
875 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || in xlog_unmount_write()
876 iclog->ic_state == XLOG_STATE_IOERROR); in xlog_unmount_write()
877 error = xlog_state_release_iclog(log, iclog); in xlog_unmount_write()
878 xlog_wait_on_iclog(iclog); in xlog_unmount_write()
890 struct xlog_in_core *iclog = log->l_iclog; in xfs_log_unmount_verify_iclog() local
893 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xfs_log_unmount_verify_iclog()
894 ASSERT(iclog->ic_offset == 0); in xfs_log_unmount_verify_iclog()
895 } while ((iclog = iclog->ic_next) != log->l_iclog); in xfs_log_unmount_verify_iclog()
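
The } while ((iclog = iclog->ic_next) != log->l_iclog) line above is the traversal idiom for the iclog ring that recurs throughout this listing (see also lines 2539-2547, 2602-2613, and 3795-3803): the iclogs form a circular singly linked list headed by log->l_iclog, and a do/while is used because the head itself must be visited. A standalone sketch with hypothetical minimal types:

    #include <stdio.h>

    struct iclog {
        int           id;
        struct iclog *ic_next;
    };

    int main(void)
    {
        struct iclog ring[4];

        /* Link the array into a ring, as xlog_alloc_log() links ic_next. */
        for (int i = 0; i < 4; i++) {
            ring[i].id = i;
            ring[i].ic_next = &ring[(i + 1) % 4];
        }

        /* The canonical walk: start at the head, stop when we come back
         * to it. The head is visited first, which a while loop would skip. */
        struct iclog *head = &ring[0], *ic = head;
        do {
            printf("visiting iclog %d\n", ic->id);
        } while ((ic = ic->ic_next) != head);

        return 0;
    }
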
1194 struct xlog_in_core *iclog = in xlog_ioend_work() local
1196 struct xlog *log = iclog->ic_log; in xlog_ioend_work()
1199 error = blk_status_to_errno(iclog->ic_bio.bi_status); in xlog_ioend_work()
1202 if (iclog->ic_fail_crc) in xlog_ioend_work()
1214 xlog_state_done_syncing(iclog); in xlog_ioend_work()
1215 bio_uninit(&iclog->ic_bio); in xlog_ioend_work()
1223 up(&iclog->ic_sema); in xlog_ioend_work()
1315 xlog_in_core_t *iclog, *prev_iclog=NULL; in xlog_alloc_log() local
1390 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL); in xlog_alloc_log()
1391 if (!iclog) in xlog_alloc_log()
1394 *iclogp = iclog; in xlog_alloc_log()
1395 iclog->ic_prev = prev_iclog; in xlog_alloc_log()
1396 prev_iclog = iclog; in xlog_alloc_log()
1398 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask, in xlog_alloc_log()
1400 if (!iclog->ic_data) in xlog_alloc_log()
1403 log->l_iclog_bak[i] = &iclog->ic_header; in xlog_alloc_log()
1405 head = &iclog->ic_header; in xlog_alloc_log()
1415 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize; in xlog_alloc_log()
1416 iclog->ic_state = XLOG_STATE_ACTIVE; in xlog_alloc_log()
1417 iclog->ic_log = log; in xlog_alloc_log()
1418 atomic_set(&iclog->ic_refcnt, 0); in xlog_alloc_log()
1419 spin_lock_init(&iclog->ic_callback_lock); in xlog_alloc_log()
1420 INIT_LIST_HEAD(&iclog->ic_callbacks); in xlog_alloc_log()
1421 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize; in xlog_alloc_log()
1423 init_waitqueue_head(&iclog->ic_force_wait); in xlog_alloc_log()
1424 init_waitqueue_head(&iclog->ic_write_wait); in xlog_alloc_log()
1425 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work); in xlog_alloc_log()
1426 sema_init(&iclog->ic_sema, 1); in xlog_alloc_log()
1428 iclogp = &iclog->ic_next; in xlog_alloc_log()
1447 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) { in xlog_alloc_log()
1448 prev_iclog = iclog->ic_next; in xlog_alloc_log()
1449 kmem_free(iclog->ic_data); in xlog_alloc_log()
1450 kmem_free(iclog); in xlog_alloc_log()
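
Lines 1315-1450 show xlog_alloc_log() building the ring one iclog at a time and, on any allocation failure, unwinding exactly the iclogs built so far; the out_free loop at lines 1447-1450 terminates because the chain is still NULL-terminated until the ring is closed. A sketch of that allocate-then-unwind shape, with calloc/malloc standing in for kmem_zalloc()/kmem_alloc_io() and hypothetical names:

    #include <stdlib.h>

    struct iclog {
        void         *ic_data;
        struct iclog *ic_next;
    };

    /* Returns the head of a ring of n iclogs (n >= 1 assumed), or NULL
     * after freeing any partial chain on allocation failure. */
    static struct iclog *alloc_ring(int n, size_t bufsize)
    {
        struct iclog *head = NULL, *prev = NULL, *ic;
        struct iclog **icp = &head;    /* tail pointer, like iclogp */

        for (int i = 0; i < n; i++) {
            ic = calloc(1, sizeof(*ic));
            if (!ic)
                goto out_free;
            *icp = ic;                 /* chain before allocating data, so
                                        * a failure below still frees it */
            prev = ic;
            ic->ic_data = malloc(bufsize);
            if (!ic->ic_data)
                goto out_free;
            icp = &ic->ic_next;
        }
        prev->ic_next = head;          /* close the ring only on success */
        return head;

    out_free:
        /* The chain is NULL-terminated here, so this walk ends. */
        for (struct iclog *next; head; head = next) {
            next = head->ic_next;
            free(head->ic_data);       /* free(NULL) is a no-op */
            free(head);
        }
        return NULL;
    }

    int main(void)
    {
        struct iclog *ring = alloc_ring(8, 32768);
        (void)ring;    /* leaked at exit for brevity */
        return 0;
    }
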
1468 struct xlog_in_core **iclog, in xlog_commit_record() argument
1485 error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS, in xlog_commit_record()
1581 struct xlog_in_core *iclog, in xlog_pack_data() argument
1585 int size = iclog->ic_offset + roundoff; in xlog_pack_data()
1589 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn); in xlog_pack_data()
1591 dp = iclog->ic_datap; in xlog_pack_data()
1595 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp; in xlog_pack_data()
1601 xlog_in_core_2_t *xhdr = iclog->ic_data; in xlog_pack_data()
1660 struct xlog_in_core *iclog = bio->bi_private; in xlog_bio_end_io() local
1662 queue_work(iclog->ic_log->l_ioend_workqueue, in xlog_bio_end_io()
1663 &iclog->ic_end_io_work); in xlog_bio_end_io()
1690 struct xlog_in_core *iclog, in xlog_write_iclog() argument
1705 down(&iclog->ic_sema); in xlog_write_iclog()
1706 if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) { in xlog_write_iclog()
1714 xlog_state_done_syncing(iclog); in xlog_write_iclog()
1715 up(&iclog->ic_sema); in xlog_write_iclog()
1719 bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE)); in xlog_write_iclog()
1720 bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev); in xlog_write_iclog()
1721 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno; in xlog_write_iclog()
1722 iclog->ic_bio.bi_end_io = xlog_bio_end_io; in xlog_write_iclog()
1723 iclog->ic_bio.bi_private = iclog; in xlog_write_iclog()
1731 iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | in xlog_write_iclog()
1734 iclog->ic_bio.bi_opf |= REQ_PREFLUSH; in xlog_write_iclog()
1736 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { in xlog_write_iclog()
1740 if (is_vmalloc_addr(iclog->ic_data)) in xlog_write_iclog()
1741 flush_kernel_vmap_range(iclog->ic_data, count); in xlog_write_iclog()
1750 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno, in xlog_write_iclog()
1752 bio_chain(split, &iclog->ic_bio); in xlog_write_iclog()
1756 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart; in xlog_write_iclog()
1759 submit_bio(&iclog->ic_bio); in xlog_write_iclog()
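
The on-disk log is physically circular, and lines 1750-1756 handle the case where a single iclog write runs past the last log block: bio_split() carves off the part that fits, and the chained remainder is resubmitted at the start of the log. The arithmetic alone, as a standalone model (hypothetical names, lengths in 512-byte basic blocks):

    #include <stdbool.h>
    #include <stdio.h>

    struct split {
        bool     wrapped;
        unsigned first_len;    /* BBs written at bno             */
        unsigned second_len;   /* BBs written at block 0, if any */
    };

    static struct split split_at_log_end(unsigned bno, unsigned len_bb,
                                         unsigned logBBsize)
    {
        struct split s = { .wrapped = false, .first_len = len_bb };

        if (bno + len_bb > logBBsize) {
            /* Mirrors bio_split(&iclog->ic_bio, log->l_logBBsize - bno, ...) */
            s.wrapped = true;
            s.first_len = logBBsize - bno;
            s.second_len = len_bb - s.first_len;
        }
        return s;
    }

    int main(void)
    {
        /* A 64-BB write starting 16 BBs before the end of an 8192-BB log. */
        struct split s = split_at_log_end(8176, 64, 8192);
        printf("wrapped=%d first=%u second=%u\n", s.wrapped, s.first_len,
               s.second_len);   /* wrapped=1 first=16 second=48 */
        return 0;
    }
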
1789 struct xlog_in_core *iclog, in xlog_calc_iclog_size() argument
1799 count_init = log->l_iclog_hsize + iclog->ic_offset; in xlog_calc_iclog_size()
1845 struct xlog_in_core *iclog) in xlog_sync() argument
1853 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); in xlog_sync()
1855 count = xlog_calc_iclog_size(log, iclog, &roundoff); in xlog_sync()
1862 xlog_pack_data(log, iclog, roundoff); in xlog_sync()
1865 size = iclog->ic_offset; in xlog_sync()
1868 iclog->ic_header.h_len = cpu_to_be32(size); in xlog_sync()
1873 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)); in xlog_sync()
1877 xlog_split_iclog(log, &iclog->ic_header, bno, count); in xlog_sync()
1882 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header, in xlog_sync()
1883 iclog->ic_datap, size); in xlog_sync()
1893 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA); in xlog_sync()
1894 iclog->ic_fail_crc = true; in xlog_sync()
1897 be64_to_cpu(iclog->ic_header.h_lsn)); in xlog_sync()
1914 xlog_verify_iclog(log, iclog, count); in xlog_sync()
1915 xlog_write_iclog(log, iclog, bno, count, need_flush); in xlog_sync()
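
xlog_sync() (lines 1845-1915) first asks xlog_calc_iclog_size() how many bytes to write: the header plus ic_offset, rounded up to the write granularity, with the padding returned as roundoff so the space accounting can be fixed up. A standalone model of that computation; the mask trick assumes a power-of-two alignment, and the example values are assumptions:

    #include <stdio.h>

    #define BBSIZE 512u    /* basic block size, as in XFS */

    /* hsize models log->l_iclog_hsize; stripe_unit > 1 models a v2 log
     * with a log stripe unit, otherwise we align to one basic block. */
    static unsigned calc_iclog_size(unsigned hsize, unsigned ic_offset,
                                    unsigned stripe_unit, unsigned *roundoff)
    {
        unsigned count_init = hsize + ic_offset;
        unsigned align = stripe_unit > 1 ? stripe_unit : BBSIZE;
        unsigned count = (count_init + align - 1) & ~(align - 1);

        *roundoff = count - count_init;    /* padding added by rounding */
        return count;
    }

    int main(void)
    {
        unsigned roundoff;
        unsigned count = calc_iclog_size(512, 7000, 0, &roundoff);

        printf("count=%u roundoff=%u\n", count, roundoff);  /* 7680, 168 */
        return 0;
    }
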
1925 xlog_in_core_t *iclog, *next_iclog; in xlog_dealloc_log() local
1934 iclog = log->l_iclog; in xlog_dealloc_log()
1936 down(&iclog->ic_sema); in xlog_dealloc_log()
1937 up(&iclog->ic_sema); in xlog_dealloc_log()
1938 iclog = iclog->ic_next; in xlog_dealloc_log()
1941 iclog = log->l_iclog; in xlog_dealloc_log()
1943 next_iclog = iclog->ic_next; in xlog_dealloc_log()
1944 kmem_free(iclog->ic_data); in xlog_dealloc_log()
1945 kmem_free(iclog); in xlog_dealloc_log()
1946 iclog = next_iclog; in xlog_dealloc_log()
1960 struct xlog_in_core *iclog, in xlog_state_finish_copy() argument
1966 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt); in xlog_state_finish_copy()
1967 iclog->ic_offset += copy_bytes; in xlog_state_finish_copy()
2223 struct xlog_in_core *iclog, in xlog_write_copy_finish() argument
2240 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2249 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) { in xlog_write_copy_finish()
2252 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt); in xlog_write_copy_finish()
2256 if (iclog->ic_state == XLOG_STATE_ACTIVE) in xlog_write_copy_finish()
2257 xlog_state_switch_iclogs(log, iclog, 0); in xlog_write_copy_finish()
2259 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC || in xlog_write_copy_finish()
2260 iclog->ic_state == XLOG_STATE_IOERROR); in xlog_write_copy_finish()
2265 *commit_iclog = iclog; in xlog_write_copy_finish()
2271 error = xlog_state_release_iclog(log, iclog); in xlog_write_copy_finish()
2326 struct xlog_in_core *iclog = NULL; in xlog_write() local
2358 error = xlog_state_get_iclog_space(log, len, &iclog, ticket, in xlog_write()
2363 ASSERT(log_offset <= iclog->ic_size - 1); in xlog_write()
2364 ptr = iclog->ic_datap + log_offset; in xlog_write()
2368 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_write()
2411 iclog->ic_size-log_offset, in xlog_write()
2441 error = xlog_write_copy_finish(log, iclog, flags, in xlog_write()
2483 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt); in xlog_write()
2486 *commit_iclog = iclog; in xlog_write()
2488 error = xlog_state_release_iclog(log, iclog); in xlog_write()
2497 struct xlog_in_core *iclog, in xlog_state_activate_iclog() argument
2500 ASSERT(list_empty_careful(&iclog->ic_callbacks)); in xlog_state_activate_iclog()
2509 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) { in xlog_state_activate_iclog()
2519 iclog->ic_state = XLOG_STATE_ACTIVE; in xlog_state_activate_iclog()
2520 iclog->ic_offset = 0; in xlog_state_activate_iclog()
2521 iclog->ic_header.h_num_logops = 0; in xlog_state_activate_iclog()
2522 memset(iclog->ic_header.h_cycle_data, 0, in xlog_state_activate_iclog()
2523 sizeof(iclog->ic_header.h_cycle_data)); in xlog_state_activate_iclog()
2524 iclog->ic_header.h_lsn = 0; in xlog_state_activate_iclog()
2536 struct xlog_in_core *iclog = log->l_iclog; in xlog_state_activate_iclogs() local
2539 if (iclog->ic_state == XLOG_STATE_DIRTY) in xlog_state_activate_iclogs()
2540 xlog_state_activate_iclog(iclog, iclogs_changed); in xlog_state_activate_iclogs()
2545 else if (iclog->ic_state != XLOG_STATE_ACTIVE) in xlog_state_activate_iclogs()
2547 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_state_activate_iclogs()
2602 struct xlog_in_core *iclog = log->l_iclog; in xlog_get_lowest_lsn() local
2606 if (iclog->ic_state == XLOG_STATE_ACTIVE || in xlog_get_lowest_lsn()
2607 iclog->ic_state == XLOG_STATE_DIRTY) in xlog_get_lowest_lsn()
2610 lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_get_lowest_lsn()
2613 } while ((iclog = iclog->ic_next) != log->l_iclog); in xlog_get_lowest_lsn()
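
xlog_get_lowest_lsn() (lines 2602-2613) walks the ring and takes the minimum h_lsn over iclogs that are still in flight, skipping ACTIVE (not yet submitted) and DIRTY (already completed) ones; a zero result means nothing is in flight. A standalone sketch, keeping the continue-inside-do/while shape of the original, which advances the iterator because continue jumps to the loop's controlling expression:

    #include <stdint.h>
    #include <stdio.h>

    enum ic_state { IC_ACTIVE, IC_SYNCING, IC_DIRTY };

    struct iclog {
        enum ic_state state;
        uint64_t      h_lsn;
        struct iclog *ic_next;
    };

    static uint64_t lowest_lsn(struct iclog *head)
    {
        uint64_t lowest = 0;
        struct iclog *ic = head;

        do {
            if (ic->state == IC_ACTIVE || ic->state == IC_DIRTY)
                continue;          /* nothing pinned by this iclog */
            if (!lowest || ic->h_lsn < lowest)
                lowest = ic->h_lsn;
        } while ((ic = ic->ic_next) != head);

        return lowest;
    }

    int main(void)
    {
        struct iclog a, b, c;

        a = (struct iclog){ IC_DIRTY,   100, &b };
        b = (struct iclog){ IC_SYNCING, 200, &c };
        c = (struct iclog){ IC_SYNCING, 150, &a };
        printf("lowest in-flight lsn = %llu\n",
               (unsigned long long)lowest_lsn(&a));   /* 150 */
        return 0;
    }
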
2643 struct xlog_in_core *iclog, in xlog_state_set_callback() argument
2646 iclog->ic_state = XLOG_STATE_CALLBACK; in xlog_state_set_callback()
2651 if (list_empty_careful(&iclog->ic_callbacks)) in xlog_state_set_callback()
2666 struct xlog_in_core *iclog, in xlog_state_iodone_process_iclog() argument
2672 switch (iclog->ic_state) { in xlog_state_iodone_process_iclog()
2695 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xlog_state_iodone_process_iclog()
2699 xlog_state_set_callback(log, iclog, header_lsn); in xlog_state_iodone_process_iclog()
2723 struct xlog_in_core *iclog) in xlog_state_do_iclog_callbacks() argument
2728 spin_lock(&iclog->ic_callback_lock); in xlog_state_do_iclog_callbacks()
2729 while (!list_empty(&iclog->ic_callbacks)) { in xlog_state_do_iclog_callbacks()
2732 list_splice_init(&iclog->ic_callbacks, &tmp); in xlog_state_do_iclog_callbacks()
2734 spin_unlock(&iclog->ic_callback_lock); in xlog_state_do_iclog_callbacks()
2736 spin_lock(&iclog->ic_callback_lock); in xlog_state_do_iclog_callbacks()
2745 spin_unlock(&iclog->ic_callback_lock); in xlog_state_do_iclog_callbacks()
2752 struct xlog_in_core *iclog; in xlog_state_do_callback() local
2770 iclog = log->l_iclog; in xlog_state_do_callback()
2776 if (xlog_state_iodone_process_iclog(log, iclog, in xlog_state_do_callback()
2780 if (iclog->ic_state != XLOG_STATE_CALLBACK && in xlog_state_do_callback()
2781 iclog->ic_state != XLOG_STATE_IOERROR) { in xlog_state_do_callback()
2782 iclog = iclog->ic_next; in xlog_state_do_callback()
2791 xlog_state_do_iclog_callbacks(log, iclog); in xlog_state_do_callback()
2793 wake_up_all(&iclog->ic_force_wait); in xlog_state_do_callback()
2795 xlog_state_clean_iclog(log, iclog); in xlog_state_do_callback()
2796 iclog = iclog->ic_next; in xlog_state_do_callback()
2797 } while (first_iclog != iclog); in xlog_state_do_callback()
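
Lines 2723-2745 show the classic drop-the-lock callback pattern: xlog_state_do_iclog_callbacks() detaches the whole pending list under ic_callback_lock with list_splice_init(), runs the callbacks unlocked (they may take other locks or queue more callbacks), then retakes the lock and loops until the list stays empty. A minimal standalone model with a singly linked list; entry ownership stays with the callers in this sketch:

    #include <pthread.h>
    #include <stddef.h>

    struct callback {
        void           (*fn)(void *arg);
        void            *arg;
        struct callback *next;
    };

    struct iclog {
        pthread_mutex_t  callback_lock;   /* models ic_callback_lock */
        struct callback *callbacks;       /* models ic_callbacks     */
    };

    static void run_iclog_callbacks(struct iclog *ic)
    {
        pthread_mutex_lock(&ic->callback_lock);
        while (ic->callbacks) {
            /* list_splice_init() analogue: steal the list, leave it
             * empty so concurrent adders start a fresh one. */
            struct callback *tmp = ic->callbacks;
            ic->callbacks = NULL;

            pthread_mutex_unlock(&ic->callback_lock);
            for (struct callback *cb = tmp; cb; cb = cb->next)
                cb->fn(cb->arg);          /* runs without the lock held */
            pthread_mutex_lock(&ic->callback_lock);
        }
        pthread_mutex_unlock(&ic->callback_lock);
    }

    static void hello(void *arg) { (void)arg; }

    int main(void)
    {
        struct callback cb = { hello, NULL, NULL };
        struct iclog ic;

        pthread_mutex_init(&ic.callback_lock, NULL);
        ic.callbacks = &cb;
        run_iclog_callbacks(&ic);
        return 0;
    }
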
2831 struct xlog_in_core *iclog) in xlog_state_done_syncing() argument
2833 struct xlog *log = iclog->ic_log; in xlog_state_done_syncing()
2836 ASSERT(atomic_read(&iclog->ic_refcnt) == 0); in xlog_state_done_syncing()
2844 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING); in xlog_state_done_syncing()
2845 iclog->ic_state = XLOG_STATE_DONE_SYNC; in xlog_state_done_syncing()
2853 wake_up_all(&iclog->ic_write_wait); in xlog_state_done_syncing()
2887 xlog_in_core_t *iclog; in xlog_state_get_iclog_space() local
2896 iclog = log->l_iclog; in xlog_state_get_iclog_space()
2897 if (iclog->ic_state != XLOG_STATE_ACTIVE) { in xlog_state_get_iclog_space()
2905 head = &iclog->ic_header; in xlog_state_get_iclog_space()
2907 atomic_inc(&iclog->ic_refcnt); /* prevents sync */ in xlog_state_get_iclog_space()
2908 log_offset = iclog->ic_offset; in xlog_state_get_iclog_space()
2935 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) { in xlog_state_get_iclog_space()
2938 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2947 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1)) in xlog_state_get_iclog_space()
2948 error = xlog_state_release_iclog(log, iclog); in xlog_state_get_iclog_space()
2961 if (len <= iclog->ic_size - iclog->ic_offset) { in xlog_state_get_iclog_space()
2963 iclog->ic_offset += len; in xlog_state_get_iclog_space()
2966 xlog_state_switch_iclogs(log, iclog, iclog->ic_size); in xlog_state_get_iclog_space()
2968 *iclogp = iclog; in xlog_state_get_iclog_space()
2970 ASSERT(iclog->ic_offset <= iclog->ic_size); in xlog_state_get_iclog_space()
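
xlog_state_get_iclog_space() makes two space decisions, visible at lines 2935-2970: an iclog with less than two op headers of room left is switched out before being handed to a writer, and a record larger than the remaining space takes all of it and continues in the next iclog, while a record that fits simply advances ic_offset. The sketch below models only those decisions with hypothetical names; it omits the refcounting, locking, and header setup around them:

    #include <stdio.h>

    #define OP_HDR_SIZE 12u   /* sizeof(xlog_op_header_t); 12 bytes on disk */

    struct iclog { unsigned ic_size, ic_offset; };

    enum fit { FIT_RETRY, FIT_WHOLE, FIT_PARTIAL };

    /* One reservation attempt against the current iclog. */
    static enum fit reserve_space(struct iclog *ic, unsigned len,
                                  unsigned *log_offset)
    {
        /* Less than two op headers of room: not worth starting a record
         * here; switch the iclog out and retry on the next one
         * (mirrors lines 2935-2948). */
        if (ic->ic_size - ic->ic_offset < 2 * OP_HDR_SIZE) {
            ic->ic_offset = ic->ic_size;   /* modelled as "taken in full" */
            return FIT_RETRY;
        }

        *log_offset = ic->ic_offset;
        if (len <= ic->ic_size - ic->ic_offset) {
            ic->ic_offset += len;          /* whole record fits */
            return FIT_WHOLE;
        }
        /* Record exceeds the space left: take it all, and the caller
         * continues in the next iclog (mirrors lines 2961-2966). */
        ic->ic_offset = ic->ic_size;
        return FIT_PARTIAL;
    }

    int main(void)
    {
        struct iclog ic = { .ic_size = 32768, .ic_offset = 32760 };
        unsigned off;

        printf("%d\n", reserve_space(&ic, 100, &off));  /* 0: FIT_RETRY */
        return 0;
    }
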
3070 struct xlog_in_core *iclog, in xlog_state_switch_iclogs() argument
3073 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE); in xlog_state_switch_iclogs()
3077 eventual_size = iclog->ic_offset; in xlog_state_switch_iclogs()
3078 iclog->ic_state = XLOG_STATE_WANT_SYNC; in xlog_state_switch_iclogs()
3079 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block); in xlog_state_switch_iclogs()
3108 ASSERT(iclog == log->l_iclog); in xlog_state_switch_iclogs()
3109 log->l_iclog = iclog->ic_next; in xlog_state_switch_iclogs()
3145 struct xlog_in_core *iclog; in xfs_log_force() local
3154 iclog = log->l_iclog; in xfs_log_force()
3155 if (iclog->ic_state == XLOG_STATE_IOERROR) in xfs_log_force()
3158 if (iclog->ic_state == XLOG_STATE_DIRTY || in xfs_log_force()
3159 (iclog->ic_state == XLOG_STATE_ACTIVE && in xfs_log_force()
3160 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) { in xfs_log_force()
3169 iclog = iclog->ic_prev; in xfs_log_force()
3170 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) { in xfs_log_force()
3171 if (atomic_read(&iclog->ic_refcnt) == 0) { in xfs_log_force()
3179 atomic_inc(&iclog->ic_refcnt); in xfs_log_force()
3180 lsn = be64_to_cpu(iclog->ic_header.h_lsn); in xfs_log_force()
3181 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3182 if (xlog_state_release_iclog(log, iclog)) in xfs_log_force()
3185 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) in xfs_log_force()
3195 xlog_state_switch_iclogs(log, iclog, 0); in xfs_log_force()
3206 return xlog_wait_on_iclog(iclog); in xfs_log_force()
3223 struct xlog_in_core *iclog; in xlog_force_lsn() local
3226 iclog = log->l_iclog; in xlog_force_lsn()
3227 if (iclog->ic_state == XLOG_STATE_IOERROR) in xlog_force_lsn()
3230 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) { in xlog_force_lsn()
3231 iclog = iclog->ic_next; in xlog_force_lsn()
3232 if (iclog == log->l_iclog) in xlog_force_lsn()
3236 if (iclog->ic_state == XLOG_STATE_ACTIVE) { in xlog_force_lsn()
3253 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC || in xlog_force_lsn()
3254 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) { in xlog_force_lsn()
3255 xlog_wait(&iclog->ic_prev->ic_write_wait, in xlog_force_lsn()
3259 atomic_inc(&iclog->ic_refcnt); in xlog_force_lsn()
3260 xlog_state_switch_iclogs(log, iclog, 0); in xlog_force_lsn()
3261 if (xlog_state_release_iclog(log, iclog)) in xlog_force_lsn()
3268 return xlog_wait_on_iclog(iclog); in xlog_force_lsn()
3536 struct xlog_in_core *iclog, in xlog_verify_tail_lsn() argument
3544 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize)) in xlog_verify_tail_lsn()
3553 if (blocks < BTOBB(iclog->ic_offset) + 1) in xlog_verify_tail_lsn()
3576 struct xlog_in_core *iclog, in xlog_verify_iclog() argument
3599 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) in xlog_verify_iclog()
3602 base_ptr = ptr = &iclog->ic_header; in xlog_verify_iclog()
3603 p = &iclog->ic_header; in xlog_verify_iclog()
3611 len = be32_to_cpu(iclog->ic_header.h_num_logops); in xlog_verify_iclog()
3612 base_ptr = ptr = iclog->ic_datap; in xlog_verify_iclog()
3614 xhdr = iclog->ic_data; in xlog_verify_iclog()
3624 idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap); in xlog_verify_iclog()
3632 iclog->ic_header.h_cycle_data[idx]); in xlog_verify_iclog()
3648 (uintptr_t)iclog->ic_datap); in xlog_verify_iclog()
3654 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]); in xlog_verify_iclog()
3669 xlog_in_core_t *iclog, *ic; in xlog_state_ioerror() local
3671 iclog = log->l_iclog; in xlog_state_ioerror()
3672 if (iclog->ic_state != XLOG_STATE_IOERROR) { in xlog_state_ioerror()
3677 ic = iclog; in xlog_state_ioerror()
3681 } while (ic != iclog); in xlog_state_ioerror()
3793 xlog_in_core_t *iclog; in xlog_iclogs_empty() local
3795 iclog = log->l_iclog; in xlog_iclogs_empty()
3800 if (iclog->ic_header.h_num_logops) in xlog_iclogs_empty()
3802 iclog = iclog->ic_next; in xlog_iclogs_empty()
3803 } while (iclog != log->l_iclog); in xlog_iclogs_empty()
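
Taken together, the references above trace one lifecycle per iclog: ACTIVE -> WANT_SYNC (xlog_state_switch_iclogs) -> SYNCING (xlog_state_release_iclog/xlog_sync) -> DONE_SYNC (xlog_state_done_syncing) -> CALLBACK (xlog_state_set_callback) -> DIRTY -> ACTIVE again (xlog_state_activate_iclog), with IOERROR reachable from any state on shutdown (xlog_state_ioerror). A table-driven summary of the normal cycle, as a sketch rather than the kernel's representation:

    #include <stdbool.h>

    enum ic_state {
        IC_ACTIVE, IC_WANT_SYNC, IC_SYNCING, IC_DONE_SYNC,
        IC_CALLBACK, IC_DIRTY, IC_IOERROR, IC_NR_STATES
    };

    /* Legal forward transitions in the normal (non-error) cycle. */
    static const enum ic_state ic_next_state[IC_NR_STATES] = {
        [IC_ACTIVE]    = IC_WANT_SYNC,
        [IC_WANT_SYNC] = IC_SYNCING,
        [IC_SYNCING]   = IC_DONE_SYNC,
        [IC_DONE_SYNC] = IC_CALLBACK,
        [IC_CALLBACK]  = IC_DIRTY,
        [IC_DIRTY]     = IC_ACTIVE,
        [IC_IOERROR]   = IC_IOERROR,   /* terminal until unmount */
    };

    static bool ic_advance(enum ic_state *s)
    {
        if (*s == IC_IOERROR)
            return false;
        *s = ic_next_state[*s];
        return true;
    }

    int main(void)
    {
        enum ic_state s = IC_ACTIVE;

        while (ic_advance(&s) && s != IC_ACTIVE)
            ;   /* one full trip around the normal cycle */
        return 0;
    }
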