Lines Matching defs:iclog

43 	struct xlog_in_core	*iclog);
50 struct xlog_in_core **iclog,
60 struct xlog_in_core *iclog,
69 struct xlog_in_core *iclog,
74 struct xlog_in_core *iclog);
509 * Run all the pending iclog callbacks and wake log force waiters and iclog
518 while the iclog owner might still be preparing the iclog for IO submission.
520 * again to process any callbacks that may have been added to that iclog.
526 struct xlog_in_core *iclog;
529 iclog = log->l_iclog;
531 if (atomic_read(&iclog->ic_refcnt)) {
532 /* Reference holder will re-run iclog callbacks. */
535 list_splice_init(&iclog->ic_callbacks, &cb_list);
541 wake_up_all(&iclog->ic_write_wait);
542 wake_up_all(&iclog->ic_force_wait);
543 } while ((iclog = iclog->ic_next) != log->l_iclog);
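
The 526-543 hits above all come from one ring walk over the in-core logs at shutdown. A minimal sketch of that pattern, reconstructed around the matched lines; the l_icloglock drop/reacquire and the xlog_cil_process_committed() call are filled in from context and are assumptions, not matched lines:

	struct xlog_in_core	*iclog;
	LIST_HEAD(cb_list);

	iclog = log->l_iclog;
	do {
		if (atomic_read(&iclog->ic_refcnt)) {
			/* Reference holder will re-run iclog callbacks. */
			continue;
		}
		list_splice_init(&iclog->ic_callbacks, &cb_list);
		spin_unlock(&log->l_icloglock);

		xlog_cil_process_committed(&cb_list);

		spin_lock(&log->l_icloglock);
		wake_up_all(&iclog->ic_write_wait);
		wake_up_all(&iclog->ic_force_wait);
	} while ((iclog = iclog->ic_next) != log->l_iclog);
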
549 * Flush iclog to disk if this is the last reference to the given iclog and the
552 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
553 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
555 * within the iclog. We need to ensure that the log tail does not move beyond
556 * the tail that the first commit record in the iclog ordered against, otherwise
558 * performed on this iclog.
560 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
561 current tail into the iclog. Once the iclog tail is set, future operations must
564 * the iclog will get zeroed on activation of the iclog after sync, so we
565 * always capture the tail lsn on the iclog on the first NEED_FUA release
566 * regardless of the number of active reference counts on this iclog.
571 struct xlog_in_core *iclog,
579 trace_xlog_iclog_release(iclog, _RET_IP_);
582 * of the tail LSN into the iclog so we guarantee that the log tail does
583 * not move between the first time we know that the iclog needs to be
586 if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
587 (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
588 !iclog->ic_header.h_tail_lsn) {
590 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
593 last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
597 * If there are no more references to this iclog, process the
598 * pending iclog callbacks that were waiting on the release of
599 * this iclog.
609 if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
610 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
614 iclog->ic_state = XLOG_STATE_SYNCING;
615 xlog_verify_tail_lsn(log, iclog);
616 trace_xlog_iclog_syncing(iclog, _RET_IP_);
619 xlog_sync(log, iclog, ticket);
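
Reassembling the 571-619 hits, the release path looks roughly like the sketch below. The lockdep assertion, the source of tail_lsn (xlog_assign_tail_lsn()) and the early returns are assumptions filled in from context; the shutdown handling between the refcount drop and the state check is elided.

	lockdep_assert_held(&log->l_icloglock);

	trace_xlog_iclog_release(iclog, _RET_IP_);

	/* Capture the tail once, on the first release that needs this iclog stable. */
	if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
	     (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
	    !iclog->ic_header.h_tail_lsn) {
		tail_lsn = xlog_assign_tail_lsn(log->l_mp);
		iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
	}

	last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
	if (!last_ref)
		return 0;

	if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
		ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
		return 0;
	}

	/* Last reference on a WANT_SYNC iclog: hand it to xlog_sync(). */
	iclog->ic_state = XLOG_STATE_SYNCING;
	xlog_verify_tail_lsn(log, iclog);
	trace_xlog_iclog_syncing(iclog, _RET_IP_);

	spin_unlock(&log->l_icloglock);
	xlog_sync(log, iclog, ticket);
	spin_lock(&log->l_icloglock);
	return 0;
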
843 * Flush out the iclog to disk ensuring that device caches are flushed and
844 * the iclog hits stable storage before any completion waiters are woken.
848 struct xlog_in_core *iclog)
850 atomic_inc(&iclog->ic_refcnt);
851 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
852 if (iclog->ic_state == XLOG_STATE_ACTIVE)
853 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
854 return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
865 struct xlog_in_core *iclog = log->l_iclog;
868 down(&iclog->ic_sema);
869 up(&iclog->ic_sema);
870 iclog = iclog->ic_next;
875 * Wait for the iclog and all prior iclogs to be written to disk as required by the
876 * log force state machine. Waiting on ic_force_wait ensures iclog completions
882 struct xlog_in_core *iclog)
883 __releases(iclog->ic_log->l_icloglock)
885 struct xlog *log = iclog->ic_log;
887 trace_xlog_iclog_wait_on(iclog, _RET_IP_);
889 iclog->ic_state != XLOG_STATE_ACTIVE &&
890 iclog->ic_state != XLOG_STATE_DIRTY) {
892 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
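
The waiter side matched at 882-892 is small enough to sketch whole; the xlog_is_shutdown() checks and the unlock on the non-sleeping path are assumptions filled in around the matched lines.

	struct xlog	*log = iclog->ic_log;

	trace_xlog_iclog_wait_on(iclog, _RET_IP_);
	if (!xlog_is_shutdown(log) &&
	    iclog->ic_state != XLOG_STATE_ACTIVE &&
	    iclog->ic_state != XLOG_STATE_DIRTY) {
		/* xlog_wait() drops l_icloglock and sleeps until ic_force_wait is woken. */
		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
	} else {
		spin_unlock(&log->l_icloglock);
	}

	if (xlog_is_shutdown(log))
		return -EIO;
	return 0;
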
956 struct xlog_in_core *iclog;
974 iclog = log->l_iclog;
975 error = xlog_force_iclog(iclog);
976 xlog_wait_on_iclog(iclog);
988 struct xlog_in_core *iclog = log->l_iclog;
991 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
992 ASSERT(iclog->ic_offset == 0);
993 } while ((iclog = iclog->ic_next) != log->l_iclog);
1100 * If shutdown has come from iclog IO context, the log
1102 * for the iclog to complete shutdown processing before we
1224 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1276 * We may be holding the log iclog lock upon entering this routine.
1378 struct xlog_in_core *iclog =
1380 struct xlog *log = iclog->ic_log;
1383 error = blk_status_to_errno(iclog->ic_bio.bi_status);
1386 if (iclog->ic_fail_crc)
1398 xlog_state_done_syncing(iclog);
1399 bio_uninit(&iclog->ic_bio);
1403 * iclog after this, so an unmount waiting on this lock can now tear it
1404 * down safely. As such, it is unsafe to reference the iclog after the
1407 up(&iclog->ic_sema);
1526 xlog_in_core_t *iclog, *prev_iclog=NULL;
1597 * The amount of memory to allocate for the iclog structure is
1608 iclog = kzalloc(sizeof(*iclog) + bvec_size,
1610 if (!iclog)
1613 *iclogp = iclog;
1614 iclog->ic_prev = prev_iclog;
1615 prev_iclog = iclog;
1617 iclog->ic_data = kvzalloc(log->l_iclog_size,
1619 if (!iclog->ic_data)
1621 head = &iclog->ic_header;
1631 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1632 iclog->ic_state = XLOG_STATE_ACTIVE;
1633 iclog->ic_log = log;
1634 atomic_set(&iclog->ic_refcnt, 0);
1635 INIT_LIST_HEAD(&iclog->ic_callbacks);
1636 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1638 init_waitqueue_head(&iclog->ic_force_wait);
1639 init_waitqueue_head(&iclog->ic_write_wait);
1640 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1641 sema_init(&iclog->ic_sema, 1);
1643 iclogp = &iclog->ic_next;
1663 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1664 prev_iclog = iclog->ic_next;
1665 kvfree(iclog->ic_data);
1666 kfree(iclog);
1765 struct xlog_in_core *iclog,
1769 int size = iclog->ic_offset + roundoff;
1773 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1775 dp = iclog->ic_datap;
1779 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1785 xlog_in_core_2_t *xhdr = iclog->ic_data;
1844 struct xlog_in_core *iclog = bio->bi_private;
1846 queue_work(iclog->ic_log->l_ioend_workqueue,
1847 &iclog->ic_end_io_work);
1874 struct xlog_in_core *iclog,
1879 trace_xlog_iclog_write(iclog, _RET_IP_);
1889 down(&iclog->ic_sema);
1907 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1910 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1911 iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1912 iclog->ic_bio.bi_private = iclog;
1914 if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1915 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1919 * by the LSN in this iclog is on stable storage. This is slow,
1931 if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1932 iclog->ic_bio.bi_opf |= REQ_FUA;
1934 iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1936 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count))
1939 if (is_vmalloc_addr(iclog->ic_data))
1940 flush_kernel_vmap_range(iclog->ic_data, count);
1949 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1951 bio_chain(split, &iclog->ic_bio);
1955 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1958 submit_bio(&iclog->ic_bio);
1963 xlog_state_done_syncing(iclog);
1964 up(&iclog->ic_sema);
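
The cache-flush decisions matched at 1914-1934 are easier to read with the bio setup around them. A condensed sketch; the request flags passed to bio_init(), the error path on a mapping failure, and the log-wrap split handled at 1949-1955 are assumptions or elisions here.

	bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
		 howmany(count, PAGE_SIZE),
		 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
	iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
	iclog->ic_bio.bi_end_io = xlog_bio_end_io;
	iclog->ic_bio.bi_private = iclog;

	if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH)
		iclog->ic_bio.bi_opf |= REQ_PREFLUSH;	/* prior writes must reach stable storage first */
	if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
		iclog->ic_bio.bi_opf |= REQ_FUA;	/* this iclog itself must reach stable storage */
	iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);

	if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count))
		return;		/* the real code shuts the log down here; elided */
	if (is_vmalloc_addr(iclog->ic_data))
		flush_kernel_vmap_range(iclog->ic_data, count);

	submit_bio(&iclog->ic_bio);
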
1968 * We need to bump cycle number for the part of the iclog that is
1994 struct xlog_in_core *iclog,
2000 count_init = log->l_iclog_hsize + iclog->ic_offset;
2011 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2012 fashion. Before this routine is called, the current iclog ptr in the log
2013 should already have been moved to point to the next available iclog. This allows
2014 further writes to continue while this code syncs out an iclog ready to go.
2024 * This routine is single threaded on the iclog. No other thread can be in
2025 * this routine with the same iclog. Changing contents of iclog can there-
2036 struct xlog_in_core *iclog,
2044 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2045 trace_xlog_iclog_sync(iclog, _RET_IP_);
2047 count = xlog_calc_iclog_size(log, iclog, &roundoff);
2062 xlog_pack_data(log, iclog, roundoff);
2065 size = iclog->ic_offset;
2068 iclog->ic_header.h_len = cpu_to_be32(size);
2073 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2077 xlog_split_iclog(log, &iclog->ic_header, bno, count);
2080 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2081 iclog->ic_datap, size);
2091 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2092 iclog->ic_fail_crc = true;
2095 be64_to_cpu(iclog->ic_header.h_lsn));
2098 xlog_verify_iclog(log, iclog, count);
2099 xlog_write_iclog(log, iclog, bno, count);
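
Stitched back together, the xlog_sync() hits at 2044-2099 run in this order. The v2-log roundoff accounting, the wrap check feeding xlog_split_iclog(), and the omission of the debug-only CRC corruption at 2091-2092 are assumptions or simplifications in this sketch.

	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
	trace_xlog_iclog_sync(iclog, _RET_IP_);

	count = xlog_calc_iclog_size(log, iclog, &roundoff);

	/* Stamp the cycle number into every block before the write. */
	xlog_pack_data(log, iclog, roundoff);

	/* Record the real byte length in the header. */
	size = iclog->ic_offset;
	if (xfs_has_logv2(log->l_mp))
		size += roundoff;
	iclog->ic_header.h_len = cpu_to_be32(size);

	bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));

	/* Split the IO if it wraps past the end of the physical log. */
	if (bno + BTOBB(count) > log->l_logBBsize)
		xlog_split_iclog(log, &iclog->ic_header, bno, count);

	/* Checksum everything that will hit the disk. */
	iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
					    iclog->ic_datap, size);

	xlog_verify_iclog(log, iclog, count);
	xlog_write_iclog(log, iclog, bno, count);
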
2109 xlog_in_core_t *iclog, *next_iclog;
2113 * Destroy the CIL after waiting for iclog IO completion because an
2114 * iclog EIO error will try to shut down the log, which accesses the
2119 iclog = log->l_iclog;
2121 next_iclog = iclog->ic_next;
2122 kvfree(iclog->ic_data);
2123 kfree(iclog);
2124 iclog = next_iclog;
2138 struct xlog_in_core *iclog,
2144 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2145 iclog->ic_offset += copy_bytes;
2216 struct xlog_in_core *iclog,
2224 ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2228 memcpy(iclog->ic_datap + *log_offset, data, write_len);
2236 * Write log vectors into a single iclog which is guaranteed by the caller
2243 struct xlog_in_core *iclog,
2251 ASSERT(*log_offset + *len <= iclog->ic_size ||
2252 iclog->ic_state == XLOG_STATE_WANT_SYNC);
2263 xlog_write_iovec(iclog, log_offset, reg->i_addr,
2277 struct xlog_in_core *iclog = *iclogp;
2278 struct xlog *log = iclog->ic_log;
2282 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2283 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2284 error = xlog_state_release_iclog(log, iclog, ticket);
2289 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2295 *iclogp = iclog;
2300 * Write log vectors into a single iclog which is smaller than the current chain
2303 * wholly fit in the iclog.
2315 struct xlog_in_core *iclog = *iclogp;
2321 /* walk the logvec, copying until we run out of space in the iclog */
2335 * opheader, then we need to start afresh with a new iclog.
2337 if (iclog->ic_size - *log_offset <=
2340 &iclog, log_offset, *len, record_cnt,
2347 rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2354 xlog_write_iovec(iclog, log_offset, reg->i_addr,
2363 * multiple iclogs so we loop here. First we release the iclog
2364 * we currently have, then we get a new iclog and add a new
2366 * we either complete the iovec or fill the iclog. If we
2368 back to the top of the outer loop. If we fill the iclog, we
2372 * space in an iclog and hence requiring us to release the iclog
2376 * iclog, hence we cannot just terminate the loop at the end
2378 * space left in the current iclog, and check for the end of the
2379 * continuation after getting a new iclog.
2384 * space we need in the new iclog by adding that size
2391 &iclog, log_offset,
2397 ophdr = iclog->ic_datap + *log_offset;
2408 * If rlen fits in the iclog, then end the region
2413 if (rlen <= iclog->ic_size - *log_offset)
2418 rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2421 xlog_write_iovec(iclog, log_offset,
2432 *iclogp = iclog;
2446 * 3. While writing to this iclog
2447 * A. Reserve as much space in this iclog as we can get
2452 * 3. Find out if we can fit the entire region into this iclog
2455 * 6. If partial copy, release iclog; otherwise, continue
2456 * copying more regions into current iclog
2458 * 5. Release iclog for potential flush to on-disk log.
2471 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2485 struct xlog_in_core *iclog = NULL;
2499 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2504 ASSERT(log_offset <= iclog->ic_size - 1);
2507 * If we have a context pointer, pass it the first iclog we are
2508 * writing to so it can record state needed for iclog write
2512 xlog_cil_set_ctx_write_state(ctx, iclog);
2516 * If the entire log vec does not fit in the iclog, punt it to
2520 lv->lv_bytes > iclog->ic_size - log_offset) {
2521 error = xlog_write_partial(lv, ticket, &iclog,
2526 * We have no iclog to release, so just return
2532 xlog_write_full(lv, ticket, iclog, &log_offset,
2540 * the current iclog, and hence it will already have the space used by
2542 * iclog with the number of bytes written here.
2545 xlog_state_finish_copy(log, iclog, record_cnt, 0);
2546 error = xlog_state_release_iclog(log, iclog, ticket);
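
The numbered algorithm in the 2446-2471 comment maps onto the xlog_write() hits at 2485-2546 roughly as below. The iteration over the log vector chain (lv, lv_chain, lv_list), the trailing arguments of xlog_write_partial()/xlog_write_full(), and the locking around the final release are assumptions in this sketch.

	error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
					   &log_offset);
	if (error)
		return error;

	/* Record the first iclog written to for CIL write ordering. */
	if (ctx)
		xlog_cil_set_ctx_write_state(ctx, iclog);

	list_for_each_entry(lv, lv_chain, lv_list) {
		if (lv->lv_niovecs &&
		    lv->lv_bytes > iclog->ic_size - log_offset) {
			/* Does not fit: copy what we can and roll iclogs. */
			error = xlog_write_partial(lv, ticket, &iclog,
					&log_offset, &len, &record_cnt,
					&data_cnt);
			if (error)
				return error;
		} else {
			/* Fits entirely in the current iclog. */
			xlog_write_full(lv, ticket, iclog, &log_offset,
					&len, &record_cnt, &data_cnt);
		}
	}

	spin_lock(&log->l_icloglock);
	xlog_state_finish_copy(log, iclog, record_cnt, 0);
	error = xlog_state_release_iclog(log, iclog, ticket);
	spin_unlock(&log->l_icloglock);
	return error;
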
2554 struct xlog_in_core *iclog,
2557 ASSERT(list_empty_careful(&iclog->ic_callbacks));
2558 trace_xlog_iclog_activate(iclog, _RET_IP_);
2561 * If the number of ops in this iclog indicates it just contains the
2567 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2577 iclog->ic_state = XLOG_STATE_ACTIVE;
2578 iclog->ic_offset = 0;
2579 iclog->ic_header.h_num_logops = 0;
2580 memset(iclog->ic_header.h_cycle_data, 0,
2581 sizeof(iclog->ic_header.h_cycle_data));
2582 iclog->ic_header.h_lsn = 0;
2583 iclog->ic_header.h_tail_lsn = 0;
2588 * ACTIVE after iclog I/O has completed.
2595 struct xlog_in_core *iclog = log->l_iclog;
2598 if (iclog->ic_state == XLOG_STATE_DIRTY)
2599 xlog_state_activate_iclog(iclog, iclogs_changed);
2602 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2604 else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2606 } while ((iclog = iclog->ic_next) != log->l_iclog);
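
The 2595-2606 hits are a complete short ring walk; reassembled as a sketch (only the wrapping function around it is assumed):

	struct xlog_in_core	*iclog = log->l_iclog;

	do {
		if (iclog->ic_state == XLOG_STATE_DIRTY)
			xlog_state_activate_iclog(iclog, iclogs_changed);
		/*
		 * The ordering of marking iclogs ACTIVE must be maintained, so
		 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
		 */
		else if (iclog->ic_state != XLOG_STATE_ACTIVE)
			break;
	} while ((iclog = iclog->ic_next) != log->l_iclog);
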
2667 struct xlog_in_core *iclog = log->l_iclog;
2671 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2672 iclog->ic_state == XLOG_STATE_DIRTY)
2675 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2678 } while ((iclog = iclog->ic_next) != log->l_iclog);
2684 * Completion of an iclog IO does not imply that a transaction has completed, as
2689 * should only update the last_sync_lsn if this iclog contains transaction
2708 struct xlog_in_core *iclog,
2711 trace_xlog_iclog_callback(iclog, _RET_IP_);
2712 iclog->ic_state = XLOG_STATE_CALLBACK;
2717 if (list_empty_careful(&iclog->ic_callbacks))
2726 * iclog. The caller will need to run callbacks if the iclog is returned in the
2732 struct xlog_in_core *iclog)
2737 switch (iclog->ic_state) {
2746 * Now that we have an iclog that is in the DONE_SYNC state, do
2748 * If this is not the lowest lsn iclog, then we will leave it
2751 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2755 xlog_state_set_callback(log, iclog, header_lsn);
2759 * Can only perform callbacks in order. Since this iclog is not
2781 struct xlog_in_core *iclog = first_iclog;
2787 if (xlog_state_iodone_process_iclog(log, iclog))
2789 if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2790 iclog = iclog->ic_next;
2793 list_splice_init(&iclog->ic_callbacks, &cb_list);
2796 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2798 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2802 xlog_state_clean_iclog(log, iclog);
2803 iclog = iclog->ic_next;
2804 } while (iclog != first_iclog);
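
The callback-processing loop behind the 2781-2804 hits, sketched with the l_icloglock drop/reacquire and the xlog_cil_process_committed() call filled in as assumptions:

	struct xlog_in_core	*first_iclog = log->l_iclog;
	struct xlog_in_core	*iclog = first_iclog;

	do {
		LIST_HEAD(cb_list);

		if (xlog_state_iodone_process_iclog(log, iclog))
			break;
		if (iclog->ic_state != XLOG_STATE_CALLBACK) {
			iclog = iclog->ic_next;
			continue;
		}
		list_splice_init(&iclog->ic_callbacks, &cb_list);
		spin_unlock(&log->l_icloglock);

		trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
		xlog_cil_process_committed(&cb_list);
		trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);

		spin_lock(&log->l_icloglock);
		xlog_state_clean_iclog(log, iclog);
		iclog = iclog->ic_next;
	} while (iclog != first_iclog);
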
2811 * Loop running iclog completion callbacks until there are no more iclogs in a
2843 * Finish transitioning this iclog to the dirty state.
2850 struct xlog_in_core *iclog)
2852 struct xlog *log = iclog->ic_log;
2855 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2856 trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2864 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2865 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2870 * iclog buffer, we wake them all, one will get to do the
2873 wake_up_all(&iclog->ic_write_wait);
2880 * sleep. We wait on the flush queue on the head iclog as that should be
2881 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2885 * out-of-order even when an iclog past the head is free.
2906 xlog_in_core_t *iclog;
2915 iclog = log->l_iclog;
2916 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2924 head = &iclog->ic_header;
2926 atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2927 log_offset = iclog->ic_offset;
2929 trace_xlog_iclog_get_space(iclog, _RET_IP_);
2931 /* On the 1st write to an iclog, figure out lsn. This works
2953 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2956 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2959 * If we are the only one writing to this iclog, sync it to
2963 * reference to the iclog.
2965 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2966 error = xlog_state_release_iclog(log, iclog, ticket);
2974 * of this iclog? Or must we continue a write on the next iclog and
2975 * mark this iclog as completely taken? In the case where we switch
2976 * iclogs (to mark it taken), this particular iclog will release/sync
2979 if (len <= iclog->ic_size - iclog->ic_offset)
2980 iclog->ic_offset += len;
2982 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2983 *iclogp = iclog;
2985 ASSERT(iclog->ic_offset <= iclog->ic_size);
3077 * This routine will mark the current iclog in the ring as WANT_SYNC and move
3078 * the current iclog pointer to the next iclog in the ring.
3083 struct xlog_in_core *iclog,
3086 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3088 trace_xlog_iclog_switch(iclog, _RET_IP_);
3091 eventual_size = iclog->ic_offset;
3092 iclog->ic_state = XLOG_STATE_WANT_SYNC;
3093 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3121 ASSERT(iclog == log->l_iclog);
3122 log->l_iclog = iclog->ic_next;
3126 * Force the iclog to disk and check if the iclog has been completed before
3130 * unnecessary wait on the iclog.
3134 struct xlog_in_core *iclog,
3137 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3141 error = xlog_force_iclog(iclog);
3146 * If the iclog has already been completed and reused the header LSN
3149 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3165 * 1. the current iclog is active and has no data; the previous iclog
3167 * 2. the current iclog is drity, and the previous iclog is in the
3172 * 1. the current iclog is not in the active nor dirty state.
3173 * 2. the current iclog dirty, and the previous iclog is not in the
3175 * 3. the current iclog is active, and there is another thread writing
3176 * to this particular iclog.
3177 * 4. a) the current iclog is active and has no other writers
3178 * b) when we return from flushing out this iclog, it is still
3187 struct xlog_in_core *iclog;
3198 iclog = log->l_iclog;
3199 trace_xlog_iclog_force(iclog, _RET_IP_);
3201 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3202 (iclog->ic_state == XLOG_STATE_ACTIVE &&
3203 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3206 * look at the previous iclog.
3208 * If the previous iclog is active or dirty we are done. There
3210 * previous iclog and go to sleep.
3212 iclog = iclog->ic_prev;
3213 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3214 if (atomic_read(&iclog->ic_refcnt) == 0) {
3215 /* We have exclusive access to this iclog. */
3218 if (xlog_force_and_check_iclog(iclog, &completed))
3225 * Someone else is still writing to this iclog, so we
3226 * need to ensure that when they release the iclog it
3229 xlog_state_switch_iclogs(log, iclog, 0);
3234 * The iclog we are about to wait on may contain the checkpoint pushed
3237 * are flushed when this iclog is written.
3239 if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3240 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3243 return xlog_wait_on_iclog(iclog);
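
The sleep/no-sleep cases listed in the 3165-3178 comment correspond to the branch structure behind the 3198-3243 hits; a sketch, with the locking, the XFS_LOG_SYNC check and the out_unlock/out_error labels filled in as assumptions:

	iclog = log->l_iclog;
	trace_xlog_iclog_force(iclog, _RET_IP_);

	if (iclog->ic_state == XLOG_STATE_DIRTY ||
	    (iclog->ic_state == XLOG_STATE_ACTIVE &&
	     atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
		/* Head is empty or dirty: the previous iclog is the one to wait on. */
		iclog = iclog->ic_prev;
	} else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
		if (atomic_read(&iclog->ic_refcnt) == 0) {
			/* We have exclusive access to this iclog. */
			bool	completed;

			if (xlog_force_and_check_iclog(iclog, &completed))
				goto out_error;
			if (completed)
				goto out_unlock;
		} else {
			/* Someone else is writing: make it sync on release. */
			xlog_state_switch_iclogs(log, iclog, 0);
		}
	}

	/* Make sure device caches are flushed when this iclog hits the disk. */
	if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
		iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;

	if (flags & XFS_LOG_SYNC)
		return xlog_wait_on_iclog(iclog);
out_unlock:
	spin_unlock(&log->l_icloglock);
	return 0;
out_error:
	spin_unlock(&log->l_icloglock);
	return -EIO;
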
3255 * If an iclog with that lsn can be found:
3274 struct xlog_in_core *iclog;
3281 iclog = log->l_iclog;
3282 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3283 trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3284 iclog = iclog->ic_next;
3285 if (iclog == log->l_iclog)
3289 switch (iclog->ic_state) {
3293 * first time we've looked at the correct iclog buf) and the
3297 * transactions into this iclog before we close it down.
3307 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3308 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3309 xlog_wait(&iclog->ic_prev->ic_write_wait,
3313 if (xlog_force_and_check_iclog(iclog, &completed))
3322 * This iclog may contain the checkpoint pushed by the
3326 * when this iclog is written.
3328 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3341 return xlog_wait_on_iclog(iclog);
3355 * the iclog that needs to be flushed to stable storage. If the caller needs
3356 * a synchronous log force, we will wait on the iclog with the LSN returned by
3459 * for LR headers - the space for data in an iclog is the size minus
3460 * the space used for the headers. If we use the iclog size, then we
3468 * transaction is the first in an iclog and hence has the LR headers
3469 * accounted to it, then the remaining space in the iclog is
3471 * than the iclog, it will be the only thing in that iclog.
3580 struct xlog_in_core *iclog)
3582 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3588 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3597 if (blocks < BTOBB(iclog->ic_offset) + 1)
3603 * Perform a number of checks on the iclog before writing to disk.
3620 struct xlog_in_core *iclog,
3632 /* check validity of iclog pointers */
3639 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3643 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3646 base_ptr = ptr = &iclog->ic_header;
3647 p = &iclog->ic_header;
3655 len = be32_to_cpu(iclog->ic_header.h_num_logops);
3656 base_ptr = ptr = iclog->ic_datap;
3658 xhdr = iclog->ic_data;
3668 idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3676 iclog->ic_header.h_cycle_data[idx]);
3692 idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3698 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3818 xlog_in_core_t *iclog;
3820 iclog = log->l_iclog;
3825 if (iclog->ic_header.h_num_logops)
3827 iclog = iclog->ic_next;
3828 } while (iclog != log->l_iclog);
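
The final three hits are the ring walk that decides whether every iclog is empty; reassembled as a sketch (the surrounding function is assumed to return a boolean-style int):

	xlog_in_core_t	*iclog;

	iclog = log->l_iclog;
	do {
		/* Endianness does not matter here: zero is zero in any byte order. */
		if (iclog->ic_header.h_num_logops)
			return 0;
		iclog = iclog->ic_next;
	} while (iclog != log->l_iclog);
	return 1;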