Lines Matching refs:log

34  * In core log state
37 XLOG_STATE_ACTIVE, /* Current IC log being written to */
39 XLOG_STATE_SYNCING, /* This IC log is syncing */
42 XLOG_STATE_DIRTY, /* Dirty IC log, not ready for ACTIVE status */
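
The three states above are a slice of the larger iclog life cycle (in recent kernels: ACTIVE, WANT_SYNC, SYNCING, DONE_SYNC, CALLBACK, DIRTY). A minimal sketch of how a caller inspects that state, assuming, as the locking later in this listing shows, that ic_state is only stable while holding l_icloglock; the helper name is hypothetical:

/* Hypothetical helper: ic_state is only stable under l_icloglock. */
static bool
xlog_iclog_is_active(struct xlog *log, struct xlog_in_core *iclog)
{
	bool	active;

	spin_lock(&log->l_icloglock);
	active = (iclog->ic_state == XLOG_STATE_ACTIVE);
	spin_unlock(&log->l_icloglock);
	return active;
}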
54 * In core log flags
75 * log write such that no allocation transactions will be re-done during
77 * log write.
79 * These states are used to insert dummy log entries to cover
92 * in the log record header needs to point beyond the last possible
105 * when the log becomes idle.
109 * on disk log with no other transactions.
114 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
118 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
134 * one file space allocation. When this happens, the log recovery
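
A hedged, simplified model of the two-dummy-record progression those switch points drive. State names are the XLOG_STATE_COVER_* values from this header; the real transitions are split between the idle-detection path and xlog_state_clean_log(), and any real log write falls back to NEED:

static int
cover_next_state(int state)		/* hypothetical condensation */
{
	switch (state) {
	case XLOG_STATE_COVER_NEED:	/* idle detected: 1st dummy issued */
		return XLOG_STATE_COVER_DONE;
	case XLOG_STATE_COVER_DONE:	/* 1st dummy cleaned from the iclogs */
		return XLOG_STATE_COVER_NEED2;
	case XLOG_STATE_COVER_NEED2:	/* idle again: 2nd dummy issued */
		return XLOG_STATE_COVER_DONE2;
	case XLOG_STATE_COVER_DONE2:	/* 2nd dummy cleaned: log is covered */
		return XLOG_STATE_COVER_IDLE;
	default:			/* IDLE: stay until a real write */
		return XLOG_STATE_COVER_IDLE;
	}
}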
161 * - A log record header is 512 bytes. There is plenty of room to grow the
167 * - ic_log is a pointer back to the global log structure.
168 * - ic_size is the full size of the log buffer, minus the cycle headers.
170 * - ic_refcnt is bumped when someone is writing to the log.
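
A hedged sketch of the ic_refcnt protocol that last line implies; the real attach and release points live in the xlog_state_get_iclog_space()/xlog_state_release_iclog() paths, and locking and error handling are omitted:

	bool	last;

	atomic_inc(&iclog->ic_refcnt);	/* writer attaches to the iclog */

	/* ... copy the writer's log vectors into the iclog data area ... */

	last = atomic_dec_and_test(&iclog->ic_refcnt);
	if (last) {
		/* last writer out: the iclog may be synced out to disk */
	}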
229 struct list_head log_items; /* log items in chkpt */
256 * This structure is used to track log items that have been committed but not
257 * yet written into the log. It is used only when the delayed logging mount
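
Given the xlog_cil_commit() prototype later in this listing, the flow these lines describe looks roughly like the sketch below (hedged: modelled on the delayed logging design, not copied from the transaction commit code):

	xfs_csn_t	seq;

	/*
	 * Commit inserts the transaction's dirty items into the current
	 * CIL context (ctx->log_items) instead of writing them to the
	 * log immediately.
	 */
	xlog_cil_commit(log, tp, &seq, false);

	/* A later CIL push writes the aggregated checkpoint to the iclogs. */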
295 * The amount of log space we allow the CIL to aggregate is difficult to size.
297 * log space effectively, that it is large enough to capture sufficient
298 * relogging to reduce log buffer IO significantly, but it is not too large for
299 * the log or induces too much latency when writing out through the iclogs. We
303 * Every log buffer we write out during a push needs a header reserved, which
305 * at least 512 bytes per 32k of log space just for the LR headers. That means
310 * limit space consumed in the log rather than by the number of objects being
313 * Further, use of static reservations through the log grant mechanism is
316 * can block on log pushes. Hence if we have to regrant log space during a log
330 * Recovery imposes a rule that no transaction exceed half the log, so we are
331 * limited by that. Furthermore, the log transaction reservation subsystem
332 * tries to keep 25% of the log free, so we need to keep below that limit or we
333 * risk running out of free log space to start any new transactions.
338 * to grow to a substantial fraction of the log, then we may be pinning hundreds
349 * defined to be 12.5% of the log space - half the 25% push threshold of the
377 #define XLOG_CIL_SPACE_LIMIT(log) \
378 min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
380 #define XLOG_CIL_BLOCKING_SPACE_LIMIT(log) \
381 (XLOG_CIL_SPACE_LIMIT(log) * 2)
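
A worked instance of these limits (hedged: the 32MB cap assumes a v2 log, where BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4 works out to BBTOB(BTOBB(8 << 18)) << 4 = 32MB):

/*
 *   l_logsize = 128MB: XLOG_CIL_SPACE_LIMIT = min(16MB, 32MB) = 16MB
 *   l_logsize = 512MB: XLOG_CIL_SPACE_LIMIT = min(64MB, 32MB) = 32MB
 *
 * i.e. the CIL aggregates up to 12.5% of the log, capped at 32MB, and
 * XLOG_CIL_BLOCKING_SPACE_LIMIT throttles committers at twice that.
 */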
402 struct xfs_ail *l_ailp; /* AIL log is working with */
403 struct xfs_cil *l_cilp; /* CIL log is working with */
404 struct xfs_buftarg *l_targ; /* buftarg of log */
410 struct list_head r_dfops; /* recovered log intent items */
414 int l_iclog_size; /* size of each iclog buffer in bytes */
416 xfs_daddr_t l_logBBstart; /* start block of log */
417 int l_logsize; /* size of log in bytes */
418 int l_logBBsize; /* size of log in BB chunks */
424 * log entries" */
425 xlog_in_core_t *l_iclog; /* head log queue */
427 int l_curr_cycle; /* Cycle number of log writes */
430 int l_curr_block; /* current logical log block */
431 int l_prev_block; /* previous logical log block */
449 /* log recovery lsn tracking (for buffer submission) */
454 /* Users of log incompat features should take a read lock. */
462 #define XLOG_RECOVERY_NEEDED 1 /* log was recovered */
463 #define XLOG_IO_ERROR 2 /* log hit an I/O error, and being shut down */
465 #define XLOG_TAIL_WARN 3 /* log tail verify warning issued */
468 xlog_recovery_needed(struct xlog *log)
470 return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
474 xlog_in_recovery(struct xlog *log)
476 return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
480 xlog_is_shutdown(struct xlog *log)
482 return test_bit(XLOG_IO_ERROR, &log->l_opstate);
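
A minimal usage sketch for these predicates (the caller and its error choices are hypothetical; the fail-fast-on-shutdown pattern mirrors the log I/O paths):

static int
example_log_io(struct xlog *log)	/* hypothetical caller */
{
	if (xlog_is_shutdown(log))
		return -EIO;	/* log is dead: never issue new I/O */
	if (xlog_in_recovery(log))
		return -EAGAIN;	/* hypothetical: defer until recovery ends */
	/* ... safe to build and submit iclog I/O here ... */
	return 0;
}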
486 * Wait until xlog_force_shutdown() has marked the log as shut down
491 struct xlog *log)
493 wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
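
wait_var_event() only returns once the condition holds and someone calls wake_up_var() on the same address, so the shutdown side must pair with it; a hedged sketch of that ordering (the real sequence lives in xlog_force_shutdown()):

	/* Mark the log dead first, then wake anyone in xlog_shutdown_wait(). */
	set_bit(XLOG_IO_ERROR, &log->l_opstate);
	wake_up_var(&log->l_opstate);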
499 struct xlog *log);
502 struct xlog *log);
506 extern __le32 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
510 struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
515 int xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
518 void xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
519 void xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
521 void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
523 int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
584 int xlog_cil_init(struct xlog *log);
585 void xlog_cil_init_post_recovery(struct xlog *log);
586 void xlog_cil_destroy(struct xlog *log);
587 bool xlog_cil_empty(struct xlog *log);
588 void xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
597 void xlog_cil_flush(struct xlog *log);
598 xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
601 xlog_cil_force(struct xlog *log)
603 xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
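
A hedged usage sketch tying the two force variants together (seq is a checkpoint sequence captured at xlog_cil_commit() time, as in the earlier sketch):

	/* Force a specific checkpoint through to the iclogs... */
	xlog_cil_force_seq(log, seq);

	/* ... or everything committed so far, as xfs_log_force() does. */
	xlog_cil_force(log);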
609 * log code.
630 * means that the next log record that includes this metadata could have a
631 * smaller LSN. In turn, this means that the modification in the log would not
636 struct xlog *log,
655 cur_cycle = READ_ONCE(log->l_curr_cycle);
657 cur_block = READ_ONCE(log->l_curr_block);
663 * above raced with a wrap to the next log cycle. Grab the lock
666 spin_lock(&log->l_icloglock);
667 cur_cycle = log->l_curr_cycle;
668 cur_block = log->l_curr_block;
669 spin_unlock(&log->l_icloglock);
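
The locked re-sample above only matters for what is done with the two values afterwards; a hedged sketch of the final comparison, modelled on xlog_valid_lsn() and the CYCLE_LSN()/BLOCK_LSN() decomposition from xfs_log.h:

	/* If the LSN still sits beyond the current head, it is invalid. */
	if (CYCLE_LSN(lsn) > cur_cycle ||
	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
		valid = false;

	return valid;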