Lines Matching refs:log

35 	struct xlog		*log,
39 struct xlog *log);
45 struct xlog *log);
48 struct xlog *log,
55 struct xlog *log,
59 struct xlog *log,
65 struct xlog *log);
68 struct xlog *log,
73 struct xlog *log,
83 struct xlog *log);
93 * However, this padding does not get written into the log, and hence we have to
94 * track the space used by the log vectors separately to prevent log space hangs
95 * due to inaccurate accounting (i.e. a leak) of the used log space through the
99 * log. This prepends the data region we return to the caller to copy their data
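
The padding comment above is the heart of the CIL space accounting: alignment padding is sized into the in-memory buffer but never written to the log, so the bytes charged against the reservation must be the unpadded payload. A minimal standalone sketch of that split, with hypothetical structure and field names (not the XFS log vector itself):

#include <stddef.h>
#include <stdint.h>

/* Hypothetical accounting record, not the XFS log vector itself. */
struct region_acct {
	size_t	buf_bytes;	/* bytes consumed in the aligned buffer */
	size_t	log_bytes;	/* bytes that will be written to the log */
};

static void account_region(struct region_acct *acct, size_t payload)
{
	/* round the buffer use up to 8 bytes for natural alignment */
	size_t padded = (payload + sizeof(uint64_t) - 1) &
			~(sizeof(uint64_t) - 1);

	acct->buf_bytes += padded;	/* padding stays in memory only */
	acct->log_bytes += payload;	/* only the payload uses log space */
}

Charging the padded length against the ticket instead of the payload would slowly leak reserved space, which is the log space hang the comment warns about.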
146 struct xlog *log,
160 space += log->l_logsize;
172 struct xlog *log,
185 tmp = log->l_logsize - space;
222 struct xlog *log,
226 if (head == &log->l_write_head) {
239 struct xlog *log,
252 * limiting the target to the log head (l_last_sync_lsn) at the
253 * time. This may not reflect where the log head is now as the
257 * log that has moved rather than the tail. As the tail didn't
260 * pushed to the target defined by the old log head location, we
266 * target reflects both the current log tail and log head
270 need_bytes = xlog_ticket_reservation(log, head, tic);
273 xlog_grant_push_ail(log, need_bytes);
278 trace_xfs_log_grant_wake_up(log, tic);
288 struct xlog *log,
297 if (xlog_is_shutdown(log))
299 xlog_grant_push_ail(log, need_bytes);
304 XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
306 trace_xfs_log_grant_sleep(log, tic);
308 trace_xfs_log_grant_wake(log, tic);
311 if (xlog_is_shutdown(log))
313 } while (xlog_space_left(log, &head->grant) < need_bytes);
323 * Atomically get the log space required for a log ticket.
341 struct xlog *log,
349 ASSERT(!xlog_in_recovery(log));
357 *need_bytes = xlog_ticket_reservation(log, head, tic);
358 free_bytes = xlog_space_left(log, &head->grant);
361 if (!xlog_grant_head_wake(log, head, &free_bytes) ||
363 error = xlog_grant_head_wait(log, head, tic,
369 error = xlog_grant_head_wait(log, head, tic, *need_bytes);
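
The fragments above show the shape of the grant head check: work out what the ticket needs, how much space is left, and only take the space if no earlier waiter would be starved. A simplified, lock-free model of that decision (assumed helper, not the kernel function; the real sleep happens in xlog_grant_head_wait(), which re-checks xlog_space_left() each time it is woken):

#include <errno.h>
#include <stdbool.h>

static int grant_head_check(int need_bytes, int free_bytes,
			    bool waiters_queued, bool all_waiters_woken)
{
	/* earlier waiters keep their place in the queue */
	if (waiters_queued && !all_waiters_woken)
		return -EAGAIN;		/* caller sleeps on the grant head */
	if (free_bytes < need_bytes)
		return -EAGAIN;		/* caller sleeps on the grant head */
	return 0;			/* enough space, reservation proceeds */
}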
381 * Do not write to the log on norecovery mounts, if the data or log
383 * mounts allow internal writes for log recovery and unmount purposes,
405 struct xlog *log = mp->m_log;
409 if (xlog_is_shutdown(log))
417 * the log. Just add one to the existing tid so that we can see chains
418 * of rolling transactions in the log easily.
422 xlog_grant_push_ail(log, tic->t_unit_res);
428 trace_xfs_log_regrant(log, tic);
430 error = xlog_grant_head_check(log, &log->l_write_head, tic,
435 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
436 trace_xfs_log_regrant_exit(log, tic);
437 xlog_verify_grant_tail(log);
452 * Reserve log space and return a ticket corresponding to the reservation.
454 * Each reservation is going to reserve extra space for a log record header.
455 * When writes happen to the on-disk log, we don't subtract the length of the
456 * log record header from any reservation. By wasting space in each
467 struct xlog *log = mp->m_log;
472 if (xlog_is_shutdown(log))
478 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
481 xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
484 trace_xfs_log_reserve(log, tic);
486 error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
491 xlog_grant_add_space(log, &log->l_reserve_head.grant, need_bytes);
492 xlog_grant_add_space(log, &log->l_write_head.grant, need_bytes);
493 trace_xfs_log_reserve_exit(log, tic);
494 xlog_verify_grant_tail(log);
509 * Run all the pending iclog callbacks and wake log force waiters and iclog
511 * don't care what order we process callbacks here because the log is shut down
524 struct xlog *log)
529 iclog = log->l_iclog;
536 spin_unlock(&log->l_icloglock);
540 spin_lock(&log->l_icloglock);
543 } while ((iclog = iclog->ic_next) != log->l_iclog);
545 wake_up_all(&log->l_flush_wait);
553 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
555 * within the iclog. We need to ensure that the log tail does not move beyond
570 struct xlog *log,
577 lockdep_assert_held(&log->l_icloglock);
581 * Grabbing the current log tail needs to be atomic w.r.t. the writing
582 * of the tail LSN into the iclog so we guarantee that the log tail does
589 tail_lsn = xlog_assign_tail_lsn(log->l_mp);
595 if (xlog_is_shutdown(log)) {
602 xlog_state_shutdown_callbacks(log);
615 xlog_verify_tail_lsn(log, iclog);
618 spin_unlock(&log->l_icloglock);
619 xlog_sync(log, iclog, ticket);
620 spin_lock(&log->l_icloglock);
625 * Mount a log filesystem
628 * log_target - buftarg of on-disk log device
630 * num_bblocks - Number of BBSIZE blocks in on-disk log
641 struct xlog *log;
657 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
658 if (IS_ERR(log)) {
659 error = PTR_ERR(log);
662 mp->m_log = log;
665 	 * Now that we have set up the log and its internal geometry
666 * parameters, we can validate the given log space and drop a critical
667 * message via syslog if the log size is too small. A log that is too
668 * small can lead to unexpected situations in transaction log space
670 * the other log geometry constraints, so we don't have to check those
676 * way to grow the log (short of black magic surgery with xfs_db).
680 * filesystem with a log that is too small.
700 "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
704 * Initialize the AIL now we have a log.
711 log->l_ailp = mp->m_ail;
714 * skip log recovery on a norecovery mount. pretend it all
718 error = xlog_recover(log);
720 xfs_warn(mp, "log mount/recovery failed: error %d",
722 xlog_recover_cancel(log);
727 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
728 "log");
733 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
736 	 * Now the log has been fully initialised and we know where our
740 xlog_cil_init_post_recovery(log);
747 xlog_dealloc_log(log);
758 * If we finish recovery successfully, start the background log work. If we are
766 struct xlog *log = mp->m_log;
775 * During the second phase of log recovery, we need iget and
778 * of inodes before we're done replaying log items on those
787 * in log recovery failure. We have to evict the unreferenced
796 if (xlog_recovery_needed(log))
797 error = xlog_recover_finish(log);
802 * Drain the buffer LRU after log recovery. This is required for v4
810 if (xlog_recovery_needed(log)) {
822 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
824 /* Make sure the log is dead if we're returning failure. */
825 ASSERT(!error || xlog_is_shutdown(log));
832 * the log.
858 * Cycle all the iclogbuf locks to make sure all log IO completion
862 xlog_wait_iclog_completion(struct xlog *log)
865 struct xlog_in_core *iclog = log->l_iclog;
867 for (i = 0; i < log->l_iclog_bufs; i++) {
876 * log force state machine. Waiting on ic_force_wait ensures iclog completions
885 struct xlog *log = iclog->ic_log;
888 if (!xlog_is_shutdown(log) &&
891 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
892 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
894 spin_unlock(&log->l_icloglock);
897 if (xlog_is_shutdown(log))
909 struct xlog *log,
944 return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
949 * log.
953 struct xlog *log)
955 struct xfs_mount *mp = log->l_mp;
964 error = xlog_write_unmount_record(log, tic);
967 * transitioning log state to shutdown. Just continue...
973 spin_lock(&log->l_icloglock);
974 iclog = log->l_iclog;
979 trace_xfs_log_umount_write(log, tic);
980 xfs_log_ticket_ungrant(log, tic);
986 struct xlog *log)
988 struct xlog_in_core *iclog = log->l_iclog;
993 } while ((iclog = iclog->ic_next) != log->l_iclog);
1007 struct xlog *log = mp->m_log;
1014 if (xlog_is_shutdown(log))
1019 * record to force log recovery at next mount, after which the summary
1030 xfs_log_unmount_verify_iclog(log);
1031 xlog_unmount_write(log);
1035 * Empty the log for unmount/freeze.
1037 * To do this, we first need to shut down the background log work so it is not
1038 * trying to cover the log as we clean up. We then need to unpin all objects in
1039 * the log so we can then flush them out. Once they have completed their IO and
1040 * run the callbacks removing themselves from the AIL, we can cover the log.
1047 * Clear log incompat features since we're quiescing the log. Report
1048 * failures, though it's not fatal to have a higher log feature
1049 * protection level than the log contents actually require.
1057 "Failed to clear log incompat features on quiesce");
1090 * from the AIL so that the log is empty before we write the unmount record to
1091 * the log. Once this is done, we can tear down the AIL and the log.
1100 * If shutdown has come from iclog IO context, the log
1136 * Wake up processes waiting for log space after we have moved the log tail.
1142 struct xlog *log = mp->m_log;
1145 if (xlog_is_shutdown(log))
1148 if (!list_empty_careful(&log->l_write_head.waiters)) {
1149 ASSERT(!xlog_in_recovery(log));
1151 spin_lock(&log->l_write_head.lock);
1152 free_bytes = xlog_space_left(log, &log->l_write_head.grant);
1153 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1154 spin_unlock(&log->l_write_head.lock);
1157 if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1158 ASSERT(!xlog_in_recovery(log));
1160 spin_lock(&log->l_reserve_head.lock);
1161 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1162 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1163 spin_unlock(&log->l_reserve_head.lock);
1169 * covered. To begin the transition to the idle state firstly the log needs to
1171 * we start attempting to cover the log.
1174 * informed that dummy transactions are required to move the log into the idle
1178 * cover the log as we may be in a situation where there isn't log space
1180 * tail of the log is pinned by an item that is modified in the CIL. Hence
1182 * can't start trying to idle the log until both the CIL and AIL are empty.
1188 struct xlog *log = mp->m_log;
1191 if (!xlog_cil_empty(log))
1194 spin_lock(&log->l_icloglock);
1195 switch (log->l_covered_state) {
1202 if (xfs_ail_min_lsn(log->l_ailp))
1204 if (!xlog_iclogs_empty(log))
1208 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1209 log->l_covered_state = XLOG_STATE_COVER_DONE;
1211 log->l_covered_state = XLOG_STATE_COVER_DONE2;
1217 spin_unlock(&log->l_icloglock);
1222 * Explicitly cover the log. This is similar to background log covering but
1224 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1243 * state machine if the log requires covering. Therefore, we must call
1247 * Fall into the covering sequence if the log needs covering or the
1257 * To cover the log, commit the superblock twice (at most) in
1262 * covering the log. Push the AIL one more time to leave it empty, as
1276 * We may be holding the log iclog lock upon entering this routine.
1282 struct xlog *log = mp->m_log;
1289 * To make sure we always have a valid LSN for the log tail we keep
1290 * track of the last LSN which was committed in log->l_last_sync_lsn,
1297 tail_lsn = atomic64_read(&log->l_last_sync_lsn);
1298 trace_xfs_log_assign_tail_lsn(log, tail_lsn);
1299 atomic64_set(&log->l_tail_lsn, tail_lsn);
1317 * Return the space in the log between the tail and the head. The head
1321 * in the log. This works for all places where this function is called
1327 * but then treat it as if the log is empty.
1329 * If the log is shut down, the head and tail may be invalid or out of whack, so
1335 struct xlog *log,
1344 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
1347 return log->l_logsize - (head_bytes - tail_bytes);
1352 if (xlog_is_shutdown(log))
1353 return log->l_logsize;
1362 * return the size of the log as the amount of space left.
1364 xfs_alert(log->l_mp, "xlog_space_left: head behind tail");
1365 xfs_alert(log->l_mp, " tail_cycle = %d, tail_bytes = %d",
1367 xfs_alert(log->l_mp, " GH cycle = %d, GH bytes = %d",
1370 return log->l_logsize;
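
The return statements above cover the two healthy geometries of the circular log: head and tail in the same cycle, or the head one cycle ahead of the tail. A standalone model of that arithmetic (hypothetical function, ignoring the shutdown and "head behind tail" error paths shown above):

/* head and tail are each a (cycle, bytes-into-the-log) pair */
static int log_space_left(int logsize, int head_cycle, int head_bytes,
			  int tail_cycle, int tail_bytes)
{
	if (tail_cycle == head_cycle)		/* head has not wrapped yet */
		return logsize - (head_bytes - tail_bytes);
	if (tail_cycle + 1 == head_cycle)	/* head wrapped past the end */
		return tail_bytes - head_bytes;
	return 0;				/* more than a cycle apart: no space */
}

For example, with a 64 MiB log, a tail at (cycle 5, 10 MiB) and a head at (cycle 6, 4 MiB), the free space is 10 - 4 = 6 MiB.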
1380 struct xlog *log = iclog->ic_log;
1393 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1394 xfs_alert(log->l_mp, "log I/O error %d", error);
1395 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1411 * Return size of each in-core log record buffer.
1421 struct xlog *log)
1428 log->l_iclog_bufs = mp->m_logbufs;
1429 log->l_iclog_size = mp->m_logbsize;
1434 log->l_iclog_heads =
1436 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
1448 * Clear the log incompat flags if we have the opportunity.
1450 * This only happens if we're about to log the second dummy transaction as part
1451 * of covering the log and we can get the log incompat feature usage lock.
1455 struct xlog *log)
1457 struct xfs_mount *mp = log->l_mp;
1463 if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1466 if (!down_write_trylock(&log->l_incompat_users))
1470 up_write(&log->l_incompat_users);
1475 * disk. If there is nothing dirty, then we might need to cover the log to
1482 struct xlog *log = container_of(to_delayed_work(work),
1484 struct xfs_mount *mp = log->l_mp;
1489 * Dump a transaction into the log that contains no real change.
1490 * This is needed to stamp the current tail LSN into the log
1495 * will prevent log covering from making progress. Hence we
1496 * synchronously log the superblock instead to ensure the
1499 xlog_clear_incompat(log);
1512 * This routine initializes some of the log structure for a given mount point.
1523 struct xlog *log;
1531 log = kzalloc(sizeof(struct xlog), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1532 if (!log) {
1537 log->l_mp = mp;
1538 log->l_targ = log_target;
1539 log->l_logsize = BBTOB(num_bblks);
1540 log->l_logBBstart = blk_offset;
1541 log->l_logBBsize = num_bblks;
1542 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1543 set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1544 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1545 INIT_LIST_HEAD(&log->r_dfops);
1547 log->l_prev_block = -1;
1548 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1549 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1550 xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
1551 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1554 log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1556 log->l_iclog_roundoff = BBSIZE;
1558 xlog_grant_head_init(&log->l_reserve_head);
1559 xlog_grant_head_init(&log->l_write_head);
1577 /* for larger sector sizes, must have v2 or external log */
1578 if (log2_size && log->l_logBBstart > 0 &&
1581 "log sector size (0x%x) invalid for configuration.",
1586 log->l_sectBBsize = 1 << log2_size;
1588 init_rwsem(&log->l_incompat_users);
1590 xlog_get_iclog_buffer_size(mp, log);
1592 spin_lock_init(&log->l_icloglock);
1593 init_waitqueue_head(&log->l_flush_wait);
1595 iclogp = &log->l_iclog;
1603 ASSERT(log->l_iclog_size >= 4096);
1604 for (i = 0; i < log->l_iclog_bufs; i++) {
1605 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1617 iclog->ic_data = kvzalloc(log->l_iclog_size,
1625 xfs_has_logv2(log->l_mp) ? 2 : 1);
1626 head->h_size = cpu_to_be32(log->l_iclog_size);
1631 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1633 iclog->ic_log = log;
1636 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1645 *iclogp = log->l_iclog; /* complete ring */
1646 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1648 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1652 if (!log->l_ioend_workqueue)
1655 error = xlog_cil_init(log);
1658 return log;
1661 destroy_workqueue(log->l_ioend_workqueue);
1663 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1667 if (prev_iclog == log->l_iclog)
1671 kfree(log);
1677 * Compute the LSN that we'd need to push the log tail towards in order to have
1678 * (a) enough on-disk log space to log the number of bytes specified, (b) at
1679 * least 25% of the log space free, and (c) at least 256 blocks free. If the
1680 * log free space already meets all three thresholds, this function returns
1685 struct xlog *log,
1696 ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1698 free_bytes = xlog_space_left(log, &log->l_reserve_head.grant);
1703 * log to the maximum of what the caller needs, one quarter of the
1704 * log, and 256 blocks.
1707 free_threshold = max(free_threshold, (log->l_logBBsize >> 2));
1712 xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
1715 if (threshold_block >= log->l_logBBsize) {
1716 threshold_block -= log->l_logBBsize;
1723 * log record known to be on disk. Use a snapshot of the last sync lsn
1726 last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
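
Putting the fragments above together, the push target sits free_threshold blocks past the current tail, where free_threshold is the largest of the caller's requirement, a quarter of the log, and 256 blocks, wrapping into the next cycle if it runs off the end. A standalone sketch of that arithmetic (hypothetical helper; the real code also clamps the result to l_last_sync_lsn):

static void push_threshold(int log_blocks, int need_blocks,
			   int tail_cycle, int tail_block,
			   int *thr_cycle, int *thr_block)
{
	int free_threshold = need_blocks;

	if (free_threshold < (log_blocks >> 2))	/* at least 25% of the log */
		free_threshold = log_blocks >> 2;
	if (free_threshold < 256)		/* and at least 256 blocks */
		free_threshold = 256;

	*thr_cycle = tail_cycle;
	*thr_block = tail_block + free_threshold;
	if (*thr_block >= log_blocks) {		/* target wraps the physical log */
		*thr_block -= log_blocks;
		*thr_cycle += 1;
	}
}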
1734 * Push the tail of the log if we need to do so to maintain the free log space
1736 * policy which pushes on an lsn which is further along in the log once we
1742 struct xlog *log,
1747 threshold_lsn = xlog_grant_push_threshold(log, need_bytes);
1748 if (threshold_lsn == NULLCOMMITLSN || xlog_is_shutdown(log))
1756 xfs_ail_push(log->l_ailp, threshold_lsn);
1764 struct xlog *log,
1784 if (xfs_has_logv2(log->l_mp)) {
1795 for (i = 1; i < log->l_iclog_heads; i++)
1801 * Calculate the checksum for a log buffer.
1808 struct xlog *log,
1821 if (xfs_has_logv2(log->l_mp)) {
1873 struct xlog *log,
1878 ASSERT(bno < log->l_logBBsize);
1887 	 * across the log IO to achieve that.
1890 if (xlog_is_shutdown(log)) {
1893 * the log state machine to propagate I/O errors instead of
1904 * writeback throttle from throttling log writes behind background
1907 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1910 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1917 * For external log devices, we also need to flush the data
1920 * but it *must* complete before we issue the external log IO.
1923 * writeback from the log succeeded. Repeating the flush is
1924 * not possible, hence we must shut down with log IO error to
1927 if (log->l_targ != log->l_mp->m_ddev_targp &&
1928 blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev))
1943 * If this log buffer would straddle the end of the log we will have
1946 if (bno + BTOBB(count) > log->l_logBBsize) {
1949 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1955 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1961 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1969 * written to the start of the log. Watch out for the header magic
1974 struct xlog *log,
1979 unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
1993 struct xlog *log,
2000 count_init = log->l_iclog_hsize + iclog->ic_offset;
2001 count = roundup(count_init, log->l_iclog_roundoff);
2006 ASSERT(*roundoff < log->l_iclog_roundoff);
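
The lines above are the whole calculation: the iclog write size is the header plus the bytes used, rounded up to the log's roundoff unit (stripe unit or a basic block), and the difference is handed back so the grant heads can be moved forward to account for the padding that actually gets written. A standalone equivalent of that roundup (hypothetical helper):

static unsigned int calc_iclog_size(unsigned int hsize, unsigned int offset,
				    unsigned int roundoff_unit,
				    unsigned int *roundoff)
{
	unsigned int count_init = hsize + offset;
	unsigned int count = ((count_init + roundoff_unit - 1) /
			      roundoff_unit) * roundoff_unit;

	*roundoff = count - count_init;		/* always < roundoff_unit */
	return count;
}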
2011 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2013 * ptr in the log to point to the next available iclog. This allows further
2015 * Before an in-core log can be written out, the data section must be scanned
2027 * log will require grabbing the lock though.
2029 * The entire log manager uses a logical block numbering scheme. Only
2030 * xlog_write_iclog knows about the fact that the log may not start with
2035 struct xlog *log,
2047 count = xlog_calc_iclog_size(log, iclog, &roundoff);
2057 xlog_grant_add_space(log, &log->l_reserve_head.grant, roundoff);
2058 xlog_grant_add_space(log, &log->l_write_head.grant, roundoff);
2062 xlog_pack_data(log, iclog, roundoff);
2066 if (xfs_has_logv2(log->l_mp))
2070 XFS_STATS_INC(log->l_mp, xs_log_writes);
2071 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
2076 if (bno + BTOBB(count) > log->l_logBBsize)
2077 xlog_split_iclog(log, &iclog->ic_header, bno, count);
2080 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2083 * Intentionally corrupt the log record CRC based on the error injection
2084 * frequency, if defined. This facilitates testing log recovery in the
2085 * event of torn writes. Hence, set the IOABORT state to abort the log
2090 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
2093 xfs_warn(log->l_mp,
2094 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
2098 xlog_verify_iclog(log, iclog, count);
2099 xlog_write_iclog(log, iclog, bno, count);
2103 * Deallocate a log structure
2107 struct xlog *log)
2114 * iclog EIO error will try to shut down the log, which accesses the
2117 xlog_cil_destroy(log);
2119 iclog = log->l_iclog;
2120 for (i = 0; i < log->l_iclog_bufs; i++) {
2127 log->l_mp->m_log = NULL;
2128 destroy_workqueue(log->l_ioend_workqueue);
2129 kfree(log);
2137 struct xlog *log,
2142 lockdep_assert_held(&log->l_icloglock);
2176 xfs_warn(mp, " log res = %d", tp->t_log_res);
2177 xfs_warn(mp, " log count = %d", tp->t_log_count);
2182 /* dump each log item */
2188 xfs_warn(mp, "log item: ");
2198 /* dump each iovec for the log item */
2236 * Write log vectors into a single iclog which is guaranteed by the caller
2237 * to have enough space to write the entire log vector into.
2255 * Ordered log vectors have no regions to write so this
2278 struct xlog *log = iclog->ic_log;
2281 spin_lock(&log->l_icloglock);
2283 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2284 error = xlog_state_release_iclog(log, iclog, ticket);
2285 spin_unlock(&log->l_icloglock);
2289 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2300 * Write log vectors into a single iclog which is smaller than the current chain
2302 * and then stop. We return the log vector that is to be written that cannot
2328 * length otherwise log recovery will just skip over it and
2375 * space for log transaction opheaders left in the current
2429 * No more iovecs remain in this logvec so return the next log vec to
2437 * Write some region out to in-core log
2451 * 2. Write log operation header (header per region)
2458 * 5. Release iclog for potential flush to on-disk log.
2468 * on all log operation writes which don't contain the end of the
2469 * region. The XLOG_END_TRANS bit is used for the in-core log
2478 struct xlog *log,
2493 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2495 xlog_print_tic_res(log->l_mp, ticket);
2496 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2499 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2516 * If the entire log vec does not fit in the iclog, punt it to
2544 spin_lock(&log->l_icloglock);
2545 xlog_state_finish_copy(log, iclog, record_cnt, 0);
2546 error = xlog_state_release_iclog(log, iclog, ticket);
2547 spin_unlock(&log->l_icloglock);
2592 struct xlog *log,
2595 struct xlog_in_core *iclog = log->l_iclog;
2606 } while ((iclog = iclog->ic_next) != log->l_iclog);
2645 struct xlog *log,
2654 xlog_state_activate_iclogs(log, &iclogs_changed);
2658 log->l_covered_state = xlog_covered_state(log->l_covered_state,
2665 struct xlog *log)
2667 struct xlog_in_core *iclog = log->l_iclog;
2678 } while ((iclog = iclog->ic_next) != log->l_iclog);
2686 * tail of the log half way through a transaction as this may be the only
2687 * transaction in the log and moving the tail to point to the middle of it
2698 * amount of log space bound up in this committing transaction then the
2700 * freeing space in the log. Hence once we've updated the last_sync_lsn we
2702 * no longer bound by the old log head location and can move forwards and make
2707 struct xlog *log,
2714 ASSERT(XFS_LSN_CMP(atomic64_read(&log->l_last_sync_lsn),
2720 atomic64_set(&log->l_last_sync_lsn, header_lsn);
2721 xlog_grant_push_ail(log, 0);
2731 struct xlog *log,
2752 lowest_lsn = xlog_get_lowest_lsn(log);
2755 xlog_state_set_callback(log, iclog, header_lsn);
2776 struct xlog *log)
2777 __releases(&log->l_icloglock)
2778 __acquires(&log->l_icloglock)
2780 struct xlog_in_core *first_iclog = log->l_iclog;
2787 if (xlog_state_iodone_process_iclog(log, iclog))
2794 spin_unlock(&log->l_icloglock);
2801 spin_lock(&log->l_icloglock);
2802 xlog_state_clean_iclog(log, iclog);
2816 struct xlog *log)
2821 spin_lock(&log->l_icloglock);
2822 while (xlog_state_do_iclog_callbacks(log)) {
2823 if (xlog_is_shutdown(log))
2829 xfs_warn(log->l_mp,
2835 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2836 wake_up_all(&log->l_flush_wait);
2838 spin_unlock(&log->l_icloglock);
2846 * global state machine log lock.
2852 struct xlog *log = iclog->ic_log;
2854 spin_lock(&log->l_icloglock);
2860 * split log writes, on the second, we shut down the file system and
2863 if (!xlog_is_shutdown(log)) {
2874 spin_unlock(&log->l_icloglock);
2875 xlog_state_do_callback(log);
2879 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2889 * log's data space.
2890 * * in-core log pointer to which xlog_write() should write.
2891 * * boolean indicating this is a continued write to an in-core log.
2892 * If this is the last write, then the in-core log's offset field
2898 struct xlog *log,
2909 spin_lock(&log->l_icloglock);
2910 if (xlog_is_shutdown(log)) {
2911 spin_unlock(&log->l_icloglock);
2915 iclog = log->l_iclog;
2917 XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2919 /* Wait for log writes to have flushed */
2920 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2937 ticket->t_curr_res -= log->l_iclog_hsize;
2938 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2940 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2941 ASSERT(log->l_curr_block >= 0);
2956 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2966 error = xlog_state_release_iclog(log, iclog, ticket);
2967 spin_unlock(&log->l_icloglock);
2982 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2986 spin_unlock(&log->l_icloglock);
3001 struct xlog *log,
3004 trace_xfs_log_ticket_regrant(log, ticket);
3009 xlog_grant_sub_space(log, &log->l_reserve_head.grant,
3011 xlog_grant_sub_space(log, &log->l_write_head.grant,
3015 trace_xfs_log_ticket_regrant_sub(log, ticket);
3019 xlog_grant_add_space(log, &log->l_reserve_head.grant,
3021 trace_xfs_log_ticket_regrant_exit(log, ticket);
3045 struct xlog *log,
3050 trace_xfs_log_ticket_ungrant(log, ticket);
3055 trace_xfs_log_ticket_ungrant_sub(log, ticket);
3067 xlog_grant_sub_space(log, &log->l_reserve_head.grant, bytes);
3068 xlog_grant_sub_space(log, &log->l_write_head.grant, bytes);
3070 trace_xfs_log_ticket_ungrant_exit(log, ticket);
3072 xfs_log_space_wake(log->l_mp);
3082 struct xlog *log,
3087 assert_spin_locked(&log->l_icloglock);
3093 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3094 log->l_prev_block = log->l_curr_block;
3095 log->l_prev_cycle = log->l_curr_cycle;
3097 /* roll log?: ic_offset changed later */
3098 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
3100 /* Round up to next log-sunit */
3101 if (log->l_iclog_roundoff > BBSIZE) {
3102 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
3103 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
3106 if (log->l_curr_block >= log->l_logBBsize) {
3110 * when the log wraps to the next cycle. This is to support the
3114 log->l_curr_block -= log->l_logBBsize;
3115 ASSERT(log->l_curr_block >= 0);
3117 log->l_curr_cycle++;
3118 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
3119 log->l_curr_cycle++;
3121 ASSERT(iclog == log->l_iclog);
3122 log->l_iclog = iclog->ic_next;
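
The block bookkeeping above advances the current write position past the data and header just placed, rounds up to the log stripe unit, and wraps into a new cycle at the end of the physical log, skipping the cycle value that would collide with the record header magic. A standalone model of that stepping (hypothetical helper; 0xFEEDbabe is assumed to be the value of XLOG_HEADER_MAGIC_NUM):

#include <stdint.h>

#define LOG_HEADER_MAGIC	0xFEEDbabeU	/* XLOG_HEADER_MAGIC_NUM */

static void advance_curr_block(uint32_t *curr_cycle, uint32_t *curr_block,
			       uint32_t data_bb, uint32_t header_bb,
			       uint32_t sunit_bb, uint32_t log_bb)
{
	*curr_block += data_bb + header_bb;

	/* round up to the next log stripe unit, if there is one */
	if (sunit_bb > 1)
		*curr_block = ((*curr_block + sunit_bb - 1) / sunit_bb) * sunit_bb;

	if (*curr_block >= log_bb) {	/* wrapped: start the next cycle */
		*curr_block -= log_bb;
		(*curr_cycle)++;
		/* a cycle count must never equal the record header magic */
		if (*curr_cycle == LOG_HEADER_MAGIC)
			(*curr_cycle)++;
	}
}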
3155 * Write out all data in the in-core log as of this exact moment in time.
3157 * Data may be written to the in-core log during this call. However,
3186 struct xlog *log = mp->m_log;
3192 xlog_cil_force(log);
3194 spin_lock(&log->l_icloglock);
3195 if (xlog_is_shutdown(log))
3198 iclog = log->l_iclog;
3229 xlog_state_switch_iclogs(log, iclog, 0);
3245 spin_unlock(&log->l_icloglock);
3248 spin_unlock(&log->l_icloglock);
3253 * Force the log to a specific LSN.
3257 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3263 * specific in-core log. When given in-core log finally completes its write
3268 struct xlog *log,
3277 spin_lock(&log->l_icloglock);
3278 if (xlog_is_shutdown(log))
3281 iclog = log->l_iclog;
3285 if (iclog == log->l_iclog)
3300 * refcnt so we can release the log (which drops the ref count).
3310 &log->l_icloglock);
3343 spin_unlock(&log->l_icloglock);
3346 spin_unlock(&log->l_icloglock);
3351 * Force the log to a specific checkpoint sequence.
3356 * a synchronous log force, we will wait on the iclog with the LSN returned by
3366 struct xlog *log = mp->m_log;
3374 lsn = xlog_cil_force_seq(log, seq);
3378 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3381 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3408 * Figure out the total log space unit (in bytes) that would be
3409 * required for a log ticket.
3413 struct xlog *log,
3421 * Permanent reservations have up to 'cnt'-1 active log operations
3422 * in the log. A unit in this case is the amount of space for one
3423 * of these log operations. Normal reservations have a cnt of 1
3427 * which occupy space in the on-disk log.
3464 * increase the space required enough to require more log and op
3472 * Fundamentally, this means we must pass the entire log vector to
3475 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3487 unit_bytes += log->l_iclog_hsize * num_headers;
3490 unit_bytes += log->l_iclog_hsize;
3493 unit_bytes += 2 * log->l_iclog_roundoff;
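
The additions above are the tail end of the unit reservation: once the payload size is known, reserve one iclog header for every iclog the payload can span, one more header's worth for the commit record, and two roundoff units of padding. A simplified version of just those additions (hypothetical helper; the real calculation also adds op headers and iterates if the overhead itself spills into more iclogs):

static unsigned int add_iclog_header_overhead(unsigned int unit_bytes,
					      unsigned int iclog_size,
					      unsigned int iclog_hsize,
					      unsigned int roundoff)
{
	/* usable payload space in one iclog */
	unsigned int iclog_space = iclog_size - iclog_hsize;
	/* how many iclogs (and hence record headers) the data may span */
	unsigned int num_headers = (unit_bytes + iclog_space - 1) / iclog_space;

	unit_bytes += iclog_hsize * num_headers;	/* record headers   */
	unit_bytes += iclog_hsize;			/* commit record LR */
	unit_bytes += 2 * roundoff;			/* roundoff padding */
	return unit_bytes;
}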
3509 * Allocate and initialise a new log ticket.
3513 struct xlog *log,
3524 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3554 struct xlog *log)
3559 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &space);
3560 xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_blocks);
3563 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3564 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3569 !test_and_set_bit(XLOG_TAIL_WARN, &log->l_opstate)) {
3570 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
3579 struct xlog *log,
3585 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3587 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3588 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3589 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3591 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3593 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3594 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3596 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3598 xfs_emerg(log->l_mp, "%s: ran out of log space", __func__);
3608 * 4. Check fields of each log operation header for:
3611 * C. Length in log record header is correct according to the
3614 * log, check the preceding blocks of the physical log to make sure all
3619 struct xlog *log,
3633 spin_lock(&log->l_icloglock);
3634 icptr = log->l_iclog;
3635 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3638 if (icptr != log->l_iclog)
3639 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3640 spin_unlock(&log->l_icloglock);
3642 /* check log magic numbers */
3644 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3650 xfs_emerg(log->l_mp, "%s: unexpected magic num",
3680 xfs_warn(log->l_mp,
3707 * Perform a forced shutdown on the log.
3709 * This can be called from low level log code to trigger a shutdown, or from the
3713 * a. if the shutdown was not due to a log IO error, flush the logs to
3715 * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3717 * c. Tasks sleeping on log reservations, pinned objects and
3719 * d. The mount is also marked as shut down so that log triggered shutdowns
3722 * Return true if the shutdown cause was a log IO error and we actually shut the
3723 * log down.
3727 struct xlog *log,
3732 if (!log)
3736 * Flush all the completed transactions to disk before marking the log
3737 * being shut down. We need to do this first as shutting down the log
3738 * before the force will prevent the log force from flushing the iclogs
3742 * we don't want to touch the log because we don't want to perturb the
3744 * avoid a log force in this case.
3746 * If we are shutting down due to a log IO error, then we must avoid
3747 * trying to write the log as that may just result in more IO errors and
3750 if (!log_error && !xlog_in_recovery(log))
3751 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3759 * Much of the log state machine transitions assume that shutdown state
3760 * cannot change once they hold the log->l_icloglock. Hence we need to
3764 spin_lock(&log->l_icloglock);
3765 if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3766 spin_unlock(&log->l_icloglock);
3769 spin_unlock(&log->l_icloglock);
3772 * If this log shutdown also sets the mount shutdown state, issue a
3775 if (!test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &log->l_mp->m_opstate)) {
3776 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3777 "Filesystem has been shut down due to log error (0x%x).",
3779 xfs_alert(log->l_mp,
3786 * We don't want anybody waiting for log reservations after this. That
3792 xlog_grant_head_wake_all(&log->l_reserve_head);
3793 xlog_grant_head_wake_all(&log->l_write_head);
3797 * as if the log writes were completed. The abort handling in the log
3801 spin_lock(&log->l_cilp->xc_push_lock);
3802 wake_up_all(&log->l_cilp->xc_start_wait);
3803 wake_up_all(&log->l_cilp->xc_commit_wait);
3804 spin_unlock(&log->l_cilp->xc_push_lock);
3806 spin_lock(&log->l_icloglock);
3807 xlog_state_shutdown_callbacks(log);
3808 spin_unlock(&log->l_icloglock);
3810 wake_up_var(&log->l_opstate);
3816 struct xlog *log)
3820 iclog = log->l_iclog;
3828 } while (iclog != log->l_iclog);
3841 struct xlog *log = mp->m_log;
3845 * norecovery mode skips mount-time log processing and unconditionally
3863 spin_lock(&log->l_icloglock);
3868 log->l_curr_cycle, log->l_curr_block);
3869 spin_unlock(&log->l_icloglock);
3876 * Notify the log that we're about to start using a feature that is protected
3877 * by a log incompat feature flag. This will prevent log covering from
3882 struct xlog *log)
3884 down_read(&log->l_incompat_users);
3887 /* Notify the log that we've finished using log incompat features. */
3890 struct xlog *log)
3892 up_read(&log->l_incompat_users);