Lines Matching refs:log

51 * Verify the log-relative block number and length in basic blocks are valid for
52 * an operation involving the given XFS log buffer. Returns true if the fields
57 struct xlog *log,
61 if (blk_no < 0 || blk_no >= log->l_logBBsize)
63 if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
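
A user-space sketch of the same bounds check, with int64_t standing in for the kernel's xfs_daddr_t and the log size in basic blocks replacing log->l_logBBsize (hypothetical helper, not the kernel function):

    #include <stdbool.h>
    #include <stdint.h>

    static bool verify_bno(int64_t log_bblks, int64_t blk_no, int64_t bbcount)
    {
        if (blk_no < 0 || blk_no >= log_bblks)
            return false;       /* start block outside the log */
        if (bbcount <= 0 || blk_no + bbcount > log_bblks)
            return false;       /* empty request or runs past the end */
        return true;
    }
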
69 * Allocate a buffer to hold log data. The buffer needs to be able to map to
70 * a range of nbblks basic blocks at any valid offset within the log.
74 struct xlog *log,
78 * Pass log block 0 since we don't have an addr yet, buffer will be
81 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
82 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
88 * We do log I/O in units of log sectors (a power-of-2 multiple of the
90 * the basic blocks required for complete log sectors.
96 * issue. Nor will this be a problem if the log I/O is done in basic
98 * extra log sector to ensure there's space to accommodate this
101 if (nbblks > 1 && log->l_sectBBsize > 1)
102 nbblks += log->l_sectBBsize;
103 nbblks = round_up(nbblks, log->l_sectBBsize);
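
A minimal sketch of that sizing rule, assuming the sector size is a power of two as in the kernel; ROUND_UP here is the generic form of the kernel's round_up():

    #include <stdint.h>

    #define ROUND_UP(x, y)  ((((x) + (y) - 1) / (y)) * (y))

    /* Size a read buffer in basic blocks: pad multi-block requests by one
     * sector so an unaligned start still fits, then round to whole sectors. */
    static uint32_t buf_size_bblks(uint32_t nbblks, uint32_t sect_bblks)
    {
        if (nbblks > 1 && sect_bblks > 1)
            nbblks += sect_bblks;
        return ROUND_UP(nbblks, sect_bblks);
    }

For example, an 8-block request on a log with 4-block sectors sizes the buffer to ROUND_UP(12, 4) = 12 blocks, enough to hold the sector-expanded I/O for any start offset within a sector.
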
109 * in a log buffer. The buffer covers a log sector-aligned region.
113 struct xlog *log,
116 return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
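
Equivalently, as a stand-alone sketch (BBTOB is a byte shift by 9, one basic block being 512 bytes):

    #include <stdint.h>

    #define BBSHIFT 9   /* one basic block = 512 bytes */

    /* Byte offset of blk_no within a buffer that starts on the previous
     * log-sector boundary; sect_bblks must be a power of two. */
    static uint32_t log_align(uint64_t blk_no, uint32_t sect_bblks)
    {
        return (uint32_t)((blk_no & (sect_bblks - 1)) << BBSHIFT);
    }
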
121 struct xlog *log,
129 if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
130 xfs_warn(log->l_mp,
131 "Invalid log block/length (0x%llx, 0x%x) for buffer",
136 blk_no = round_down(blk_no, log->l_sectBBsize);
137 nbblks = round_up(nbblks, log->l_sectBBsize);
140 error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
142 if (error && !xlog_is_shutdown(log)) {
143 xfs_alert(log->l_mp,
144 "log recovery %s I/O error at daddr 0x%llx len %d error %d",
153 struct xlog *log,
158 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
163 struct xlog *log,
171 error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
173 *offset = data + xlog_align(log, blk_no);
179 struct xlog *log,
184 return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
189 * dump debug superblock and log record information
198 xfs_debug(mp, " log : uuid = %pU, fmt = %d",
206 * check log record header for recovery
218 * a dirty log created in IRIX.
222 "dirty log written in incompatible format - can't recover");
229 "dirty log entry has mismatched uuid - can't recover");
237 * read the head block of the log and check the header
249 * h_fs_uuid is null, we assume this log was last mounted
252 xfs_warn(mp, "null uuid in log - IRIX style log");
255 xfs_warn(mp, "log has mismatched uuid - can't recover");
264 * log which contains the given cycle. It uses a binary search algorithm.
270 struct xlog *log,
285 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
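
The binary search narrows the boundary between the old cycle and the new one; a sketch over an in-memory array of cycle numbers in place of the per-block xlog_bread() calls:

    #include <stdint.h>

    /* Given cycles[first] != cycle and cycles[last] == cycle, find the
     * first block stamped with 'cycle'. Mirrors the loop structure of
     * xlog_find_cycle_start(). */
    static int64_t find_cycle_start(const uint32_t *cycles, int64_t first,
                                    int64_t last, uint32_t cycle)
    {
        while (last - first > 1) {
            int64_t mid = first + (last - first) / 2;

            if (cycles[mid] == cycle)
                last = mid;     /* boundary is at or before mid */
            else
                first = mid;    /* boundary is after mid */
        }
        return last;
    }
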
313 struct xlog *log,
330 * a log sector, or we're out of luck.
333 while (bufblks > log->l_logBBsize)
335 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
337 if (bufblks < log->l_sectBBsize)
346 error = xlog_bread(log, i, bcount, buffer, &buf);
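
The two loops above size the scan buffer: clamp it to the log size, then halve it on allocation failure, never dropping below one log sector. A user-space sketch with malloc() standing in for xlog_alloc_buffer():

    #include <stdint.h>
    #include <stdlib.h>

    #define BBSHIFT 9

    static void *alloc_scan_buffer(uint32_t *bufblks, uint32_t log_bblks,
                                   uint32_t sect_bblks)
    {
        void *buf;

        while (*bufblks > log_bblks)
            *bufblks >>= 1;             /* never larger than the log */
        while (!(buf = malloc((size_t)*bufblks << BBSHIFT))) {
            *bufblks >>= 1;             /* retry with half the size */
            if (*bufblks < sect_bblks)
                return NULL;            /* kernel returns -ENOMEM here */
        }
        return buf;
    }
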
369 xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
371 if (xfs_has_logv2(log->l_mp)) {
382 * Potentially backup over partial log record write.
385 * a good log record. Therefore, we subtract one to get the block number
388 * last log record is split over the end of the physical log.
395 struct xlog *log,
411 buffer = xlog_alloc_buffer(log, num_blks);
413 buffer = xlog_alloc_buffer(log, 1);
418 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
426 /* valid log record not found */
427 xfs_warn(log->l_mp,
435 error = xlog_bread(log, i, 1, buffer, &offset);
450 * We hit the beginning of the physical log & still no header. Return
452 * will be called again for the end of the physical log.
460 * We have the final block of the good log (the first block
461 * of the log record _before_ the head). So we check the uuid.
463 if ((error = xlog_header_check_mount(log->l_mp, head)))
467 * We may have found a log record header before we expected one.
469 * up reading an entire log record. In this case, we don't want to
470 * reset last_blk. Only when last_blk points in the middle of a log
473 xhdrs = xlog_logrec_hblks(log, head);
485 * Head is defined to be the point of the log where the next log write
489 * current cycle number -1 won't be present in the log if we start writing
499 struct xlog *log,
508 int error, log_bbnum = log->l_logBBsize;
510 /* Is the end of the log device zeroed? */
511 error = xlog_find_zeroed(log, &first_blk);
513 xfs_warn(log->l_mp, "empty log check failed");
523 * log so we can store the uuid in there
525 xfs_warn(log->l_mp, "totally zeroed log");
532 buffer = xlog_alloc_buffer(log, 1);
536 error = xlog_bread(log, 0, 1, buffer, &offset);
543 error = xlog_bread(log, last_blk, 1, buffer, &offset);
552 * then the entire log is stamped with the same cycle number. In this
563 * In this case we believe that the entire log should have
573 * log, as one of the latest writes at the beginning was
579 * end of the log.
581 * In the 256k log case, we will read from the beginning to the
582 * end of the log and search for cycle numbers equal to x-1.
584 * because we know that they cannot be the head since the log
592 * number matching last_half_cycle. We expect the log to be
601 * the log, then we look for occurrences of last_half_cycle - 1
602 * at the end of the log. The cases we're looking for look
613 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
623 * in the in-core log. The following number can be made tighter if
626 num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
633 if ((error = xlog_find_verify_cycle(log,
639 } else { /* need to read 2 parts of log */
641 * We are going to scan backwards in the log in two parts.
642 * First we scan the physical end of the log. In this part
643 * of the log, we are looking for blocks with cycle number
645 * If we find one, then we know that the log starts there, as
647 * the end of the physical log. The simple case for this is
650 * If all of the blocks at the end of the log have cycle number
652 * the log looking for occurrences of last_half_cycle. If we
662 * In a 256k log, the scan at the end of the log will see the
664 * certainly not the head of the log. By searching for
670 if ((error = xlog_find_verify_cycle(log, start_blk,
680 * Scan beginning of log now. The last part of the physical
681 * log is good. This scan needs to verify that it doesn't find
686 if ((error = xlog_find_verify_cycle(log,
697 * the middle of a log record.
699 num_scan_bblks = XLOG_REC_SHIFT(log);
704 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
712 error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
716 /* We hit the beginning of the log during our search */
722 error = xlog_find_verify_log_record(log, start_blk,
750 xfs_warn(log->l_mp, "failed to find log head");
755 * Seek backwards in the log for log record headers.
757 * Given a starting log block, walk backwards until we find the provided number
759 * records encountered or a negative error code. The log block and buffer
764 struct xlog *log,
783 * block in the log.
787 error = xlog_bread(log, i, 1, buffer, &offset);
800 * If we haven't hit the tail block or the log record header count,
801 * start looking again from the end of the physical log. Note that
805 for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
806 error = xlog_bread(log, i, 1, buffer, &offset);
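
A sketch of that wrapped backwards walk, over an array of per-block header flags instead of reading each block and checking its magic number, simplified to locate a single record header:

    #include <stdbool.h>
    #include <stdint.h>

    static int64_t rseek_logrec_hdr(const bool *is_hdr, int64_t log_bblks,
                                    int64_t head_blk, int64_t tail_blk)
    {
        int64_t i;

        for (i = head_blk; i >= 0; i--)     /* head back to block 0 */
            if (is_hdr[i])
                return i;
        if (tail_blk > head_blk)            /* log wrapped: resume the scan */
            for (i = log_bblks - 1; i >= tail_blk; i--) /* from the physical end */
                if (is_hdr[i])
                    return i;
        return -1;                          /* no header found */
    }
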
828 * Seek forward in the log for log record headers.
832 * number of records encountered or a negative error code. The log block and
838 struct xlog *log,
857 * block in the log.
859 end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
861 error = xlog_bread(log, i, 1, buffer, &offset);
874 * If we haven't hit the head block or the log record header count,
875 * start looking again from the start of the physical log.
879 error = xlog_bread(log, i, 1, buffer, &offset);
901 * Calculate distance from head to tail (i.e., unused space in the log).
905 struct xlog *log,
912 return tail_blk + (log->l_logBBsize - head_blk);
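
A self-contained sketch of the same arithmetic; the branch shown above handles the wrapped case, where the free space is split across the physical end of the log:

    #include <stdint.h>

    static int64_t tail_distance(int64_t log_bblks, int64_t head_blk,
                                 int64_t tail_blk)
    {
        if (head_blk < tail_blk)            /* free space is one segment */
            return tail_blk - head_blk;
        return tail_blk + (log_bblks - head_blk);   /* two segments */
    }
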
916 * Verify the log tail. This is particularly important when torn or incomplete
917 * writes have been detected near the front of the log and the head has been
924 * log with garbage. This is not a coherency problem because the tail must have
925 * been pushed before it can be overwritten, but appears as log corruption to
929 * Therefore, CRC check the log from tail to head. If a failure occurs and the
936 struct xlog *log,
949 buffer = xlog_alloc_buffer(log, 1);
957 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
972 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
981 tail_distance = xlog_tail_distance(log, head_blk, first_bad);
986 error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
993 error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
998 xfs_warn(log->l_mp,
1007 * Detect and trim torn writes from the head of the log.
1010 * log in the event of a crash. Our only means to detect this scenario is via
1013 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
1015 * the log and treat failures in this range as torn writes as a matter of
1017 * record in the log and the tail is updated from that record and verified.
1021 struct xlog *log,
1027 bool *wrapped) /* last rec. wraps phys. log */
1038 * Check the head of the log for torn writes. Search backwards from the
1039 * head until we hit the tail or the maximum number of log record I/Os
1043 tmp_buffer = xlog_alloc_buffer(log, 1);
1046 error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
1056 * log block of the first bad record is saved in first_bad.
1058 error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
1066 xfs_warn(log->l_mp,
1067 "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
1078 found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
1087 * log record and set the tail block based on the last good
1104 return xlog_verify_tail(log, *head_blk, tail_blk,
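
The torn-write policy described above limits the suspect region to the last XLOG_MAX_ICLOGS records before the head. A sketch of picking the start of the verification window, assuming hdr_blks[] holds record-header block numbers collected walking backwards from the head; MAX_ICLOGS is a stand-in value, treat it as an assumption:

    #include <stdint.h>

    #define MAX_ICLOGS 8    /* stands in for XLOG_MAX_ICLOGS */

    static int64_t verify_window_start(const int64_t *hdr_blks, int nfound,
                                       int64_t tail_blk)
    {
        if (nfound >= MAX_ICLOGS)
            return hdr_blks[MAX_ICLOGS - 1];  /* CRC-check from here to head */
        return tail_blk;    /* fewer records than iclogs: check from the tail */
    }
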
1109 * We need to make sure we handle log wrapping properly, so we can't use the
1111 * log.
1113 * The log is limited to 32-bit sizes, so we use the appropriate modulus
1118 struct xlog *log,
1123 div_s64_rem(bno, log->l_logBBsize, &mod);
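
A sketch of the wrap helper: the signed remainder matches the kernel's div_s64_rem() use, and a 32-bit log size is assumed, as the comment above states:

    #include <stdint.h>

    static int64_t wrap_logbno(int64_t bno, int32_t log_bblks)
    {
        return bno % log_bblks;   /* block number modulo the physical log size */
    }
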
1128 * Check whether the head of the log points to an unmount record. In other
1129 * words, determine whether the log is clean. If so, update the in-core state
1134 struct xlog *log,
1154 * log, we convert to a log block before comparing to the head_blk.
1160 hblks = xlog_logrec_hblks(log, rhead);
1161 after_umount_blk = xlog_wrap_logbno(log,
1166 umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
1167 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1174 * Set tail and last sync so that newly written log
1178 xlog_assign_atomic_lsn(&log->l_tail_lsn,
1179 log->l_curr_cycle, after_umount_blk);
1180 xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
1181 log->l_curr_cycle, after_umount_blk);
1193 struct xlog *log,
1200 * Reset log values according to the state of the log when we
1203 * continuing the cycle of the last good log record. At this
1204 * point we have guaranteed that all partial log records have been
1205 * accounted for. Therefore, we know that the last good log record
1207 * of the physical log.
1209 log->l_prev_block = rhead_blk;
1210 log->l_curr_block = (int)head_blk;
1211 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1213 log->l_curr_cycle++;
1214 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1215 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
1216 xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
1217 BBTOB(log->l_curr_block));
1218 xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
1219 BBTOB(log->l_curr_block));
1223 * Find the sync block number or the tail of the log.
1226 * associated buffers synced to disk. Every log record header has
1229 * log record header to believe.
1231 * The following algorithm uses the log record header with the largest
1232 * lsn. The entire log record does not need to be valid. We only care
1240 struct xlog *log,
1254 * Find previous log record
1256 if ((error = xlog_find_head(log, head_blk)))
1260 buffer = xlog_alloc_buffer(log, 1);
1264 error = xlog_bread(log, 0, 1, buffer, &offset);
1270 /* leave all other log inited values alone */
1276 * Search backwards through the log looking for the log record header
1280 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1285 xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
1292 * Set the log state based on the current head record.
1294 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1295 tail_lsn = atomic64_read(&log->l_tail_lsn);
1298 * Look for an unmount record at the head of the log. This sets the log
1301 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1307 * Verify the log head if the log is not clean (e.g., we have anything
1310 * considered torn writes and the log head is trimmed accordingly.
1312 * Note that we can only run CRC verification when the log is dirty
1313 * because there's no guarantee that the log data behind an unmount
1319 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1326 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1328 tail_lsn = atomic64_read(&log->l_tail_lsn);
1329 error = xlog_check_unmount_rec(log, head_blk, tail_blk,
1343 set_bit(XFS_OPSTATE_CLEAN, &log->l_mp->m_opstate);
1348 * because we allow multiple outstanding log writes concurrently,
1364 if (!xfs_readonly_buftarg(log->l_targ))
1365 error = xlog_clear_stale_blocks(log, tail_lsn);
1371 xfs_warn(log->l_mp, "failed to locate log tail");
1376 * Is the log zeroed at all?
1382 * If the log is partially zeroed, this routine will pass back the blkno
1387 * 0 => the log is completely written to
1388 * 1 => use *blk_no as the first block of the log
1393 struct xlog *log,
1401 int error, log_bbnum = log->l_logBBsize;
1406 /* check totally zeroed log */
1407 buffer = xlog_alloc_buffer(log, 1);
1410 error = xlog_bread(log, 0, 1, buffer, &offset);
1415 if (first_cycle == 0) { /* completely zeroed log */
1420 /* check partially zeroed log */
1421 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1426 if (last_cycle != 0) { /* log completely written to */
1431 /* we have a partially zeroed log */
1433 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1439 * the entire log is made up of log records which are the same size,
1443 num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
1456 if ((error = xlog_find_verify_cycle(log, start_blk,
1463 * Potentially backup over partial log record write. We don't need
1464 * to search the end of the log because we know it is zero.
1466 error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
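
The classification at the top of xlog_find_zeroed() needs only the cycle numbers of the first and last blocks; a sketch of that decision:

    #include <stdint.h>

    /* 0 = log completely written, 1 = completely zeroed, 2 = partially
     * zeroed (a binary search for the edge of the zeroed region follows). */
    static int classify_zeroed(uint32_t first_cycle, uint32_t last_cycle)
    {
        if (first_cycle == 0)
            return 1;
        if (last_cycle != 0)
            return 0;
        return 2;
    }
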
1482 * to initialize a buffer full of empty log record headers and write
1483 * them into the log.
1487 struct xlog *log,
1500 xfs_has_logv2(log->l_mp) ? 2 : 1);
1504 memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
1509 struct xlog *log,
1519 int sectbb = log->l_sectBBsize;
1529 * log sector, or we're out of luck.
1532 while (bufblks > log->l_logBBsize)
1534 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1546 error = xlog_bread_noalign(log, start_block, 1, buffer);
1565 error = xlog_bread_noalign(log, ealign, sectbb,
1572 offset = buffer + xlog_align(log, start_block);
1574 xlog_add_record(log, offset, cycle, i+j,
1578 error = xlog_bwrite(log, start_block, endcount, buffer);
1591 * This routine is called to blow away any incomplete log writes out
1592 * in front of the log head. We do this so that we won't become confused
1594 * If we leave the partial log records out there, this situation could
1597 * with empty log records with the old cycle number rather than the
1601 * the log so that we will not write over the unmount record after a
1602 * clean unmount in a 512 block log. Doing so would leave the log without
1603 * any valid log records in it until a new one was written. If we crashed
1608 struct xlog *log,
1619 head_cycle = log->l_curr_cycle;
1620 head_block = log->l_curr_block;
1623 * Figure out the distance between the new head of the log
1626 * we don't want to overwrite the tail of the log.
1630 * The tail is behind the head in the physical log,
1632 * distance from the head to the end of the log plus
1633 * the distance from the beginning of the log to the
1636 if (XFS_IS_CORRUPT(log->l_mp,
1638 head_block >= log->l_logBBsize))
1640 tail_distance = tail_block + (log->l_logBBsize - head_block);
1643 * The head is behind the tail in the physical log,
1647 if (XFS_IS_CORRUPT(log->l_mp,
1663 max_distance = XLOG_TOTAL_REC_SHIFT(log);
1673 if ((head_block + max_distance) <= log->l_logBBsize) {
1676 * wrapping around the end of the log. Just do it
1678 * current cycle minus one so that the log will look like:
1681 error = xlog_write_log_records(log, (head_cycle - 1),
1688 * We need to wrap around the end of the physical log in
1691 * end of the physical log, and it should use the current
1694 distance = log->l_logBBsize - head_block;
1695 error = xlog_write_log_records(log, (head_cycle - 1),
1703 * Now write the blocks at the start of the physical log.
1710 distance = max_distance - (log->l_logBBsize - head_block);
1711 error = xlog_write_log_records(log, head_cycle, 0, distance,
1726 struct xlog *log,
1732 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
1742 xfs_defer_cancel_recovery(log->l_mp, dfp);
1808 * Sort the log items in the transaction.
1858 struct xlog *log,
1876 xfs_warn(log->l_mp,
1877 "%s: unrecognized type of log operation (%d)",
1898 trace_xfs_log_recover_item_reorder_head(log,
1906 trace_xfs_log_recover_item_reorder_tail(log,
1927 struct xlog *log,
1932 if (!xlog_is_buffer_cancelled(log, blkno, len))
1933 xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
1938 * log intent item that was found during recovery.
1942 struct xlog *log,
1949 xfs_defer_start_recovery(lip, &log->r_dfops, ops);
1955 xfs_trans_ail_insert(log->l_ailp, lip, lsn);
1961 struct xlog *log,
1970 trace_xfs_log_recover_item_recover(log, trans, item,
1974 error = item->ri_ops->commit_pass2(log, buffer_list,
1991 struct xlog *log,
2007 error = xlog_recover_reorder_trans(log, trans, pass);
2012 trace_xfs_log_recover_item_recover(log, trans, item, pass);
2017 error = item->ri_ops->commit_pass1(log, item);
2021 item->ri_ops->ra_pass2(log, item);
2025 error = xlog_recover_items_pass2(log, trans,
2043 error = xlog_recover_items_pass2(log, trans,
2068 struct xlog *log,
2084 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2108 trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
2123 * will appear in the current log item.
2127 struct xlog *log,
2139 /* we need to catch log corruptions here */
2141 xfs_warn(log->l_mp, "%s: bad header magic number",
2148 xfs_warn(log->l_mp, "%s: bad header length", __func__);
2182 xfs_warn(log->l_mp,
2183 "bad number of regions (%d) in inode log format",
2196 xfs_warn(log->l_mp,
2197 "log item region count (%d) overflowed size (%d)",
2208 trace_xfs_log_recover_item_add(log, trans, item, 0);
2244 struct xlog *log,
2268 error = xlog_recover_add_to_trans(log, trans, dp, len);
2271 error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
2274 error = xlog_recover_commit_trans(log, trans, pass,
2283 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
2288 xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
2351 struct xlog *log,
2367 xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2378 xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2403 * In other words, we are allowed to submit a buffer from log recovery
2408 * LSN. Therefore, track the current LSN of each commit log record as it
2412 if (log->l_recovery_lsn != trans->r_lsn &&
2417 log->l_recovery_lsn = trans->r_lsn;
2420 return xlog_recovery_process_trans(log, trans, dp, len,
2435 struct xlog *log,
2450 /* check the log format matches our own - else we can't recover */
2451 if (xlog_header_check_recover(log->l_mp, rhead))
2454 trace_xfs_log_recover_record(log, rhead, pass);
2462 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2491 * in recovery no matter how full the log might be.
2536 * When this is called, all of the log intent items which did not have
2537 * corresponding log done items should be in the AIL. What we do now is update
2540 * Since we process the log intent items in normal transactions, they will be
2553 struct xlog *log)
2561 last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2564 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2569 * the last transaction we found in the log at the start
2580 * The recovery function can free the log item, so we must not
2584 error = xfs_defer_finish_recovery(log->l_mp, dfp,
2592 error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2598 xlog_abort_defer_ops(log->l_mp, &capture_list);
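
The xlog_assign_lsn() call at the top of this function packs a cycle/block pair into a 64-bit LSN with the cycle in the high half, so plain integer comparison orders LSNs by write order. A sketch of the packing, which I believe matches the kernel helper (hedged as an assumption):

    #include <stdint.h>

    typedef int64_t lsn_t;      /* stands in for xfs_lsn_t */

    static lsn_t assign_lsn(uint32_t cycle, uint32_t block)
    {
        return ((lsn_t)cycle << 32) | block;
    }

    static uint32_t lsn_cycle(lsn_t lsn) { return (uint32_t)(lsn >> 32); }
    static uint32_t lsn_block(lsn_t lsn) { return (uint32_t)lsn; }
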
2604 * pending log intent items that we haven't started recovery on so they don't
2609 struct xlog *log)
2613 list_for_each_entry_safe(dfp, n, &log->r_dfops, dfp_list) {
2616 xfs_defer_cancel_recovery(log->l_mp, dfp);
2757 * of log space.
2761 * can lead to deadlocks if the recovery process runs out of log reservation
2815 struct xlog *log)
2820 for_each_perag(log->l_mp, agno, pag)
2828 struct xlog *log)
2838 if (xfs_has_logv2(log->l_mp)) {
2850 * CRC check, unpack and process a log record.
2854 struct xlog *log,
2864 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2886 if (old_crc || xfs_has_crc(log->l_mp)) {
2887 xfs_alert(log->l_mp,
2888 "log record CRC mismatch: found 0x%x, expected 0x%x.",
2896 * fatal log corruption failure.
2898 if (xfs_has_crc(log->l_mp)) {
2899 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2904 xlog_unpack_data(rhead, dp, log);
2906 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
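
The CRC policy above: always compute the checksum, report a mismatch whenever a CRC was stored or the filesystem mandates CRCs, but fail only on CRC-enabled (v5) filesystems. A user-space sketch of that decision; the kernel returns -EFSBADCRC, which maps to EBADMSG:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static int check_record_crc(uint32_t computed, uint32_t stored, bool has_crc)
    {
        if (computed == stored)
            return 0;
        if (stored || has_crc)
            fprintf(stderr,
                "log record CRC mismatch: found 0x%x, expected 0x%x.\n",
                stored, computed);
        return has_crc ? -EBADMSG : 0;  /* fatal only with CRCs enabled */
    }
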
2912 struct xlog *log,
2919 if (XFS_IS_CORRUPT(log->l_mp,
2922 if (XFS_IS_CORRUPT(log->l_mp,
2926 xfs_warn(log->l_mp, "%s: unrecognized log version (%d).",
2936 if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2939 if (XFS_IS_CORRUPT(log->l_mp,
2940 blkno > log->l_logBBsize || blkno > INT_MAX))
2946 * Read the log from tail to head and process the log records found.
2948 * and where the active portion of the log wraps around the end of
2949 * the physical log separately. The pass parameter is passed through
2955 struct xlog *log,
2959 xfs_daddr_t *first_bad) /* out: first bad log rec */
2982 * h_size. Use this to tell how many sectors make up the log header.
2984 if (xfs_has_logv2(log->l_mp)) {
2990 hbp = xlog_alloc_buffer(log, 1);
2994 error = xlog_bread(log, tail_blk, 1, hbp, &offset);
3004 * log buffer can be too small for the record and cause an
3013 if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
3015 xfs_warn(log->l_mp,
3017 h_size, log->l_mp->m_logbsize);
3018 h_size = log->l_mp->m_logbsize;
3021 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3025 hblks = xlog_logrec_hblks(log, rhead);
3028 hbp = xlog_alloc_buffer(log, hblks);
3031 ASSERT(log->l_sectBBsize == 1);
3033 hbp = xlog_alloc_buffer(log, 1);
3039 dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3048 * Perform recovery around the end of the physical log.
3052 while (blk_no < log->l_logBBsize) {
3054 * Check for header wrapping around physical end-of-log
3059 if (blk_no + hblks <= log->l_logBBsize) {
3061 error = xlog_bread(log, blk_no, hblks, hbp,
3066 /* This LR is split across physical log end */
3067 if (blk_no != log->l_logBBsize) {
3068 /* some data before physical log end */
3070 split_hblks = log->l_logBBsize - (int)blk_no;
3072 error = xlog_bread(log, blk_no,
3085 * - the log start is guaranteed to be sector
3087 * - we read the log end (LR header start)
3088 * _first_, then the log start (LR header end)
3092 error = xlog_bread_noalign(log, 0,
3099 error = xlog_valid_rec_header(log, rhead,
3108 * Read the log record data in multiple reads if it
3109 * wraps around the end of the log. Note that if the
3111 * end of the log. The record data is contiguous in
3114 if (blk_no + bblks <= log->l_logBBsize ||
3115 blk_no >= log->l_logBBsize) {
3116 rblk_no = xlog_wrap_logbno(log, blk_no);
3117 error = xlog_bread(log, rblk_no, bblks, dbp,
3122 /* This log record is split across the
3123 * physical end of log */
3126 if (blk_no != log->l_logBBsize) {
3128 * end of log */
3132 log->l_logBBsize - (int)blk_no;
3134 error = xlog_bread(log, blk_no,
3147 * - the log start is guaranteed to be sector
3149 * - we read the log end (LR header start)
3150 * _first_, then the log start (LR header end)
3153 error = xlog_bread_noalign(log, 0,
3160 error = xlog_recover_process(log, rhash, rhead, offset,
3169 ASSERT(blk_no >= log->l_logBBsize);
3170 blk_no -= log->l_logBBsize;
3174 /* read first part of physical log */
3176 error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3181 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3187 error = xlog_bread(log, blk_no+hblks, bblks, dbp,
3192 error = xlog_recover_process(log, rhash, rhead, offset, pass,
3225 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3250 * Do the recovery of the log. We actually do this in two phases.
3252 * of cancelling a record written into the log. The first pass
3254 * second pass replays log items normally except for those which
3256 * takes place in the log item type specific routines.
3258 * The table of items which have cancel records in the log is allocated
3260 * the log recovery has been completed.
3264 struct xlog *log,
3273 * First do a pass to find all of the cancelled buf log items.
3276 error = xlog_alloc_buf_cancel_table(log);
3280 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3286 * Then do a second pass to actually recover the items in the log.
3289 error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3292 xlog_check_buf_cancel_table(log);
3294 xlog_free_buf_cancel_table(log);
3303 struct xlog *log,
3307 struct xfs_mount *mp = log->l_mp;
3312 trace_xfs_log_recover(log, head_blk, tail_blk);
3315 * First replay the images in the log.
3317 error = xlog_do_log_recovery(log, head_blk, tail_blk);
3321 if (xlog_is_shutdown(log))
3327 * or iunlinks, we can free up the entire log and set the tail_lsn to
3343 if (!xlog_is_shutdown(log)) {
3367 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
3372 * Perform recovery and re-initialize some log variables in xlog_find_tail.
3378 struct xlog *log)
3383 /* find the tail of the log */
3384 error = xlog_find_tail(log, &head_blk, &tail_blk);
3389 * The superblock was read before the log was available and thus the LSN
3393 if (xfs_has_crc(log->l_mp) &&
3394 !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3409 if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3414 * Version 5 superblock log feature mask validation. We know the
3415 * log is dirty so check if there are any unknown log features
3420 if (xfs_sb_is_v5(&log->l_mp->m_sb) &&
3421 xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
3423 xfs_warn(log->l_mp,
3424 "Superblock has unknown incompatible log features (0x%x) enabled.",
3425 (log->l_mp->m_sb.sb_features_log_incompat &
3427 xfs_warn(log->l_mp,
3428 "The log can not be fully and/or safely recovered by this kernel.");
3429 xfs_warn(log->l_mp,
3430 "Please recover the log on a kernel that supports the unknown features.");
3435 * Delay log recovery if the debug hook is set. This is debug
3437 * log recovery.
3440 xfs_notice(log->l_mp,
3441 "Delaying log recovery for %d seconds.",
3446 xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
3447 log->l_mp->m_logname ? log->l_mp->m_logname
3450 error = xlog_do_recover(log, head_blk, tail_blk);
3451 set_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
3472 struct xlog *log)
3477 error = xlog_recover_process_intents(log);
3486 xlog_recover_cancel_intents(log);
3487 xfs_alert(log->l_mp, "Failed to recover intents");
3488 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3493 * Sync the log to get all the intents out of the AIL. This isn't
3497 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3500 * Now that we've recovered the log and all the intents, we can clear
3501 * the log incompat feature bits in the superblock because there's no
3505 if (xfs_clear_incompat_log_features(log->l_mp)) {
3506 error = xfs_sync_sb(log->l_mp, false);
3508 xfs_alert(log->l_mp,
3509 "Failed to clear log incompat features on recovery");
3514 xlog_recover_process_iunlinks(log);
3523 error = xfs_reflink_recover_cow(log->l_mp);
3525 xfs_alert(log->l_mp,
3529 * If we get an error here, make sure the log is shut down
3530 * but return zero so that any log items committed since the
3534 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
3546 struct xlog *log)
3548 if (xlog_recovery_needed(log))
3549 xlog_recover_cancel_intents(log);