Lines matching defs:log (search limited to /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/xfs/)

44 STATIC int	 xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
50 STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
51 STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
52 STATIC void xlog_dealloc_log(xlog_t *log);
56 STATIC void xlog_state_do_callback(xlog_t *log, int aborted, xlog_in_core_t *iclog);
57 STATIC int xlog_state_get_iclog_space(xlog_t *log,
63 STATIC int xlog_state_release_iclog(xlog_t *log,
65 STATIC void xlog_state_switch_iclogs(xlog_t *log,
68 STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
71 STATIC int xlog_grant_log_space(xlog_t *log,
75 STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
77 STATIC int xlog_regrant_write_log_space(xlog_t *log,
79 STATIC void xlog_ungrant_log_space(xlog_t *log,
83 STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
84 STATIC void xlog_verify_grant_head(xlog_t *log, int equals);
85 STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
87 STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
96 STATIC int xlog_iclogs_empty(xlog_t *log);
131 xlog_grant_sub_space(struct log *log, int bytes)
133 log->l_grant_write_bytes -= bytes;
134 if (log->l_grant_write_bytes < 0) {
135 log->l_grant_write_bytes += log->l_logsize;
136 log->l_grant_write_cycle--;
139 log->l_grant_reserve_bytes -= bytes;
140 if ((log)->l_grant_reserve_bytes < 0) {
141 log->l_grant_reserve_bytes += log->l_logsize;
142 log->l_grant_reserve_cycle--;
148 xlog_grant_add_space_write(struct log *log, int bytes)
150 int tmp = log->l_logsize - log->l_grant_write_bytes;
152 log->l_grant_write_bytes += bytes;
154 log->l_grant_write_cycle++;
155 log->l_grant_write_bytes = bytes - tmp;
160 xlog_grant_add_space_reserve(struct log *log, int bytes)
162 int tmp = log->l_logsize - log->l_grant_reserve_bytes;
164 log->l_grant_reserve_bytes += bytes;
166 log->l_grant_reserve_cycle++;
167 log->l_grant_reserve_bytes = bytes - tmp;
172 xlog_grant_add_space(struct log *log, int bytes)
174 xlog_grant_add_space_write(log, bytes);
175 xlog_grant_add_space_reserve(log, bytes);
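
The fragments above (file lines 131-175) show how the grant-head accounting works: each grant head is tracked as a cycle count plus a byte offset into the log, and additions or subtractions wrap the byte offset around l_logsize while bumping the cycle. A minimal user-space sketch of that arithmetic, assuming a hypothetical grant_head struct in place of the kernel's l_grant_* fields:

    /* Illustrative model only; the struct and function names are not the
     * kernel's. logsize corresponds to log->l_logsize in bytes. */
    struct grant_head {
        int cycle;   /* how many times this head has wrapped */
        int bytes;   /* byte offset of this head within the log */
    };

    static void grant_sub_space(struct grant_head *head, int bytes, int logsize)
    {
        head->bytes -= bytes;
        if (head->bytes < 0) {           /* wrapped back past byte 0 */
            head->bytes += logsize;
            head->cycle--;
        }
    }

    static void grant_add_space(struct grant_head *head, int bytes, int logsize)
    {
        int to_end = logsize - head->bytes;

        if (bytes < to_end) {            /* fits before the end of the log */
            head->bytes += bytes;
        } else {                         /* wraps around: start a new cycle */
            head->cycle++;
            head->bytes = bytes - to_end;
        }
    }

xlog_grant_add_space (file line 172) applies the same update to both the write head and the reserve head.
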
210 * This routine is called when a user of a log manager ticket is done with
212 * the associated transaction is written out as a log operation header with
230 struct log *log = mp->m_log;
233 if (XLOG_FORCED_SHUTDOWN(log) ||
236 * If we get an error, just continue and give back the log ticket.
239 (xlog_commit_record(log, ticket, iclog, &lsn)))) {
249 trace_xfs_log_done_nonperm(log, ticket);
255 xlog_ungrant_log_space(log, ticket);
258 trace_xfs_log_done_perm(log, ticket);
260 xlog_regrant_reserve_log_space(log, ticket);
273 * transaction commit. If the log is in error state, a non-zero
312 * 1. Reserve an amount of on-disk log space and return a ticket corresponding
314 * 2. Potentially, push buffers at tail of log to disk.
316 * Each reservation is going to reserve extra space for a log record header.
317 * When writes happen to the on-disk log, we don't subtract the length of the
318 * log record header from any reservation. By wasting space in each
331 struct log *log = mp->m_log;
337 if (XLOG_FORCED_SHUTDOWN(log))
350 * different TID in the log. Just add one to the existing tid
351 * so that we can see chains of rolling transactions in the log
356 trace_xfs_log_reserve(log, internal_ticket);
359 retval = xlog_regrant_write_log_space(log, internal_ticket);
362 internal_ticket = xlog_ticket_alloc(log, unit_bytes, cnt,
370 trace_xfs_log_reserve(log, internal_ticket);
375 retval = xlog_grant_log_space(log, internal_ticket);
383 * Mount a log filesystem
386 * log_target - buftarg of on-disk log device
388 * num_bblocks - Number of BBSIZE blocks in on-disk log
417 * Initialize the AIL now we have a log.
427 * skip log recovery on a norecovery mount. pretend it all
441 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
450 * Now the log has been fully initialised and we know where our
490 * Final log writes as part of unmount.
508 xlog_t *log = mp->m_log;
525 ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
528 first_iclog = iclog = log->l_iclog;
537 if (! (XLOG_FORCED_SHUTDOWN(log))) {
561 error = xlog_write(log, &vec, tic, &lsn,
565 * so there's no point in transitioning log state
576 spin_lock(&log->l_icloglock);
577 iclog = log->l_iclog;
579 xlog_state_want_sync(log, iclog);
580 spin_unlock(&log->l_icloglock);
581 error = xlog_state_release_iclog(log, iclog);
583 spin_lock(&log->l_icloglock);
586 if (!XLOG_FORCED_SHUTDOWN(log)) {
588 &log->l_icloglock, s);
590 spin_unlock(&log->l_icloglock);
593 spin_unlock(&log->l_icloglock);
596 trace_xfs_log_umount_write(log, tic);
597 xlog_ungrant_log_space(log, tic);
607 * we need to wait for other log I/Os that may already
614 spin_lock(&log->l_icloglock);
615 iclog = log->l_iclog;
618 xlog_state_want_sync(log, iclog);
619 spin_unlock(&log->l_icloglock);
620 error = xlog_state_release_iclog(log, iclog);
622 spin_lock(&log->l_icloglock);
629 &log->l_icloglock, s);
631 spin_unlock(&log->l_icloglock);
639 * Deallocate log structures for unmount/relocation.
642 * and deallocate the log as the aild references the log.
669 * Write region vectors to log. The write happens using the space reservation
673 * number of log headers a transaction requires that may be violated if you
684 struct log *log = mp->m_log;
691 if (XLOG_FORCED_SHUTDOWN(log))
694 error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
705 xlog_t *log = mp->m_log;
708 if (XLOG_FORCED_SHUTDOWN(log))
713 spin_lock(&log->l_icloglock);
714 tail_lsn = log->l_last_sync_lsn;
715 spin_unlock(&log->l_icloglock);
718 spin_lock(&log->l_grant_lock);
724 log->l_tail_lsn = tail_lsn;
727 if ((tic = log->l_write_headq)) {
729 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
732 cycle = log->l_grant_write_cycle;
733 bytes = log->l_grant_write_bytes;
734 free_bytes = xlog_space_left(log, cycle, bytes);
744 } while (tic != log->l_write_headq);
746 if ((tic = log->l_reserve_headq)) {
748 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
751 cycle = log->l_grant_reserve_cycle;
752 bytes = log->l_grant_reserve_bytes;
753 free_bytes = xlog_space_left(log, cycle, bytes);
765 } while (tic != log->l_reserve_headq);
767 spin_unlock(&log->l_grant_lock);
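
xfs_log_move_tail (file lines 705-767) updates l_tail_lsn and then walks the write and reserve wait queues, waking queued tickets in order for as long as the newly freed space covers each ticket's need. The loop bodies are not part of this match listing, so the sketch below is only a guess at the shape of that wakeup pass, with hypothetical waiter fields and a hypothetical wake() callback:

    /* Hedged sketch of a FIFO wakeup pass over a circular wait queue;
     * the structure and names are illustrative, not the kernel's. */
    struct waiter {
        int need_bytes;          /* space this waiter is blocked on */
        struct waiter *next;     /* circular singly linked list */
    };

    static void wake_waiters(struct waiter *headq, int free_bytes,
                             void (*wake)(struct waiter *))
    {
        struct waiter *w = headq;

        if (!w)
            return;
        do {
            if (free_bytes < w->need_bytes)
                break;           /* keep FIFO order: stop at the first misfit */
            free_bytes -= w->need_bytes;
            wake(w);
            w = w->next;
        } while (w != headq);
    }
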
773 * firstly the log needs to be idle (no AIL and nothing in the iclogs).
775 * that dummy transactions are required to move the log into the idle state.
779 * idle states. This ensures that the log tail is accurately reflected in
780 * the log at the end of the sync, hence if a crash occurs avoids replay
787 xlog_t *log = mp->m_log;
792 spin_lock(&log->l_icloglock);
793 switch (log->l_covered_state) {
800 if (!xfs_trans_ail_tail(log->l_ailp) &&
801 xlog_iclogs_empty(log)) {
802 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
803 log->l_covered_state = XLOG_STATE_COVER_DONE;
805 log->l_covered_state = XLOG_STATE_COVER_DONE2;
812 spin_unlock(&log->l_icloglock);
824 * The log manager must keep track of the last LR which was committed
827 * the situation where stuff could be written into the log but nothing
831 * We may be holding the log iclog lock upon entering this routine.
837 xlog_t *log = mp->m_log;
840 spin_lock(&log->l_grant_lock);
842 log->l_tail_lsn = tail_lsn;
844 tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
846 spin_unlock(&log->l_grant_lock);
853 * Return the space in the log between the tail and the head. The head
857 * in the log. This works for all places where this function is called
864 * result is that we return the size of the log as the amount of space left.
867 xlog_space_left(xlog_t *log, int cycle, int bytes)
873 tail_bytes = BBTOB(BLOCK_LSN(log->l_tail_lsn));
874 tail_cycle = CYCLE_LSN(log->l_tail_lsn);
876 free_bytes = log->l_logsize - (bytes - tail_bytes);
886 * log as the amount of space left.
888 xfs_fs_cmn_err(CE_ALERT, log->l_mp,
894 free_bytes = log->l_logsize;
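
The comment at file lines 853-894 describes xlog_space_left(): it returns the number of bytes between the log tail and the given head, and when the head and tail are transiently inconsistent it answers with the full log size rather than something smaller. A self-contained sketch of that calculation, using the same cycle/bytes representation as above (names are illustrative, not the kernel's):

    /* head_cycle/head_bytes describe a grant head, tail_cycle/tail_bytes the
     * log tail; logsize is the log length in bytes. Illustrative only. */
    static int log_space_left(int logsize, int head_cycle, int head_bytes,
                              int tail_cycle, int tail_bytes)
    {
        if (tail_cycle == head_cycle && head_bytes >= tail_bytes) {
            /* head and tail are in the same pass over the log */
            return logsize - (head_bytes - tail_bytes);
        } else if (tail_cycle + 1 == head_cycle && head_bytes <= tail_bytes) {
            /* head has wrapped once past the end, the tail has not yet */
            return tail_bytes - head_bytes;
        }
        /* Transiently inconsistent head/tail (one was updated before the
         * other): report the whole log as free, as the comment above says. */
        return logsize;
    }
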
903 * The log manager needs its own routine, in order to control what
942 * callback routines to let them know that the log-commit
950 /* log I/O is always issued ASYNC */
956 * log.
962 * Return size of each in-core log record buffer.
972 xlog_t *log)
978 log->l_iclog_bufs = XLOG_MAX_ICLOGS;
980 log->l_iclog_bufs = mp->m_logbufs;
986 size = log->l_iclog_size = mp->m_logbsize;
987 log->l_iclog_size_log = 0;
989 log->l_iclog_size_log++;
1001 log->l_iclog_hsize = xhdrs << BBSHIFT;
1002 log->l_iclog_heads = xhdrs;
1005 log->l_iclog_hsize = BBSIZE;
1006 log->l_iclog_heads = 1;
1012 log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
1013 log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;
1015 /* the default log size is 16k or 32k which is one header sector */
1016 log->l_iclog_hsize = BBSIZE;
1017 log->l_iclog_heads = 1;
1022 mp->m_logbufs = log->l_iclog_bufs;
1024 mp->m_logbsize = log->l_iclog_size;
1029 * This routine initializes some of the log structure for a given mount point.
1039 xlog_t *log;
1048 log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
1049 if (!log) {
1054 log->l_mp = mp;
1055 log->l_targ = log_target;
1056 log->l_logsize = BBTOB(num_bblks);
1057 log->l_logBBstart = blk_offset;
1058 log->l_logBBsize = num_bblks;
1059 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1060 log->l_flags |= XLOG_ACTIVE_RECOVERY;
1062 log->l_prev_block = -1;
1063 log->l_tail_lsn = xlog_assign_lsn(1, 0);
1064 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1065 log->l_last_sync_lsn = log->l_tail_lsn;
1066 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
1067 log->l_grant_reserve_cycle = 1;
1068 log->l_grant_write_cycle = 1;
1086 /* for larger sector sizes, must have v2 or external log */
1087 if (log2_size && log->l_logBBstart > 0 &&
1090 xlog_warn("XFS: log sector size (0x%x) invalid "
1095 log->l_sectBBsize = 1 << log2_size;
1097 xlog_get_iclog_buffer_size(mp, log);
1100 bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
1107 log->l_xbuf = bp;
1109 spin_lock_init(&log->l_icloglock);
1110 spin_lock_init(&log->l_grant_lock);
1111 sv_init(&log->l_flush_wait, 0, "flush_wait");
1113 /* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
1116 iclogp = &log->l_iclog;
1124 ASSERT(log->l_iclog_size >= 4096);
1125 for (i=0; i < log->l_iclog_bufs; i++) {
1134 bp = xfs_buf_get_noaddr(log->l_iclog_size, mp->m_logdev_targp);
1144 log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
1150 xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
1151 head->h_size = cpu_to_be32(log->l_iclog_size);
1156 iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
1158 iclog->ic_log = log;
1162 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1171 *iclogp = log->l_iclog; /* complete ring */
1172 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1174 error = xlog_cil_init(log);
1177 return log;
1180 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1189 spinlock_destroy(&log->l_icloglock);
1190 spinlock_destroy(&log->l_grant_lock);
1191 xfs_buf_free(log->l_xbuf);
1193 kmem_free(log);
1205 struct log *log,
1210 struct xfs_mount *mp = log->l_mp;
1223 error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
1232 * log space. This code pushes on the lsn which would supposedly free up
1234 * pushes on an lsn which is further along in the log once we reach the high
1241 xlog_t *log = mp->m_log; /* pointer to the log */
1242 xfs_lsn_t tail_lsn; /* lsn of the log tail */
1250 ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
1252 spin_lock(&log->l_grant_lock);
1253 free_bytes = xlog_space_left(log,
1254 log->l_grant_reserve_cycle,
1255 log->l_grant_reserve_bytes);
1256 tail_lsn = log->l_tail_lsn;
1261 * log to the maximum of what the caller needs, one quarter of the
1262 * log, and 256 blocks.
1265 free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
1270 if (threshold_block >= log->l_logBBsize) {
1271 threshold_block -= log->l_logBBsize;
1277 * log record known to be on disk.
1279 if (XFS_LSN_CMP(threshold_lsn, log->l_last_sync_lsn) > 0)
1280 threshold_lsn = log->l_last_sync_lsn;
1282 spin_unlock(&log->l_grant_lock);
1290 !XLOG_FORCED_SHUTDOWN(log))
1291 xfs_trans_ail_push(log->l_ailp, threshold_lsn);
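
xlog_grant_push_ail (file lines 1232-1291) decides how far ahead of the current tail the AIL should be pushed when reserve space runs low: at least what the caller needs, at least a quarter of the log, and at least 256 blocks, wrapping into the next cycle if the threshold passes the physical end of the log. A rough model of the threshold-block computation (names are illustrative):

    /* need_bbs: blocks the caller needs; log_bbs: log size in basic blocks;
     * tail_block: block number of the current tail. threshold_cycle is
     * bumped if the threshold wraps. Illustrative only. */
    static int push_threshold_block(int need_bbs, int log_bbs, int tail_block,
                                    int *threshold_cycle)
    {
        int free_threshold = need_bbs;
        int threshold_block;

        if (free_threshold < (log_bbs >> 2))   /* at least 1/4 of the log */
            free_threshold = log_bbs >> 2;
        if (free_threshold < 256)              /* and at least 256 blocks */
            free_threshold = 256;

        threshold_block = tail_block + free_threshold;
        if (threshold_block >= log_bbs) {      /* wrap into the next cycle */
            threshold_block -= log_bbs;
            (*threshold_cycle)++;
        }
        return threshold_block;
    }

The resulting LSN is then clamped to l_last_sync_lsn (file lines 1277-1280), since there is no point pushing past the last log record known to be on disk.
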
1295 * The bdstrat callback function for log bufs. This gives us a central
1296 * place to trap bufs in case we get hit by a log I/O error and need to
1297 * shutdown. Actually, in practice, even when we didn't get a log error,
1315 * the log state machine to propagate I/O errors instead of
1327 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1329 * ptr in the log to point to the next available iclog. This allows further
1331 * Before an in-core log can be written out, the data section must be scanned
1343 * log will require grabbing the lock though.
1345 * The entire log manager uses a logical block numbering scheme. Only
1346 * log_sync (and then only bwrite()) know about the fact that the log may
1347 * not start with block zero on a given device. The log block start offset
1352 xlog_sync(xlog_t *log,
1363 int v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);
1369 count_init = log->l_iclog_hsize + iclog->ic_offset;
1371 /* Round out the log write size */
1372 if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
1374 count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
1380 ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
1381 roundoff < log->l_mp->m_sb.sb_logsunit)
1383 (log->l_mp->m_sb.sb_logsunit <= 1 &&
1387 spin_lock(&log->l_grant_lock);
1388 xlog_grant_add_space(log, roundoff);
1389 spin_unlock(&log->l_grant_lock);
1392 xlog_pack_data(log, iclog, roundoff);
1411 if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
1412 split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
1413 count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
1425 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1428 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1429 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1431 xlog_verify_iclog(log, iclog, count, B_TRUE);
1433 /* account for log which doesn't start at block #0 */
1434 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1436 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
1442 xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
1459 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1475 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1476 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1478 /* account for internal log which doesn't start at block #0 */
1479 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1482 xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
1492 * Deallocate a log structure
1495 xlog_dealloc_log(xlog_t *log)
1500 xlog_cil_destroy(log);
1502 iclog = log->l_iclog;
1503 for (i=0; i<log->l_iclog_bufs; i++) {
1511 spinlock_destroy(&log->l_icloglock);
1512 spinlock_destroy(&log->l_grant_lock);
1514 xfs_buf_free(log->l_xbuf);
1515 log->l_mp->m_log = NULL;
1516 kmem_free(log);
1524 xlog_state_finish_copy(xlog_t *log,
1529 spin_lock(&log->l_icloglock);
1534 spin_unlock(&log->l_icloglock);
1654 * Calculate the potential space needed by the log vector. Each region gets
1713 struct log *log,
1736 xfs_fs_cmn_err(CE_WARN, log->l_mp,
1746 * Set up the parameters of the region copy into the log. This has
1747 * to handle region write split across multiple log buffers - this
1778 /* partial write of region, needs extra log op header reservation */
1787 /* account for new log op header */
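
The fragments at file lines 1654-1787 cover how xlog_write sizes and splits the vector: each region is written behind its own log operation header, and a region that has to be continued into the next log buffer costs an additional op header. A toy length calculation along those lines (hypothetical names; alignment and the special start/commit records are ignored):

    /* Sum of region payloads plus one op header per region; a region split
     * across log buffers would add one more op header per continuation.
     * Illustrative only. */
    static int vector_write_len(const int *region_len, int nregions,
                                int ophdr_size)
    {
        int len = 0;
        int i;

        for (i = 0; i < nregions; i++)
            len += region_len[i] + ophdr_size;
        return len;
    }
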
1796 struct log *log,
1811 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1814 return xlog_state_release_iclog(log, iclog);
1822 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
1826 spin_lock(&log->l_icloglock);
1827 xlog_state_want_sync(log, iclog);
1828 spin_unlock(&log->l_icloglock);
1831 return xlog_state_release_iclog(log, iclog);
1840 * Write some region out to in-core log
1854 * 2. Write log operation header (header per region)
1861 * 5. Release iclog for potential flush to on-disk log.
1871 * on all log operation writes which don't contain the end of the
1872 * region. The XLOG_END_TRANS bit is used for the in-core log
1881 struct log *log,
1903 if (log->l_cilp) {
1922 xlog_print_tic_res(log->l_mp, ticket);
1931 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
1964 ophdr = xlog_write_setup_ophdr(log, ptr, ticket, flags);
1977 xlog_verify_dest_ptr(log, ptr);
1988 error = xlog_write_copy_finish(log, iclog, flags,
2005 * count), then we also need to get more log space. If
2028 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2030 return xlog_state_release_iclog(log, iclog);
2048 * an ordered wait queue to hold off would-be writers to the log when every
2054 xlog_state_clean_log(xlog_t *log)
2059 iclog = log->l_iclog;
2094 } while (iclog != log->l_iclog);
2096 /* log is locked when we are called */
2098 * Change state for the dummy log recording.
2105 switch (log->l_covered_state) {
2109 log->l_covered_state = XLOG_STATE_COVER_NEED;
2114 log->l_covered_state = XLOG_STATE_COVER_NEED2;
2116 log->l_covered_state = XLOG_STATE_COVER_NEED;
2121 log->l_covered_state = XLOG_STATE_COVER_IDLE;
2123 log->l_covered_state = XLOG_STATE_COVER_NEED;
2134 xlog_t *log)
2139 lsn_log = log->l_iclog;
2150 } while (lsn_log != log->l_iclog);
2157 xlog_t *log,
2174 spin_lock(&log->l_icloglock);
2175 first_iclog = iclog = log->l_iclog;
2183 * log. Reset this starting point each time the log is
2189 first_iclog = log->l_iclog;
2190 iclog = log->l_iclog;
2205 * the log, we do flush all iclogs to disk (if there
2206 * wasn't a log I/O error). So, we do want things to
2244 lowest_lsn = xlog_get_lowest_lsn(log);
2255 spin_unlock(&log->l_icloglock);
2261 spin_lock(&log->l_grant_lock);
2262 ASSERT(XFS_LSN_CMP(log->l_last_sync_lsn,
2264 log->l_last_sync_lsn =
2266 spin_unlock(&log->l_grant_lock);
2269 spin_unlock(&log->l_icloglock);
2299 spin_lock(&log->l_icloglock);
2309 xlog_state_clean_log(log);
2320 xfs_fs_cmn_err(CE_WARN, log->l_mp,
2332 first_iclog = iclog = log->l_iclog;
2354 if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
2356 spin_unlock(&log->l_icloglock);
2359 sv_broadcast(&log->l_flush_wait);
2368 * when we reach the end of the physical log, get turned into 2 separate
2374 * global state machine log lock.
2381 xlog_t *log = iclog->ic_log;
2383 spin_lock(&log->l_icloglock);
2393 * split log writes, on the second, we mark ALL iclogs STATE_IOERROR,
2399 spin_unlock(&log->l_icloglock);
2411 spin_unlock(&log->l_icloglock);
2412 xlog_state_do_callback(log, aborted, iclog); /* also cleans log */
2417 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2427 * log's data space.
2428 * * in-core log pointer to which xlog_write() should write.
2429 * * boolean indicating this is a continued write to an in-core log.
2430 * If this is the last write, then the in-core log's offset field
2435 xlog_state_get_iclog_space(xlog_t *log,
2448 spin_lock(&log->l_icloglock);
2449 if (XLOG_FORCED_SHUTDOWN(log)) {
2450 spin_unlock(&log->l_icloglock);
2454 iclog = log->l_iclog;
2458 /* Wait for log writes to have flushed */
2459 sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
2474 ticket->t_curr_res -= log->l_iclog_hsize;
2476 log->l_iclog_hsize,
2478 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2480 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2481 ASSERT(log->l_curr_block >= 0);
2494 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2505 spin_unlock(&log->l_icloglock);
2506 error = xlog_state_release_iclog(log, iclog);
2510 spin_unlock(&log->l_icloglock);
2526 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2531 spin_unlock(&log->l_icloglock);
2538 * Atomically get the log space required for a log ticket.
2544 xlog_grant_log_space(xlog_t *log,
2555 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2560 spin_lock(&log->l_grant_lock);
2562 trace_xfs_log_grant_enter(log, tic);
2565 if (log->l_reserve_headq) {
2566 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2568 trace_xfs_log_grant_sleep1(log, tic);
2574 if (XLOG_FORCED_SHUTDOWN(log))
2578 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2583 trace_xfs_log_grant_wake1(log, tic);
2584 spin_lock(&log->l_grant_lock);
2592 if (XLOG_FORCED_SHUTDOWN(log))
2595 free_bytes = xlog_space_left(log, log->l_grant_reserve_cycle,
2596 log->l_grant_reserve_bytes);
2599 xlog_ins_ticketq(&log->l_reserve_headq, tic);
2601 trace_xfs_log_grant_sleep2(log, tic);
2603 spin_unlock(&log->l_grant_lock);
2604 xlog_grant_push_ail(log->l_mp, need_bytes);
2605 spin_lock(&log->l_grant_lock);
2608 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2610 spin_lock(&log->l_grant_lock);
2611 if (XLOG_FORCED_SHUTDOWN(log))
2614 trace_xfs_log_grant_wake2(log, tic);
2618 xlog_del_ticketq(&log->l_reserve_headq, tic);
2621 xlog_grant_add_space(log, need_bytes);
2623 tail_lsn = log->l_tail_lsn;
2630 if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
2631 ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
2632 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
2635 trace_xfs_log_grant_exit(log, tic);
2636 xlog_verify_grant_head(log, 1);
2637 spin_unlock(&log->l_grant_lock);
2642 xlog_del_ticketq(&log->l_reserve_headq, tic);
2644 trace_xfs_log_grant_error(log, tic);
2653 spin_unlock(&log->l_grant_lock);
2664 xlog_regrant_write_log_space(xlog_t *log,
2680 if (log->l_flags & XLOG_ACTIVE_RECOVERY)
2684 spin_lock(&log->l_grant_lock);
2686 trace_xfs_log_regrant_write_enter(log, tic);
2688 if (XLOG_FORCED_SHUTDOWN(log))
2698 if ((ntic = log->l_write_headq)) {
2699 free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
2700 log->l_grant_write_bytes);
2709 } while (ntic != log->l_write_headq);
2711 if (ntic != log->l_write_headq) {
2713 xlog_ins_ticketq(&log->l_write_headq, tic);
2715 trace_xfs_log_regrant_write_sleep1(log, tic);
2717 spin_unlock(&log->l_grant_lock);
2718 xlog_grant_push_ail(log->l_mp, need_bytes);
2719 spin_lock(&log->l_grant_lock);
2723 &log->l_grant_lock, s);
2727 spin_lock(&log->l_grant_lock);
2728 if (XLOG_FORCED_SHUTDOWN(log))
2731 trace_xfs_log_regrant_write_wake1(log, tic);
2736 if (XLOG_FORCED_SHUTDOWN(log))
2739 free_bytes = xlog_space_left(log, log->l_grant_write_cycle,
2740 log->l_grant_write_bytes);
2743 xlog_ins_ticketq(&log->l_write_headq, tic);
2744 spin_unlock(&log->l_grant_lock);
2745 xlog_grant_push_ail(log->l_mp, need_bytes);
2746 spin_lock(&log->l_grant_lock);
2749 trace_xfs_log_regrant_write_sleep2(log, tic);
2751 sv_wait(&tic->t_wait, PINOD|PLTWAIT, &log->l_grant_lock, s);
2754 spin_lock(&log->l_grant_lock);
2755 if (XLOG_FORCED_SHUTDOWN(log))
2758 trace_xfs_log_regrant_write_wake2(log, tic);
2761 xlog_del_ticketq(&log->l_write_headq, tic);
2764 xlog_grant_add_space_write(log, need_bytes);
2766 tail_lsn = log->l_tail_lsn;
2767 if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
2768 ASSERT(log->l_grant_write_cycle-1 == CYCLE_LSN(tail_lsn));
2769 ASSERT(log->l_grant_write_bytes <= BBTOB(BLOCK_LSN(tail_lsn)));
2773 trace_xfs_log_regrant_write_exit(log, tic);
2775 xlog_verify_grant_head(log, 1);
2776 spin_unlock(&log->l_grant_lock);
2782 xlog_del_ticketq(&log->l_reserve_headq, tic);
2784 trace_xfs_log_regrant_write_error(log, tic);
2793 spin_unlock(&log->l_grant_lock);
2806 xlog_regrant_reserve_log_space(xlog_t *log,
2809 trace_xfs_log_regrant_reserve_enter(log, ticket);
2814 spin_lock(&log->l_grant_lock);
2815 xlog_grant_sub_space(log, ticket->t_curr_res);
2819 trace_xfs_log_regrant_reserve_sub(log, ticket);
2821 xlog_verify_grant_head(log, 1);
2825 spin_unlock(&log->l_grant_lock);
2829 xlog_grant_add_space_reserve(log, ticket->t_unit_res);
2831 trace_xfs_log_regrant_reserve_exit(log, ticket);
2833 xlog_verify_grant_head(log, 0);
2834 spin_unlock(&log->l_grant_lock);
2855 xlog_ungrant_log_space(xlog_t *log,
2861 spin_lock(&log->l_grant_lock);
2862 trace_xfs_log_ungrant_enter(log, ticket);
2864 xlog_grant_sub_space(log, ticket->t_curr_res);
2866 trace_xfs_log_ungrant_sub(log, ticket);
2873 xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
2876 trace_xfs_log_ungrant_exit(log, ticket);
2878 xlog_verify_grant_head(log, 1);
2879 spin_unlock(&log->l_grant_lock);
2880 xfs_log_move_tail(log->l_mp, 1);
2895 xlog_t *log,
2904 if (!atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock))
2908 spin_unlock(&log->l_icloglock);
2916 xlog_assign_tail_lsn(log->l_mp);
2919 iclog->ic_header.h_tail_lsn = cpu_to_be64(log->l_tail_lsn);
2920 xlog_verify_tail_lsn(log, iclog, log->l_tail_lsn);
2923 spin_unlock(&log->l_icloglock);
2926 * We let the log lock go, so it's possible that we hit a log I/O
2933 return xlog_sync(log, iclog);
2943 * that every data block. We have run out of space in this log record.
2946 xlog_state_switch_iclogs(xlog_t *log,
2954 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2955 log->l_prev_block = log->l_curr_block;
2956 log->l_prev_cycle = log->l_curr_cycle;
2958 /* roll log?: ic_offset changed later */
2959 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2961 /* Round up to next log-sunit */
2962 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
2963 log->l_mp->m_sb.sb_logsunit > 1) {
2964 __uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
2965 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2968 if (log->l_curr_block >= log->l_logBBsize) {
2969 log->l_curr_cycle++;
2970 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2971 log->l_curr_cycle++;
2972 log->l_curr_block -= log->l_logBBsize;
2973 ASSERT(log->l_curr_block >= 0);
2975 ASSERT(iclog == log->l_iclog);
2976 log->l_iclog = iclog->ic_next;
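
xlog_state_switch_iclogs (file lines 2946-2976) records the previous block and cycle in the iclog header, advances l_curr_block past the iclog just filled, rounds it up to the log stripe unit on v2 logs, and wraps the cycle when the end of the log is reached, skipping the cycle value reserved for the header magic number. A small stand-alone model of that block/cycle advance (names are illustrative):

    /* curr_block/curr_cycle mirror l_curr_block/l_curr_cycle; log_bbs is
     * the log size in basic blocks, sunit_bb the stripe unit in basic
     * blocks (1 if none), magic_cycle the reserved cycle value. */
    static void advance_head(int *curr_block, int *curr_cycle,
                             int used_bbs, int log_bbs, int sunit_bb,
                             int magic_cycle)
    {
        *curr_block += used_bbs;                 /* data + header blocks */

        if (sunit_bb > 1)                        /* round up to stripe unit */
            *curr_block = ((*curr_block + sunit_bb - 1) / sunit_bb) * sunit_bb;

        if (*curr_block >= log_bbs) {            /* wrapped: next cycle */
            (*curr_cycle)++;
            if (*curr_cycle == magic_cycle)      /* never use the magic value */
                (*curr_cycle)++;
            *curr_block -= log_bbs;
        }
    }
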
2980 * Write out all data in the in-core log as of this exact moment in time.
2982 * Data may be written to the in-core log during this call. However,
3012 struct log *log = mp->m_log;
3018 if (log->l_cilp)
3019 xlog_cil_force(log);
3021 spin_lock(&log->l_icloglock);
3023 iclog = log->l_iclog;
3025 spin_unlock(&log->l_icloglock);
3060 xlog_state_switch_iclogs(log, iclog, 0);
3061 spin_unlock(&log->l_icloglock);
3063 if (xlog_state_release_iclog(log, iclog))
3068 spin_lock(&log->l_icloglock);
3080 xlog_state_switch_iclogs(log, iclog, 0);
3099 spin_unlock(&log->l_icloglock);
3103 sv_wait(&iclog->ic_force_wait, PINOD, &log->l_icloglock, s);
3105 * No need to grab the log lock here since we're
3116 spin_unlock(&log->l_icloglock);
3123 * about errors or whether the log was flushed or not. This is the normal
3124 * interface to use when trying to unpin items or move the log forward.
3141 * Force the in-core log to disk for a specific LSN.
3143 * Find in-core log with lsn.
3145 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
3151 * specific in-core log. When given in-core log finally completes its
3162 struct log *log = mp->m_log;
3170 if (log->l_cilp) {
3171 lsn = xlog_cil_force_lsn(log, lsn);
3177 spin_lock(&log->l_icloglock);
3178 iclog = log->l_iclog;
3180 spin_unlock(&log->l_icloglock);
3191 spin_unlock(&log->l_icloglock);
3207 * up the refcnt so we can release the log (which
3222 PSWP, &log->l_icloglock, s);
3229 xlog_state_switch_iclogs(log, iclog, 0);
3230 spin_unlock(&log->l_icloglock);
3231 if (xlog_state_release_iclog(log, iclog))
3235 spin_lock(&log->l_icloglock);
3243 * gotten a log write error.
3246 spin_unlock(&log->l_icloglock);
3250 sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
3252 * No need to grab the log lock here since we're
3262 spin_unlock(&log->l_icloglock);
3266 } while (iclog != log->l_iclog);
3268 spin_unlock(&log->l_icloglock);
3274 * about errors or whether the log was flushed or not. This is the normal
3275 * interface to use when trying to unpin items or move the log forward.
3297 xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog)
3299 assert_spin_locked(&log->l_icloglock);
3302 xlog_state_switch_iclogs(log, iclog, 0);
3348 * Allocate and initialise a new log ticket.
3352 struct log *log,
3368 * Permanent reservations have up to 'cnt'-1 active log operations
3369 * in the log. A unit in this case is the amount of space for one
3370 * of these log operations. Normal reservations have a cnt of 1
3374 * which occupy space in the on-disk log.
3411 * increase the space required enough to require more log and op
3419 * Fundamentally, this means we must pass the entire log vector to
3422 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3434 unit_bytes += log->l_iclog_hsize * num_headers;
3437 unit_bytes += log->l_iclog_hsize;
3440 if (xfs_sb_version_haslogv2(&log->l_mp->m_sb) &&
3441 log->l_mp->m_sb.sb_logsunit > 1) {
3442 /* log su roundoff */
3443 unit_bytes += 2*log->l_mp->m_sb.sb_logsunit;
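
xlog_ticket_alloc (file lines 3348-3443) pads the caller's reservation for worst-case overhead: one log record (iclog) header for every iclog the vector can spill into, one extra header for the commit record, and twice the stripe unit for roundoff on v2 logs. A hedged sketch of that padding with illustrative names; the real routine also accounts for per-region op headers and split-record overruns, which are omitted here:

    /* unit_bytes is the caller's per-transaction reservation;
     * iclog_size/iclog_hsize come from the log; logsunit is the v2 log
     * stripe unit (0 or 1 if none). Illustrative only. */
    static int pad_reservation(int unit_bytes, int iclog_size, int iclog_hsize,
                               int logsunit)
    {
        int iclog_space = iclog_size - iclog_hsize;  /* usable bytes per iclog */
        int num_headers;

        /* one iclog header for every iclog the vector may spill into */
        num_headers = (unit_bytes + iclog_space - 1) / iclog_space;
        unit_bytes += iclog_hsize * num_headers;

        /* one more header for the commit record */
        unit_bytes += iclog_hsize;

        /* stripe-unit roundoff, for the data and for the commit record */
        if (logsunit > 1)
            unit_bytes += 2 * logsunit;

        return unit_bytes;
    }
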
3478 * part of the log in case we trash the log structure.
3482 struct log *log,
3488 for (i = 0; i < log->l_iclog_bufs; i++) {
3489 if (ptr >= log->l_iclog_bak[i] &&
3490 ptr <= log->l_iclog_bak[i] + log->l_iclog_size)
3499 xlog_verify_grant_head(xlog_t *log, int equals)
3501 if (log->l_grant_reserve_cycle == log->l_grant_write_cycle) {
3503 ASSERT(log->l_grant_reserve_bytes >= log->l_grant_write_bytes);
3505 ASSERT(log->l_grant_reserve_bytes > log->l_grant_write_bytes);
3507 ASSERT(log->l_grant_reserve_cycle-1 == log->l_grant_write_cycle);
3508 ASSERT(log->l_grant_write_bytes >= log->l_grant_reserve_bytes);
3514 xlog_verify_tail_lsn(xlog_t *log,
3520 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3522 log->l_logBBsize - (log->l_prev_block - BLOCK_LSN(tail_lsn));
3523 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3524 xlog_panic("xlog_verify_tail_lsn: ran out of log space");
3526 ASSERT(CYCLE_LSN(tail_lsn)+1 == log->l_prev_cycle);
3528 if (BLOCK_LSN(tail_lsn) == log->l_prev_block)
3531 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3533 xlog_panic("xlog_verify_tail_lsn: ran out of log space");
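
xlog_verify_tail_lsn (file lines 3514-3533) is a debug check that writing the current iclog cannot overwrite the tail of the log: it computes how many blocks lie between the last written block and the tail, taking a cycle wrap into account, and panics if the iclog would not fit. A stand-alone sketch of that distance check (names are illustrative):

    /* Returns nonzero if writing iclog_bbs blocks after prev_block would
     * run into the tail. Illustrative model of the two cases above. */
    static int tail_would_be_overwritten(int tail_cycle, int tail_block,
                                         int prev_cycle, int prev_block,
                                         int log_bbs, int iclog_bbs)
    {
        int blocks;

        if (tail_cycle == prev_cycle) {
            /* same cycle: free space runs from prev_block to the end of
             * the log and then on to the tail */
            blocks = log_bbs - (prev_block - tail_block);
        } else {
            /* tail is one cycle behind the head (see the ASSERT at file
             * line 3526); the real routine special-cases an exactly equal
             * tail and prev block (file line 3528), skipped here */
            blocks = tail_block - prev_block;
        }
        return blocks < iclog_bbs;
    }
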
3543 * 4. Check fields of each log operation header for:
3546 * C. Length in log record header is correct according to the
3549 * log, check the preceding blocks of the physical log to make sure all
3553 xlog_verify_iclog(xlog_t *log,
3569 spin_lock(&log->l_icloglock);
3570 icptr = log->l_iclog;
3571 for (i=0; i < log->l_iclog_bufs; i++) {
3576 if (icptr != log->l_iclog)
3578 spin_unlock(&log->l_icloglock);
3580 /* check log magic numbers */
3648 xlog_t *log)
3652 iclog = log->l_iclog;
3656 * From now on, no log flushes will result.
3677 * b. those who're sleeping on log reservations, pinned objects and
3685 * to be done before the log is marked as shutdown, otherwise the flush to the
3694 xlog_t *log;
3697 log = mp->m_log;
3700 * If this happens during log recovery, don't worry about
3701 * locking; the log isn't open for business yet.
3703 if (!log ||
3704 log->l_flags & XLOG_ACTIVE_RECOVERY) {
3715 if (logerror && log->l_iclog->ic_state & XLOG_STATE_IOERROR) {
3716 ASSERT(XLOG_FORCED_SHUTDOWN(log));
3722 * Flush the in memory commit item list before marking the log as
3728 xlog_cil_force(log);
3735 spin_lock(&log->l_icloglock);
3736 spin_lock(&log->l_grant_lock);
3743 * it's good to maintain the separation between the log and the rest
3746 log->l_flags |= XLOG_IO_ERROR;
3749 * If we hit a log error, we want to mark all the iclogs IOERROR
3753 retval = xlog_state_ioerror(log);
3754 spin_unlock(&log->l_icloglock);
3757 * We don't want anybody waiting for log reservations
3764 if ((tic = log->l_reserve_headq)) {
3768 } while (tic != log->l_reserve_headq);
3771 if ((tic = log->l_write_headq)) {
3775 } while (tic != log->l_write_headq);
3777 spin_unlock(&log->l_grant_lock);
3779 if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
3783 * log down completely.
3787 spin_lock(&log->l_icloglock);
3788 retval = xlog_state_ioerror(log);
3789 spin_unlock(&log->l_icloglock);
3793 * Callback all log item committed functions as if the
3794 * log writes were completed.
3796 xlog_state_do_callback(log, XFS_LI_ABORTED, NULL);
3802 spin_lock(&log->l_icloglock);
3803 iclog = log->l_iclog;
3807 } while (iclog != log->l_iclog);
3808 spin_unlock(&log->l_icloglock);
3811 /* return non-zero if log IOERROR transition had already happened */
3816 xlog_iclogs_empty(xlog_t *log)
3820 iclog = log->l_iclog;
3828 } while (iclog != log->l_iclog);