Lines Matching refs:log

18 #include "raid5-log.h"
28 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
30 * In write through mode, the reclaim runs every log->max_free_space.
70 * writes are committed from the log device. Therefore, a stripe in
72 * - write to log device
87 sector_t device_size; /* log device size, rounded to
92 sector_t last_checkpoint; /* log tail. where recovery scan
94 u64 last_cp_seq; /* log tail sequence */
96 sector_t log_start; /* log head. where new data appends */
97 u64 seq; /* log head sequence */
107 * written to the log */
109 * written to the log but not yet written
111 struct list_head flushing_ios; /* io_units which are waiting for log
113 struct list_head finished_ios; /* io_units which settle down in log disk */
133 struct list_head no_space_stripes; /* pending stripes, log has no space */
205 * unit is written to log disk with normal write, as we always flush log disk
210 struct r5l_log *log;
221 struct list_head log_sibling; /* log->running_ios */
244 IO_UNIT_IO_START = 1, /* io_unit bio start writing to log,
246 IO_UNIT_IO_END = 2, /* io_unit bio finish writing to log */
250 bool r5c_is_writeback(struct r5l_log *log)
252 return (log != NULL &&
253 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
256 static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
259 if (start >= log->device_size)
260 start = start - log->device_size;
264 static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
270 return end + log->device_size - start;
273 static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
277 used_size = r5l_ring_distance(log, log->last_checkpoint,
278 log->log_start);
280 return log->device_size > used_size + size;
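
The ring arithmetic above is self-contained enough to check in userspace. Below is a minimal sketch, not the kernel code: DEVICE_SIZE and the test values are invented, but the wrap, distance and free-space logic mirrors r5l_ring_add(), r5l_ring_distance() and r5l_has_free_space() as matched above.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;

#define DEVICE_SIZE 1024	/* hypothetical log size in sectors */

/* advance a position on the ring, wrapping at the device size */
static sector_t ring_add(sector_t start, sector_t inc)
{
	start += inc;
	if (start >= DEVICE_SIZE)
		start -= DEVICE_SIZE;
	return start;
}

/* distance from start to end, walking forward around the ring */
static sector_t ring_distance(sector_t start, sector_t end)
{
	return end >= start ? end - start : end + DEVICE_SIZE - start;
}

/* used space is tail..head; the strict '>' leaves at least one free
 * sector so the head never catches the tail */
static bool has_free_space(sector_t tail, sector_t head, sector_t size)
{
	return DEVICE_SIZE > ring_distance(tail, head) + size;
}

int main(void)
{
	assert(ring_add(1000, 100) == 76);	/* wraps */
	assert(ring_distance(1000, 76) == 100);	/* across the wrap */
	assert(has_free_space(1000, 76, 900));	/* 924 sectors left */
	return 0;
}
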
324 void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
330 struct r5l_log *log = READ_ONCE(conf->log);
332 if (!r5c_is_writeback(log))
348 r5l_wake_reclaim(log, 0);
357 struct r5l_log *log = READ_ONCE(conf->log);
359 if (!r5c_is_writeback(log))
369 r5l_wake_reclaim(log, 0);
373 * Total log space (in sectors) needed to flush all data in cache
375 * To avoid deadlock due to log space, it is necessary to reserve log
376 * space to flush critical stripes (stripes that occupy log space near
377 * last_checkpoint). This function helps check how much log space is
380 * To reduce log space requirements, two mechanisms are used to give cache
402 struct r5l_log *log = READ_ONCE(conf->log);
404 if (!r5c_is_writeback(log))
408 ((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
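
Only part of the return expression is matched above; the visible term reserves (max_degraded + 1) blocks per stripe already in the journal. A hedged model follows: the BLOCK_SECTORS multiplier and the trailing per-stripe headroom term are assumptions about the elided portion, not shown in the matches.

#include <stdint.h>
#include <stdio.h>

#define BLOCK_SECTORS 8	/* 4KiB block in 512-byte sectors (assumed) */

/* space (sectors) needed to flush everything currently in the cache:
 * one data+parity set per cached stripe, plus headroom for one full
 * stripe (the trailing term is an assumption, not visible above) */
static uint64_t required_to_flush(int max_degraded, int raid_disks,
				  long stripe_in_journal_count)
{
	return BLOCK_SECTORS *
	       ((uint64_t)(max_degraded + 1) * stripe_in_journal_count +
		(raid_disks - max_degraded));
}

int main(void)
{
	/* RAID6 (max_degraded = 2), 8 disks, 100 stripes in journal */
	printf("%llu sectors\n",
	       (unsigned long long)required_to_flush(2, 8, 100));
	return 0;
}
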
413 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
415 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
416 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
419 static inline void r5c_update_log_state(struct r5l_log *log)
421 struct r5conf *conf = log->rdev->mddev->private;
426 if (!r5c_is_writeback(log))
429 free_space = r5l_ring_distance(log, log->log_start,
430 log->last_checkpoint);
445 r5l_wake_reclaim(log, 0);
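
The watermarks described above (TIGHT below 3x reclaim_required_space of free space, CRITICAL below 2x) reduce to two comparisons. A sketch of just that threshold logic; the real function also sets and clears bits in conf->cache_state and wakes reclaim.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* watermarks from the comment above: TIGHT below 3x the space needed
 * to flush the cache, CRITICAL below 2x */
static void update_log_state(uint64_t free_space, uint64_t required,
			     bool *tight, bool *critical)
{
	*tight = free_space < 3 * required;
	*critical = free_space < 2 * required;
}

int main(void)
{
	bool tight, critical;

	update_log_state(2500, 1000, &tight, &critical);
	printf("tight=%d critical=%d\n", tight, critical); /* 1 0 */
	return 0;
}
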
455 struct r5l_log *log = READ_ONCE(conf->log);
457 BUG_ON(!r5c_is_writeback(log));
493 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
497 struct r5l_log *log = READ_ONCE(sh->raid_conf->log);
499 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
530 static void r5l_log_run_stripes(struct r5l_log *log)
534 lockdep_assert_held(&log->io_list_lock);
536 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
541 list_move_tail(&io->log_sibling, &log->finished_ios);
546 static void r5l_move_to_end_ios(struct r5l_log *log)
550 lockdep_assert_held(&log->io_list_lock);
552 list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
556 list_move_tail(&io->log_sibling, &log->io_end_ios);
565 struct r5l_log *log = io->log;
571 md_error(log->rdev->mddev, log->rdev);
574 mempool_free(io->meta_page, &log->meta_pool);
576 spin_lock_irqsave(&log->io_list_lock, flags);
588 if (log->need_cache_flush && !list_empty(&io->stripe_list))
589 r5l_move_to_end_ios(log);
591 r5l_log_run_stripes(log);
592 if (!list_empty(&log->running_ios)) {
597 io_deferred = list_first_entry(&log->running_ios,
600 schedule_work(&log->deferred_io_work);
603 spin_unlock_irqrestore(&log->io_list_lock, flags);
605 if (log->need_cache_flush)
606 md_wakeup_thread(log->rdev->mddev->thread);
627 static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
631 spin_lock_irqsave(&log->io_list_lock, flags);
633 spin_unlock_irqrestore(&log->io_list_lock, flags);
664 struct r5l_log *log = container_of(work, struct r5l_log,
669 spin_lock_irqsave(&log->io_list_lock, flags);
670 if (!list_empty(&log->running_ios)) {
671 io = list_first_entry(&log->running_ios, struct r5l_io_unit,
678 spin_unlock_irqrestore(&log->io_list_lock, flags);
680 r5l_do_submit_io(log, io);
685 struct r5l_log *log = container_of(work, struct r5l_log,
687 struct mddev *mddev = log->rdev->mddev;
690 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
697 !READ_ONCE(conf->log) ||
700 log = READ_ONCE(conf->log);
701 if (log) {
703 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
708 static void r5l_submit_current_io(struct r5l_log *log)
710 struct r5l_io_unit *io = log->current_io;
721 crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
724 log->current_io = NULL;
725 spin_lock_irqsave(&log->io_list_lock, flags);
727 if (io != list_first_entry(&log->running_ios,
733 spin_unlock_irqrestore(&log->io_list_lock, flags);
735 r5l_do_submit_io(log, io);
738 static struct bio *r5l_bio_alloc(struct r5l_log *log)
740 struct bio *bio = bio_alloc_bioset(log->rdev->bdev, BIO_MAX_VECS,
741 REQ_OP_WRITE, GFP_NOIO, &log->bs);
743 bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
748 static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
750 log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
752 r5c_update_log_state(log);
754 * If we filled up the log device, start from the beginning again,
757 * Note: for this to work properly the log size needs to be a multiple
760 if (log->log_start == 0)
763 io->log_end = log->log_start;
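
A small model of r5_reserve_log_entry(): the head advances one BLOCK_SECTORS block per reservation, and a wrap back to sector 0 forces a bio split because a bio cannot span the end of the device. The constants here are illustrative; the "multiple of BLOCK_SECTORS" note above is what makes the modulo land exactly on 0.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SECTORS 8
#define DEVICE_SIZE (BLOCK_SECTORS * 4)	/* multiple of BLOCK_SECTORS */

/* advance the head by one block; wrapping to 0 means the in-flight bio
 * cannot be extended across the boundary and a new bio is needed */
static uint64_t reserve_log_entry(uint64_t log_start, bool *need_split_bio)
{
	log_start = (log_start + BLOCK_SECTORS) % DEVICE_SIZE;
	*need_split_bio = (log_start == 0);
	return log_start;
}

int main(void)
{
	bool split;
	uint64_t pos = DEVICE_SIZE - BLOCK_SECTORS;

	pos = reserve_log_entry(pos, &split);
	printf("pos=%llu split=%d\n", (unsigned long long)pos, split); /* 0 1 */
	return 0;
}
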
766 static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
771 io = mempool_alloc(&log->io_pool, GFP_ATOMIC);
776 io->log = log;
782 io->meta_page = mempool_alloc(&log->meta_pool, GFP_NOIO);
787 block->seq = cpu_to_le64(log->seq);
788 block->position = cpu_to_le64(log->log_start);
790 io->log_start = log->log_start;
792 io->seq = log->seq++;
794 io->current_bio = r5l_bio_alloc(log);
799 r5_reserve_log_entry(log, io);
801 spin_lock_irq(&log->io_list_lock);
802 list_add_tail(&io->log_sibling, &log->running_ios);
803 spin_unlock_irq(&log->io_list_lock);
808 static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
810 if (log->current_io &&
811 log->current_io->meta_offset + payload_size > PAGE_SIZE)
812 r5l_submit_current_io(log);
814 if (!log->current_io) {
815 log->current_io = r5l_new_meta(log);
816 if (!log->current_io)
823 static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
828 struct r5l_io_unit *io = log->current_io;
845 static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
847 struct r5l_io_unit *io = log->current_io;
852 io->current_bio = r5l_bio_alloc(log);
860 r5_reserve_log_entry(log, io);
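
Putting r5l_get_meta() and the append helpers together: each io_unit carries one 4KiB meta page of payload descriptors, and the running io_unit is submitted as soon as the next descriptor would overflow that page. A toy model of the fill policy; META_HEADER is a stand-in for the real meta-block header size, and the descriptor accounting (done by r5l_append_payload_meta() in the driver) is folded into get_meta() here.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define META_HEADER 48	/* stand-in for sizeof(struct r5l_meta_block) */

static size_t meta_offset;	/* fill level of the current meta page */
static int have_io;

static void submit_current_io(void)
{
	printf("submit meta block, %zu bytes of descriptors\n", meta_offset);
	have_io = 0;
}

/* submit-when-full policy mirrored from r5l_get_meta() */
static void get_meta(size_t payload_size)
{
	if (have_io && meta_offset + payload_size > PAGE_SIZE)
		submit_current_io();
	if (!have_io) {
		meta_offset = META_HEADER;
		have_io = 1;
	}
	meta_offset += payload_size;
}

int main(void)
{
	for (int i = 0; i < 9; i++)
		get_meta(512);		/* hypothetical descriptor sizes */
	if (have_io)
		submit_current_io();	/* r5l_write_stripe_run() analogue */
	return 0;
}
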
863 static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
865 struct mddev *mddev = log->rdev->mddev;
879 mutex_lock(&log->io_mutex);
882 if (r5l_get_meta(log, meta_size)) {
883 mutex_unlock(&log->io_mutex);
888 io = log->current_io;
900 mutex_unlock(&log->io_mutex);
903 static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
917 ret = r5l_get_meta(log, meta_size);
921 io = log->current_io;
933 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) {
941 r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
944 r5l_append_payload_page(log, sh->dev[i].page);
948 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
951 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
952 r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
954 r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
957 r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
965 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
971 spin_lock_irq(&log->stripe_in_journal_lock);
973 &log->stripe_in_journal_list);
974 spin_unlock_irq(&log->stripe_in_journal_lock);
975 atomic_inc(&log->stripe_in_journal_count);
981 static inline void r5l_add_no_space_stripe(struct r5l_log *log,
984 spin_lock(&log->no_space_stripes_lock);
985 list_add_tail(&sh->log_list, &log->no_space_stripes);
986 spin_unlock(&log->no_space_stripes_lock);
991 * data from log to raid disks), so we shouldn't wait for reclaim here
993 int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
1003 if (!log)
1008 /* the stripe is written to the log; we start writing it to raid */
1027 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
1042 mutex_lock(&log->io_mutex);
1046 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1047 if (!r5l_has_free_space(log, reserve)) {
1048 r5l_add_no_space_stripe(log, sh);
1051 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1053 spin_lock_irq(&log->io_list_lock);
1055 &log->no_mem_stripes);
1056 spin_unlock_irq(&log->io_list_lock);
1061 * log space critical, do not process stripes that are
1066 r5l_add_no_space_stripe(log, sh);
1069 } else if (!r5l_has_free_space(log, reserve)) {
1070 if (sh->log_start == log->last_checkpoint)
1073 r5l_add_no_space_stripe(log, sh);
1075 ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
1077 spin_lock_irq(&log->io_list_lock);
1079 &log->no_mem_stripes);
1080 spin_unlock_irq(&log->io_list_lock);
1085 mutex_unlock(&log->io_mutex);
1087 r5l_wake_reclaim(log, reserve);
1091 void r5l_write_stripe_run(struct r5l_log *log)
1093 if (!log)
1095 mutex_lock(&log->io_mutex);
1096 r5l_submit_current_io(log);
1097 mutex_unlock(&log->io_mutex);
1100 int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
1102 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
1105 * we flush log disk cache first, then write stripe data to
1106 * raid disks. So if bio is finished, the log disk cache is
1108 * the bio from log disk, so we don't need to flush again
1118 mutex_lock(&log->io_mutex);
1119 r5l_get_meta(log, 0);
1120 bio_list_add(&log->current_io->flush_barriers, bio);
1121 log->current_io->has_flush = 1;
1122 log->current_io->has_null_flush = 1;
1123 atomic_inc(&log->current_io->pending_stripe);
1124 r5l_submit_current_io(log);
1125 mutex_unlock(&log->io_mutex);
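
The two branches of r5l_handle_flush_request() can be summarized as a decision: in write-through the log-disk cache is flushed before data reaches the raid disks, so an empty PREFLUSH bio is already satisfied; in write-back an empty flush becomes a has_flush/has_null_flush barrier on the current io_unit and completes when that meta block hits the log. A sketch of just the decision, with invented return conventions:

#include <stdbool.h>

enum journal_mode { WRITE_THROUGH, WRITE_BACK };

struct flush_result {
	bool complete_now;	/* end the bio immediately */
	bool queue_barrier;	/* ride on the next submitted meta block */
};

/* decision logic of the flush path sketched from the matches above */
static struct flush_result handle_flush(enum journal_mode mode, bool empty)
{
	struct flush_result r = { false, false };

	if (mode == WRITE_THROUGH) {
		/* log cache was flushed before data hit the raid disks,
		 * so an empty flush has nothing left to do */
		r.complete_now = empty;
	} else if (empty) {
		/* write back: becomes a has_flush/has_null_flush barrier
		 * on the current io_unit, finished at log-write endio */
		r.queue_barrier = true;
	}
	return r;
}

int main(void)
{
	struct flush_result r = handle_flush(WRITE_THROUGH, true);
	return r.complete_now ? 0 : 1;
}
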
1132 /* This will run after log space is reclaimed */
1133 static void r5l_run_no_space_stripes(struct r5l_log *log)
1137 spin_lock(&log->no_space_stripes_lock);
1138 while (!list_empty(&log->no_space_stripes)) {
1139 sh = list_first_entry(&log->no_space_stripes,
1145 spin_unlock(&log->no_space_stripes_lock);
1150 * for write through mode, returns log->next_checkpoint
1156 struct r5l_log *log = READ_ONCE(conf->log);
1160 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
1161 return log->next_checkpoint;
1163 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1164 if (list_empty(&log->stripe_in_journal_list)) {
1166 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1167 return log->next_checkpoint;
1169 sh = list_first_entry(&log->stripe_in_journal_list,
1172 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1176 static sector_t r5l_reclaimable_space(struct r5l_log *log)
1178 struct r5conf *conf = log->rdev->mddev->private;
1180 return r5l_ring_distance(log, log->last_checkpoint,
1184 static void r5l_run_no_mem_stripe(struct r5l_log *log)
1188 lockdep_assert_held(&log->io_list_lock);
1190 if (!list_empty(&log->no_mem_stripes)) {
1191 sh = list_first_entry(&log->no_mem_stripes,
1199 static bool r5l_complete_finished_ios(struct r5l_log *log)
1204 lockdep_assert_held(&log->io_list_lock);
1206 list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
1211 log->next_checkpoint = io->log_start;
1214 mempool_free(io, &log->io_pool);
1215 r5l_run_no_mem_stripe(log);
1225 struct r5l_log *log = io->log;
1226 struct r5conf *conf = log->rdev->mddev->private;
1229 spin_lock_irqsave(&log->io_list_lock, flags);
1232 if (!r5l_complete_finished_ios(log)) {
1233 spin_unlock_irqrestore(&log->io_list_lock, flags);
1237 if (r5l_reclaimable_space(log) > log->max_free_space ||
1239 r5l_wake_reclaim(log, 0);
1241 spin_unlock_irqrestore(&log->io_list_lock, flags);
1242 wake_up(&log->iounit_wait);
1258 struct r5l_log *log = container_of(bio, struct r5l_log,
1264 md_error(log->rdev->mddev, log->rdev);
1267 spin_lock_irqsave(&log->io_list_lock, flags);
1268 list_for_each_entry(io, &log->flushing_ios, log_sibling)
1270 list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
1271 spin_unlock_irqrestore(&log->io_list_lock, flags);
1276 * The log consists of io_units (meta). There is one situation we want to avoid: a
1277 * broken meta in the middle of the log prevents recovery from finding the meta at the
1278 * head of the log. If an operation requires the meta at the head to be persistent in the log, we
1279 * must make sure the meta before it is persistent in the log too. A case is:
1281 * stripe data/parity is in the log; we start writing the stripe to the raid disks. The stripe
1282 * data/parity must be persistent in the log before we do the write to the raid disks.
1286 * one whose data/parity is in the log.
1288 void r5l_flush_stripe_to_raid(struct r5l_log *log)
1292 if (!log || !log->need_cache_flush)
1295 spin_lock_irq(&log->io_list_lock);
1297 if (!list_empty(&log->flushing_ios)) {
1298 spin_unlock_irq(&log->io_list_lock);
1301 list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
1302 do_flush = !list_empty(&log->flushing_ios);
1303 spin_unlock_irq(&log->io_list_lock);
1307 bio_init(&log->flush_bio, log->rdev->bdev, NULL, 0,
1309 log->flush_bio.bi_end_io = r5l_log_flush_endio;
1310 submit_bio(&log->flush_bio);
1313 static void r5l_write_super(struct r5l_log *log, sector_t cp);
1314 static void r5l_write_super_and_discard_space(struct r5l_log *log,
1317 struct block_device *bdev = log->rdev->bdev;
1320 r5l_write_super(log, end);
1325 mddev = log->rdev->mddev;
1328 * superblock is updated to new log tail. Updating superblock (either
1345 if (log->last_checkpoint < end) {
1347 log->last_checkpoint + log->rdev->data_offset,
1348 end - log->last_checkpoint, GFP_NOIO);
1351 log->last_checkpoint + log->rdev->data_offset,
1352 log->device_size - log->last_checkpoint,
1354 blkdev_issue_discard(bdev, log->rdev->data_offset, end,
1404 if (!READ_ONCE(conf->log))
1425 struct r5l_log *log = READ_ONCE(conf->log);
1433 if (!r5c_is_writeback(log))
1467 /* if log space is tight, flush stripes on stripe_in_journal_list */
1469 spin_lock_irqsave(&log->stripe_in_journal_lock, flags);
1471 list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) {
1489 spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags);
1493 r5l_run_no_space_stripes(log);
1498 static void r5l_do_reclaim(struct r5l_log *log)
1500 struct r5conf *conf = log->rdev->mddev->private;
1501 sector_t reclaim_target = xchg(&log->reclaim_target, 0);
1506 spin_lock_irq(&log->io_list_lock);
1507 write_super = r5l_reclaimable_space(log) > log->max_free_space ||
1508 reclaim_target != 0 || !list_empty(&log->no_space_stripes);
1515 reclaimable = r5l_reclaimable_space(log);
1517 (list_empty(&log->running_ios) &&
1518 list_empty(&log->io_end_ios) &&
1519 list_empty(&log->flushing_ios) &&
1520 list_empty(&log->finished_ios)))
1523 md_wakeup_thread(log->rdev->mddev->thread);
1524 wait_event_lock_irq(log->iounit_wait,
1525 r5l_reclaimable_space(log) > reclaimable,
1526 log->io_list_lock);
1530 spin_unlock_irq(&log->io_list_lock);
1537 * here, because the log area might be reused soon and we don't want to
1540 r5l_write_super_and_discard_space(log, next_checkpoint);
1542 mutex_lock(&log->io_mutex);
1543 log->last_checkpoint = next_checkpoint;
1544 r5c_update_log_state(log);
1545 mutex_unlock(&log->io_mutex);
1547 r5l_run_no_space_stripes(log);
1554 struct r5l_log *log = READ_ONCE(conf->log);
1556 if (!log)
1559 r5l_do_reclaim(log);
1562 void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
1567 if (!log)
1570 target = READ_ONCE(log->reclaim_target);
1574 } while (!try_cmpxchg(&log->reclaim_target, &target, new));
1575 md_wakeup_thread(log->reclaim_thread);
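
r5l_wake_reclaim() raises reclaim_target monotonically without a lock; racing callers keep the largest request. A userspace equivalent using C11 atomics (the early return for a smaller request is inferred from the pattern, not visible in the matches):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t reclaim_target;

/* lock-free "raise to max": compare_exchange_weak reloads `target` on
 * failure, the same way try_cmpxchg() does in the kernel */
static void wake_reclaim(uint64_t space)
{
	uint64_t target = atomic_load(&reclaim_target);

	do {
		if (space < target)
			return;	/* someone already asked for more */
	} while (!atomic_compare_exchange_weak(&reclaim_target, &target,
					       space));
	/* md_wakeup_thread(log->reclaim_thread) would follow here */
}

int main(void)
{
	wake_reclaim(100);
	wake_reclaim(50);	/* ignored: target already higher */
	printf("target=%llu\n",
	       (unsigned long long)atomic_load(&reclaim_target)); /* 100 */
	return 0;
}
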
1578 void r5l_quiesce(struct r5l_log *log, int quiesce)
1580 struct mddev *mddev = log->rdev->mddev;
1582 log->reclaim_thread, lockdep_is_held(&mddev->reconfig_mutex));
1588 r5l_wake_reclaim(log, MaxSector);
1589 r5l_do_reclaim(log);
1596 struct r5l_log *log = READ_ONCE(conf->log);
1599 if (!log)
1602 return test_bit(Faulty, &log->rdev->flags);
1618 * in recovery, the log is read sequentially. It is not efficient to
1620 * reads multiple pages with one IO, so further log reads can
1630 static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
1653 static void r5l_recovery_free_ra_pool(struct r5l_log *log,
1668 static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
1675 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1677 bio.bi_iter.bi_sector = log->rdev->data_offset + offset;
1687 offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
1702 static int r5l_recovery_read_page(struct r5l_log *log,
1711 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1726 static int r5l_recovery_read_meta_block(struct r5l_log *log,
1734 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1748 crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
1761 r5l_recovery_create_empty_meta_block(struct r5l_log *log,
1776 static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
1785 r5l_recovery_create_empty_meta_block(log, page, pos, seq);
1787 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
1789 if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE |
1805 static void r5l_recovery_load_data(struct r5l_log *log,
1811 struct mddev *mddev = log->rdev->mddev;
1818 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1827 static void r5l_recovery_load_parity(struct r5l_log *log,
1833 struct mddev *mddev = log->rdev->mddev;
1837 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1844 log, ctx, sh->dev[sh->qd_idx].page,
1845 r5l_ring_add(log, log_offset, BLOCK_SECTORS));
1973 r5l_recovery_verify_data_checksum(struct r5l_log *log,
1981 r5l_recovery_read_page(log, ctx, page, log_offset);
1983 checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
1993 r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
1996 struct mddev *mddev = log->rdev->mddev;
2000 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2015 log, ctx, page, log_offset,
2020 log, ctx, page, log_offset,
2025 log, ctx, page,
2026 r5l_ring_add(log, log_offset,
2040 log_offset = r5l_ring_add(log, log_offset,
2066 r5c_recovery_analyze_meta_block(struct r5l_log *log,
2070 struct mddev *mddev = log->rdev->mddev;
2086 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2094 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2177 r5l_recovery_load_data(log, sh, ctx, payload,
2180 r5l_recovery_load_parity(log, sh, ctx, payload,
2185 log_offset = r5l_ring_add(log, log_offset,
2200 static void r5c_recovery_load_one_stripe(struct r5l_log *log,
2216 * Scan through the log for all to-be-flushed data
2231 static int r5c_recovery_flush_log(struct r5l_log *log,
2237 /* scan through the log */
2239 if (r5l_recovery_read_meta_block(log, ctx))
2242 ret = r5c_recovery_analyze_meta_block(log, ctx,
2251 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2265 r5c_recovery_load_one_stripe(log, sh);
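
The scan loop in r5c_recovery_flush_log() has a simple shape: read the meta block at pos, analyze it, then advance pos on the ring by the meta block plus its payload blocks, bumping seq each step, until a read or checksum/sequence check fails. A runnable caricature with a stubbed reader; block counts and the starting position are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEVICE_SIZE 1024
#define BLOCK_SECTORS 8

static uint64_t ring_add(uint64_t p, uint64_t inc)
{
	return (p + inc) % DEVICE_SIZE;
}

/* stand-in: pretend the log holds 5 valid meta blocks, then garbage */
static bool read_meta_block(uint64_t pos, uint64_t seq)
{
	static int valid = 5;
	(void)pos; (void)seq;
	return valid-- > 0;
}

int main(void)
{
	uint64_t pos = 16, seq = 42;	/* hypothetical checkpoint */

	/* scan through the log until the first invalid meta block */
	while (read_meta_block(pos, seq)) {
		uint64_t total = 3 * BLOCK_SECTORS; /* meta + payloads */
		seq++;
		pos = ring_add(pos, total);	/* ctx->meta_total_blocks */
	}
	printf("log ends at pos=%llu seq=%llu\n",
	       (unsigned long long)pos, (unsigned long long)seq);
	return 0;
}
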
2274 * log will start here. But we can't let the superblock point to the last valid
2275 * meta block. The log might look like:
2287 * Before recovery, the log looks like the following
2290 * | valid log | invalid log |
2293 * |- log->last_checkpoint
2294 * |- log->last_cp_seq
2296 * Now we scan through the log until we see an invalid entry
2299 * | valid log | invalid log |
2302 * |- log->last_checkpoint |- ctx->pos
2303 * |- log->last_cp_seq |- ctx->seq
2309 * | valid log | invalid log |
2312 * |- log->last_checkpoint |- ctx->pos+1
2313 * |- log->last_cp_seq |- ctx->seq+10001
2320 * | valid log | data only stripes | invalid log |
2323 * |- log->last_checkpoint |- ctx->pos+n
2324 * |- log->last_cp_seq |- ctx->seq+10000+n
2327 * again from log->last_checkpoint.
2332 * | old log | data only stripes | invalid log |
2335 * |- log->last_checkpoint |- ctx->pos+n
2336 * |- log->last_cp_seq |- ctx->seq+10000+n
2339 * point on, the recovery will start from the new log->last_checkpoint.
2342 r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
2346 struct mddev *mddev = log->rdev->mddev;
2366 r5l_recovery_create_empty_meta_block(log, page,
2370 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2386 crc32c_le(log->uuid_checksum, addr,
2389 sync_page_io(log->rdev, write_pos, PAGE_SIZE,
2391 write_pos = r5l_ring_add(log, write_pos,
2399 mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum,
2401 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2404 list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
2405 atomic_inc(&log->stripe_in_journal_count);
2410 log->next_checkpoint = next_checkpoint;
2415 static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
2418 struct mddev *mddev = log->rdev->mddev;
2430 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK;
2443 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
2448 static int r5l_recovery_log(struct r5l_log *log)
2450 struct mddev *mddev = log->rdev->mddev;
2459 ctx->pos = log->last_checkpoint;
2460 ctx->seq = log->last_cp_seq;
2469 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2474 ret = r5c_recovery_flush_log(log, ctx);
2491 log->next_checkpoint = ctx->pos;
2492 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2493 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2494 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2501 log->log_start = ctx->pos;
2502 log->seq = ctx->seq;
2503 log->last_checkpoint = pos;
2504 r5l_write_super(log, pos);
2506 r5c_recovery_flush_data_only_stripes(log, ctx);
2509 r5l_recovery_free_ra_pool(log, ctx);
2517 static void r5l_write_super(struct r5l_log *log, sector_t cp)
2519 struct mddev *mddev = log->rdev->mddev;
2521 log->rdev->journal_tail = cp;
2535 if (!conf || !conf->log)
2538 switch (conf->log->r5c_journal_mode) {
2575 if (!conf || !conf->log)
2582 conf->log->r5c_journal_mode = mode;
2632 struct r5l_log *log = READ_ONCE(conf->log);
2641 BUG_ON(!r5c_is_writeback(log));
2691 spin_lock(&log->tree_lock);
2692 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2696 pslot, &log->tree_lock) >>
2699 &log->big_stripe_tree, pslot,
2707 &log->big_stripe_tree, tree_index,
2710 spin_unlock(&log->tree_lock);
2715 spin_unlock(&log->tree_lock);
2799 struct r5l_log *log = READ_ONCE(conf->log);
2806 if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags))
2812 if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
2834 spin_lock_irq(&log->stripe_in_journal_lock);
2836 spin_unlock_irq(&log->stripe_in_journal_lock);
2839 atomic_dec(&log->stripe_in_journal_count);
2840 r5c_update_log_state(log);
2846 spin_lock(&log->tree_lock);
2847 pslot = radix_tree_lookup_slot(&log->big_stripe_tree,
2851 pslot, &log->tree_lock) >>
2854 radix_tree_delete(&log->big_stripe_tree, tree_index);
2857 &log->big_stripe_tree, pslot,
2859 spin_unlock(&log->tree_lock);
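
big_stripe_tree maps a chunk (tree_index) to a count of cached stripes touching it; the matches above show the count being shifted in and out of the radix-tree slot and the entry deleted when it drops to zero. A very loose model with a flat array standing in for the radix tree; R5C_RADIX_COUNT_SHIFT reserving the low slot bits is an assumption:

#include <stdint.h>
#include <stdio.h>

#define R5C_RADIX_COUNT_SHIFT 2	/* low slot bits reserved (assumption) */
#define TREE_SLOTS 64

static uintptr_t tree[TREE_SLOTS];	/* stand-in for big_stripe_tree */

/* one more cached stripe touches this chunk */
static void big_stripe_inc(unsigned idx)
{
	uintptr_t count = tree[idx] >> R5C_RADIX_COUNT_SHIFT;

	tree[idx] = (count + 1) << R5C_RADIX_COUNT_SHIFT;
}

/* one fewer; dropping to zero deletes the entry (radix_tree_delete) */
static void big_stripe_dec(unsigned idx)
{
	uintptr_t count = tree[idx] >> R5C_RADIX_COUNT_SHIFT;

	tree[idx] = count <= 1 ? 0 : (count - 1) << R5C_RADIX_COUNT_SHIFT;
}

int main(void)
{
	big_stripe_inc(3);
	big_stripe_inc(3);
	big_stripe_dec(3);
	printf("count=%lu\n",
	       (unsigned long)(tree[3] >> R5C_RADIX_COUNT_SHIFT)); /* 1 */
	return 0;
}
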
2874 r5l_append_flush_payload(log, sh->sector);
2880 int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh)
2888 BUG_ON(!log);
2896 sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
2910 mutex_lock(&log->io_mutex);
2916 r5l_add_no_space_stripe(log, sh);
2917 else if (!r5l_has_free_space(log, reserve)) {
2918 if (sh->log_start == log->last_checkpoint)
2921 r5l_add_no_space_stripe(log, sh);
2923 ret = r5l_log_stripe(log, sh, pages, 0);
2925 spin_lock_irq(&log->io_list_lock);
2926 list_add_tail(&sh->log_list, &log->no_mem_stripes);
2927 spin_unlock_irq(&log->io_list_lock);
2931 mutex_unlock(&log->io_mutex);
2938 struct r5l_log *log = READ_ONCE(conf->log);
2942 if (!log)
2946 slot = radix_tree_lookup(&log->big_stripe_tree, tree_index);
2950 static int r5l_load_log(struct r5l_log *log)
2952 struct md_rdev *rdev = log->rdev;
2955 sector_t cp = log->rdev->journal_tail;
2980 expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
2991 log->last_cp_seq = get_random_u32();
2993 r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq);
2996 * data very soon. If the superblock doesn't have the correct log tail address,
2997 * recovery can't find the log
2999 r5l_write_super(log, cp);
3001 log->last_cp_seq = le64_to_cpu(mb->seq);
3003 log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
3004 log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
3005 if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
3006 log->max_free_space = RECLAIM_MAX_FREE_SPACE;
3007 log->last_checkpoint = cp;
3012 log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS);
3013 log->seq = log->last_cp_seq + 1;
3014 log->next_checkpoint = cp;
3016 ret = r5l_recovery_log(log);
3018 r5c_update_log_state(log);
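
r5l_load_log() sizes the reclaim trigger as described in the header comment: min(1/4 of the device, 10G). With a shift of 2 and a 10GiB cap expressed in 512-byte sectors (both assumed to match the constants named above), that is:

#include <stdint.h>
#include <stdio.h>

#define RECLAIM_MAX_FREE_SPACE_SHIFT 2			/* 1/4 of device */
#define RECLAIM_MAX_FREE_SPACE (10ULL * 1024 * 1024 * 2) /* 10GiB, sectors */

/* min(device_size / 4, 10GiB), matching the comment near the top */
static uint64_t max_free_space(uint64_t device_size)
{
	uint64_t s = device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;

	return s > RECLAIM_MAX_FREE_SPACE ? RECLAIM_MAX_FREE_SPACE : s;
}

int main(void)
{
	/* 2GiB device: limited by the 1/4 rule */
	printf("%llu\n", (unsigned long long)max_free_space(1ULL << 22));
	/* 2TiB device: limited by the 10GiB cap */
	printf("%llu\n", (unsigned long long)max_free_space(1ULL << 32));
	return 0;
}
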
3025 int r5l_start(struct r5l_log *log)
3029 if (!log)
3032 ret = r5l_load_log(log);
3034 struct mddev *mddev = log->rdev->mddev;
3045 struct r5l_log *log = READ_ONCE(conf->log);
3047 if (!log)
3052 log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK)
3053 schedule_work(&log->disable_writeback_work);
3058 struct r5l_log *log;
3083 log = kzalloc(sizeof(*log), GFP_KERNEL);
3084 if (!log)
3086 log->rdev = rdev;
3087 log->need_cache_flush = bdev_write_cache(rdev->bdev);
3088 log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
3091 mutex_init(&log->io_mutex);
3093 spin_lock_init(&log->io_list_lock);
3094 INIT_LIST_HEAD(&log->running_ios);
3095 INIT_LIST_HEAD(&log->io_end_ios);
3096 INIT_LIST_HEAD(&log->flushing_ios);
3097 INIT_LIST_HEAD(&log->finished_ios);
3099 log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
3100 if (!log->io_kc)
3103 ret = mempool_init_slab_pool(&log->io_pool, R5L_POOL_SIZE, log->io_kc);
3107 ret = bioset_init(&log->bs, R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS);
3111 ret = mempool_init_page_pool(&log->meta_pool, R5L_POOL_SIZE, 0);
3115 spin_lock_init(&log->tree_lock);
3116 INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN);
3118 thread = md_register_thread(r5l_reclaim_thread, log->rdev->mddev,
3124 rcu_assign_pointer(log->reclaim_thread, thread);
3126 init_waitqueue_head(&log->iounit_wait);
3128 INIT_LIST_HEAD(&log->no_mem_stripes);
3130 INIT_LIST_HEAD(&log->no_space_stripes);
3131 spin_lock_init(&log->no_space_stripes_lock);
3133 INIT_WORK(&log->deferred_io_work, r5l_submit_io_async);
3134 INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async);
3136 log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
3137 INIT_LIST_HEAD(&log->stripe_in_journal_list);
3138 spin_lock_init(&log->stripe_in_journal_lock);
3139 atomic_set(&log->stripe_in_journal_count, 0);
3141 WRITE_ONCE(conf->log, log);
3147 mempool_exit(&log->meta_pool);
3149 bioset_exit(&log->bs);
3151 mempool_exit(&log->io_pool);
3153 kmem_cache_destroy(log->io_kc);
3155 kfree(log);
3161 struct r5l_log *log = conf->log;
3163 md_unregister_thread(conf->mddev, &log->reclaim_thread);
3166 * 'reconfig_mutex' is held by caller, set 'conf->log' to NULL to
3169 WRITE_ONCE(conf->log, NULL);
3171 flush_work(&log->disable_writeback_work);
3173 mempool_exit(&log->meta_pool);
3174 bioset_exit(&log->bs);
3175 mempool_exit(&log->io_pool);
3176 kmem_cache_destroy(log->io_kc);
3177 kfree(log);