Lines matching refs:ic (uses of the struct dm_integrity_c *ic context pointer; line numbers refer to drivers/md/dm-integrity.c)

107 #define journal_entry_tag(ic, je)		((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
302 struct dm_integrity_c *ic;
320 struct dm_integrity_c *ic;
332 struct dm_integrity_c *ic;
376 static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
379 atomic64_inc(&ic->number_of_mismatches);
380 if (!cmpxchg(&ic->failed, 0, err))
384 static int dm_integrity_failed(struct dm_integrity_c *ic)
386 return READ_ONCE(ic->failed);
389 static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
391 if (ic->legacy_recalculate)
393 if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
394 ic->internal_hash_alg.key || ic->journal_mac_alg.key :
395 ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
400 static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
407 return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
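
The commit id computed at line 407 tags every journal sector with a value derived from one of four rotating per-sequence random ids, XORed with the sector's position, so a torn or stale journal write is detectable on replay. A minimal userspace sketch of the arithmetic (names hypothetical; the kernel applies cpu_to_le64 for the on-disk format):

    #include <stdint.h>

    typedef uint64_t commit_id_t;

    /* commit_ids[] holds 4 random per-sequence base values. */
    static commit_id_t commit_id(const commit_id_t commit_ids[4],
                                 unsigned section, unsigned sector,
                                 unsigned char seq)
    {
        /* XORing in the position makes every journal sector's id unique. */
        return commit_ids[seq & 3] ^ (((uint64_t)section << 32) ^ sector);
    }
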
410 static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
413 if (!ic->meta_dev) {
414 __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
423 #define sector_to_block(ic, n) \
425 BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \
426 (n) >>= (ic)->sb->log2_sectors_per_block; \
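
The sector_to_block() macro at lines 423-426 converts a sector count to a block count in place and BUGs on misaligned input. A plain-function equivalent, as a sketch (hypothetical name, userspace assert instead of BUG_ON):

    #include <assert.h>
    #include <stdint.h>

    static uint64_t sectors_to_blocks(uint64_t n, unsigned log2_sectors_per_block)
    {
        /* Reject anything that is not a whole number of blocks. */
        assert((n & ((1ULL << log2_sectors_per_block) - 1)) == 0);
        return n >> log2_sectors_per_block;
    }
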
429 static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
435 ms = area << ic->sb->log2_interleave_sectors;
436 if (likely(ic->log2_metadata_run >= 0))
437 ms += area << ic->log2_metadata_run;
439 ms += area * ic->metadata_run;
440 ms >>= ic->log2_buffer_sectors;
442 sector_to_block(ic, offset);
444 if (likely(ic->log2_tag_size >= 0)) {
445 ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
446 mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
448 ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
449 mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
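
In get_metadata_sector_and_offset() (lines 429-449), ms is the area's metadata start expressed in buffer-sized blocks, and the tag for data block offset lives offset * tag_size bytes into that region; the function splits the byte address into a metadata block number and an offset within the block. A sketch of the power-of-two fast path (lines 444-446), assuming 512-byte sectors:

    #include <stdint.h>

    static uint64_t metadata_block_and_offset(uint64_t ms, uint64_t block,
                                              int log2_tag_size,
                                              int log2_buffer_sectors,
                                              unsigned *byte_offset)
    {
        const int sector_shift = 9;   /* 512-byte sectors */
        ms += block >> (sector_shift + log2_buffer_sectors - log2_tag_size);
        *byte_offset = (unsigned)((block << log2_tag_size) &
                       ((1U << sector_shift << log2_buffer_sectors) - 1));
        return ms;
    }
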
455 static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
459 if (ic->meta_dev)
462 result = area << ic->sb->log2_interleave_sectors;
463 if (likely(ic->log2_metadata_run >= 0))
464 result += (area + 1) << ic->log2_metadata_run;
466 result += (area + 1) * ic->metadata_run;
468 result += (sector_t)ic->initial_sectors + offset;
469 result += ic->start;
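
get_data_sector() (lines 455-469) implements the interleaved single-device layout: each area holds one metadata run followed by 1 << log2_interleave_sectors data sectors, so area N's data begins after N interleave units plus N+1 metadata runs, the initial superblock and journal sectors, and the target's start. A sketch of the non-power-of-two branch (line 466), with hypothetical names:

    #include <stdint.h>

    static uint64_t data_sector(uint64_t area, uint64_t offset,
                                unsigned log2_interleave_sectors,
                                uint64_t metadata_run, /* sectors per run */
                                uint64_t initial_sectors, uint64_t start)
    {
        uint64_t result = area << log2_interleave_sectors;
        result += (area + 1) * metadata_run;  /* metadata precedes its data */
        result += initial_sectors + offset;   /* skip superblock + journal */
        return result + start;
    }
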
474 static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
476 if (unlikely(*sec_ptr >= ic->journal_sections))
477 *sec_ptr -= ic->journal_sections;
480 static void sb_set_version(struct dm_integrity_c *ic)
482 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
483 ic->sb->version = SB_VERSION_5;
484 else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
485 ic->sb->version = SB_VERSION_4;
486 else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
487 ic->sb->version = SB_VERSION_3;
488 else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
489 ic->sb->version = SB_VERSION_2;
491 ic->sb->version = SB_VERSION_1;
494 static int sb_mac(struct dm_integrity_c *ic, bool wr)
496 SHASH_DESC_ON_STACK(desc, ic->journal_mac);
498 unsigned int mac_size = crypto_shash_digestsize(ic->journal_mac);
499 __u8 *sb = (__u8 *)ic->sb;
503 dm_integrity_io_error(ic, "digest is too long", -EINVAL);
507 desc->tfm = ic->journal_mac;
512 dm_integrity_io_error(ic, "crypto_shash_digest", r);
520 dm_integrity_io_error(ic, "crypto_shash_digest", r);
524 dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
525 dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
533 static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf)
542 io_req.mem.ptr.addr = ic->sb;
544 io_req.client = ic->io;
545 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
546 io_loc.sector = ic->start;
550 sb_set_version(ic);
551 if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
552 r = sb_mac(ic, true);
563 if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
564 r = sb_mac(ic, false);
578 static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
584 if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
588 ic->sb->log2_sectors_per_block,
589 ic->log2_blocks_per_bitmap_bit,
597 bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
599 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
684 static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
686 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
697 static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
699 unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
702 BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
703 return &ic->bbs[bitmap_block];
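
Lines 597-599 and 699-703 share one mapping: a data sector selects bitmap bit sector >> (log2_sectors_per_block + log2_blocks_per_bitmap_bit), and bits are grouped into 4096-byte bitmap blocks (BITMAP_BLOCK_SIZE), each covering 4096 * 8 bits. A sketch of the mapping:

    #include <stdint.h>

    #define BITMAP_BLOCK_SIZE 4096   /* bytes per on-disk bitmap block */

    static unsigned bitmap_block_of(uint64_t sector,
                                    unsigned log2_sectors_per_block,
                                    unsigned log2_blocks_per_bitmap_bit)
    {
        uint64_t bit = sector >> (log2_sectors_per_block +
                                  log2_blocks_per_bitmap_bit);
        return (unsigned)(bit / (BITMAP_BLOCK_SIZE * 8)); /* 8 bits/byte */
    }
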
706 static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
710 unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
712 if (unlikely(section >= ic->journal_sections) ||
715 function, section, offset, ic->journal_sections, limit);
721 static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
726 access_journal_check(ic, section, offset, false, "page_list_location");
728 sector = section * ic->journal_section_sectors + offset;
734 static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
740 page_list_location(ic, section, offset, &pl_index, &pl_offset);
750 static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
752 return access_page_list(ic, ic->journal, section, offset, NULL);
755 static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
760 access_journal_check(ic, section, n, true, "access_journal_entry");
765 js = access_journal(ic, section, rel_sector);
766 return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
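
access_journal_entry() (lines 755-766) shows how entries are striped across a section's JOURNAL_BLOCK_SECTORS (8) metadata sectors: entry n sits in sector n % 8 at slot n / 8. A sketch assuming a flat section buffer (the kernel actually walks a page list via access_journal()):

    #include <stddef.h>

    #define JOURNAL_BLOCK_SECTORS 8

    static void *journal_entry_addr(void *section_base, unsigned n,
                                    size_t sector_size,
                                    size_t journal_entry_size)
    {
        unsigned rel_sector = n % JOURNAL_BLOCK_SECTORS;
        unsigned slot       = n / JOURNAL_BLOCK_SECTORS;
        return (char *)section_base + rel_sector * sector_size
                                    + slot * journal_entry_size;
    }
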
769 static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
771 n <<= ic->sb->log2_sectors_per_block;
775 access_journal_check(ic, section, n, false, "access_journal_data");
777 return access_journal(ic, section, n);
780 static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
782 SHASH_DESC_ON_STACK(desc, ic->journal_mac);
786 desc->tfm = ic->journal_mac;
790 dm_integrity_io_error(ic, "crypto_shash_init", r);
794 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
797 r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
799 dm_integrity_io_error(ic, "crypto_shash_update", r);
806 dm_integrity_io_error(ic, "crypto_shash_update", r);
811 for (j = 0; j < ic->journal_section_entries; j++) {
812 struct journal_entry *je = access_journal_entry(ic, section, j);
816 dm_integrity_io_error(ic, "crypto_shash_update", r);
821 size = crypto_shash_digestsize(ic->journal_mac);
826 dm_integrity_io_error(ic, "crypto_shash_final", r);
834 dm_integrity_io_error(ic, "digest_size", -EINVAL);
839 dm_integrity_io_error(ic, "crypto_shash_final", r);
850 static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
855 if (!ic->journal_mac)
858 section_mac(ic, section, result);
861 struct journal_sector *js = access_journal(ic, section, j);
867 dm_integrity_io_error(ic, "journal mac", -EILSEQ);
868 dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
883 static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
887 size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
892 source_pl = ic->journal;
893 target_pl = ic->journal_io;
895 source_pl = ic->journal_io;
896 target_pl = ic->journal;
899 page_list_location(ic, section, 0, &pl_index, &pl_offset);
916 rw_section_mac(ic, section, true);
921 page_list_location(ic, section, 0, &section_index, &dummy);
927 src_pages[1] = ic->journal_xor[pl_index].page;
947 complete(&comp->ic->crypto_backoff);
950 dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
970 wait_for_completion(&comp->ic->crypto_backoff);
971 reinit_completion(&comp->ic->crypto_backoff);
974 dm_integrity_io_error(comp->ic, "encrypt", r);
978 static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
987 source_sg = ic->journal_scatterlist;
988 target_sg = ic->journal_io_scatterlist;
990 source_sg = ic->journal_io_scatterlist;
991 target_sg = ic->journal_scatterlist;
1000 rw_section_mac(ic, section, true);
1002 req = ic->sk_requests[section];
1003 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
1022 static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
1025 if (ic->journal_xor)
1026 return xor_journal(ic, encrypt, section, n_sections, comp);
1028 return crypt_journal(ic, encrypt, section, n_sections, comp);
1036 dm_integrity_io_error(comp->ic, "writing journal", -EIO);
1040 static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
1049 if (unlikely(dm_integrity_failed(ic))) {
1060 if (ic->journal_io)
1061 io_req.mem.ptr.pl = &ic->journal_io[pl_index];
1063 io_req.mem.ptr.pl = &ic->journal[pl_index];
1071 io_req.client = ic->io;
1072 io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
1073 io_loc.sector = ic->start + SB_SECTORS + sector;
1078 dm_integrity_io_error(ic, (opf & REQ_OP_MASK) == REQ_OP_READ ?
1087 static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
1093 sector = section * ic->journal_section_sectors;
1094 n_sectors = n_sections * ic->journal_section_sectors;
1096 rw_journal_sectors(ic, opf, sector, n_sectors, comp);
1099 static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
1106 io_comp.ic = ic;
1109 if (commit_start + commit_sections <= ic->journal_sections) {
1111 if (ic->journal_io) {
1112 crypt_comp_1.ic = ic;
1115 encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
1119 rw_section_mac(ic, commit_start + i, true);
1121 rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
1127 to_end = ic->journal_sections - commit_start;
1128 if (ic->journal_io) {
1129 crypt_comp_1.ic = ic;
1132 encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
1134 rw_journal(ic, REQ_OP_WRITE | REQ_FUA,
1138 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
1141 crypt_comp_2.ic = ic;
1144 encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
1146 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
1151 rw_section_mac(ic, commit_start + i, true);
1152 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
1154 rw_section_mac(ic, i, true);
1156 rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
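
write_journal() (lines 1099-1156) handles the ring wraparound: a commit that crosses the end of the journal is split into a tail write and a head write, each MAC-ed (and encrypted, when journal_io is in use) before submission. A sketch of just the split, with hypothetical types:

    struct span { unsigned start, count; };

    /* Split a commit of n sections starting at s inside a ring of
     * `total` sections into at most two contiguous writes. */
    static unsigned split_commit(unsigned s, unsigned n, unsigned total,
                                 struct span out[2])
    {
        if (s + n <= total) {
            out[0] = (struct span){ s, n };           /* no wraparound */
            return 1;
        }
        out[0] = (struct span){ s, total - s };       /* tail of the ring */
        out[1] = (struct span){ 0, n - (total - s) }; /* wrapped head */
        return 2;
    }
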
1162 static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
1170 BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));
1172 if (unlikely(dm_integrity_failed(ic))) {
1177 sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;
1184 io_req.mem.ptr.pl = &ic->journal[pl_index];
1188 io_req.client = ic->io;
1189 io_loc.bdev = ic->dev->bdev;
1206 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
1208 struct rb_node **n = &ic->in_progress.rb_node;
1211 BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
1216 list_for_each_entry(range, &ic->wait_list, wait_entry) {
1237 rb_insert_color(&new_range->node, &ic->in_progress);
1242 static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1244 rb_erase(&range->node, &ic->in_progress);
1245 while (unlikely(!list_empty(&ic->wait_list))) {
1247 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
1252 if (!add_new_range(ic, last_range, false)) {
1254 list_add(&last_range->wait_entry, &ic->wait_list);
1262 static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
1266 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1267 remove_range_unlocked(ic, range);
1268 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1271 static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1274 list_add_tail(&new_range->wait_entry, &ic->wait_list);
1278 spin_unlock_irq(&ic->endio_wait.lock);
1280 spin_lock_irq(&ic->endio_wait.lock);
1284 static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
1286 if (unlikely(!add_new_range(ic, new_range, true)))
1287 wait_and_add_new_range(ic, new_range);
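
The range machinery (lines 1206-1287) serializes I/O to overlapping sector ranges: add_new_range() walks an rb-tree of in-progress ranges, and waiters queue on a FIFO list until the conflicting range is removed. The overlap test it performs at each tree node reduces to the usual half-open interval check, sketched here:

    #include <stdbool.h>
    #include <stdint.h>

    struct range { uint64_t logical_sector, n_sectors; };

    /* [a, a+na) and [b, b+nb) collide iff each starts before the
     * other one ends. */
    static bool ranges_overlap(const struct range *a, const struct range *b)
    {
        return a->logical_sector < b->logical_sector + b->n_sectors &&
               b->logical_sector < a->logical_sector + a->n_sectors;
    }
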
1296 static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
1304 link = &ic->journal_tree_root.rb_node;
1319 rb_insert_color(&node->node, &ic->journal_tree_root);
1322 static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
1325 rb_erase(&node->node, &ic->journal_tree_root);
1331 static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
1333 struct rb_node *n = ic->journal_tree_root.rb_node;
1341 found = j - ic->journal_tree;
1353 static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
1358 if (unlikely(pos >= ic->journal_entries))
1360 node = &ic->journal_tree[pos];
1374 static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
1391 next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
1392 if (next_section >= ic->committed_section &&
1393 next_section < ic->committed_section + ic->n_committed_sections)
1395 if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
1405 static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
1411 unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1419 r = dm_integrity_failed(ic);
1423 data = dm_bufio_read(ic->bufio, *metadata_block, &b);
1427 to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
1439 if (likely(is_power_of_2(ic->tag_size))) {
1441 if (unlikely(!ic->discard) ||
1456 if (unlikely(hash_offset == ic->tag_size)) {
1462 may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
1471 if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
1476 if (unlikely(!is_power_of_2(ic->tag_size)))
1477 hash_offset = (hash_offset + to_copy) % ic->tag_size;
1490 struct dm_integrity_c *ic;
1499 dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
1503 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
1508 if (!ic->meta_dev)
1516 fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
1517 fr.io_reg.bdev = ic->dev->bdev,
1520 fr.ic = ic;
1526 r = dm_bufio_write_dirty_buffers(ic->bufio);
1528 dm_integrity_io_error(ic, "writing tags", r);
1534 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1538 __add_wait_queue(&ic->endio_wait, &wait);
1540 spin_unlock_irq(&ic->endio_wait.lock);
1542 spin_lock_irq(&ic->endio_wait.lock);
1543 __remove_wait_queue(&ic->endio_wait, &wait);
1548 struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1550 if (likely(!dm_integrity_failed(ic)))
1551 queue_work(ic->commit_wq, &ic->commit_work);
1554 static void schedule_autocommit(struct dm_integrity_c *ic)
1556 if (!timer_pending(&ic->autocommit_timer))
1557 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1560 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1565 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1567 bio_list_add(&ic->flush_bio_list, bio);
1568 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1570 queue_work(ic->commit_wq, &ic->commit_work);
1573 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1577 r = dm_integrity_failed(ic);
1580 if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1583 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1584 bio_list_add(&ic->synchronous_bios, bio);
1585 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1586 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1592 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1596 if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1597 submit_flush_bio(ic, dio);
1599 do_endio(ic, bio);
1605 struct dm_integrity_c *ic = dio->ic;
1608 remove_range(ic, &dio->range);
1611 schedule_autocommit(ic);
1620 queue_work(ic->offload_wq, &dio->work);
1623 do_endio_flush(ic, dio);
1641 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1645 SHASH_DESC_ON_STACK(req, ic->internal_hash);
1649 req->tfm = ic->internal_hash;
1653 dm_integrity_io_error(ic, "crypto_shash_init", r);
1657 if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
1658 r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
1660 dm_integrity_io_error(ic, "crypto_shash_update", r);
1667 dm_integrity_io_error(ic, "crypto_shash_update", r);
1671 r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1673 dm_integrity_io_error(ic, "crypto_shash_update", r);
1679 dm_integrity_io_error(ic, "crypto_shash_final", r);
1683 digest_size = crypto_shash_digestsize(ic->internal_hash);
1684 if (unlikely(digest_size < ic->tag_size))
1685 memset(result + digest_size, 0, ic->tag_size - digest_size);
1691 get_random_bytes(result, ic->tag_size);
1697 struct dm_integrity_c *ic = dio->ic;
1703 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1704 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
1706 sector = get_data_sector(ic, area, offset);
1709 page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
1725 io_req.client = ic->io;
1726 io_loc.bdev = ic->dev->bdev;
1728 io_loc.count = ic->sectors_per_block;
1744 integrity_sector_checksum(ic, logical_sector, buffer, checksum);
1745 r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
1746 &dio->metadata_offset, ic->tag_size, TAG_CMP);
1751 atomic64_inc(&ic->number_of_mismatches);
1761 memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
1764 pos += ic->sectors_per_block << SECTOR_SHIFT;
1765 sector += ic->sectors_per_block;
1766 logical_sector += ic->sectors_per_block;
1770 mempool_free(page, &ic->recheck_pool);
1776 struct dm_integrity_c *ic = dio->ic;
1780 if (ic->internal_hash) {
1783 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
1786 unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1791 if (unlikely(ic->mode == 'R'))
1795 checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1811 unsigned int max_blocks = max_size / ic->tag_size;
1816 unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1819 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1820 this_step_blocks * ic->tag_size, TAG_WRITE);
1827 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1848 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1849 checksums_ptr += ic->tag_size;
1850 sectors_to_process -= ic->sectors_per_block;
1851 pos += ic->sectors_per_block << SECTOR_SHIFT;
1852 sector += ic->sectors_per_block;
1856 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1888 sector_to_block(ic, data_to_process);
1889 data_to_process *= ic->tag_size;
1898 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1918 struct dm_integrity_c *ic = ti->private;
1924 dio->ic = ic;
1944 submit_flush_bio(ic, dio);
1957 if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1960 ic->provided_data_sectors);
1963 if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
1965 ic->sectors_per_block,
1970 if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1975 if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1977 bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1984 if (!ic->internal_hash) {
1986 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1988 if (ic->log2_tag_size >= 0)
1989 wanted_tag_size <<= ic->log2_tag_size;
1991 wanted_tag_size *= ic->tag_size;
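
Lines 1986-1991 validate an externally supplied bio-integrity payload: the expected size is one tag per data block covered by the bio, computed via a shift when tag_size is a power of two. As a sketch:

    /* Expected integrity payload in bytes: one tag per data block. */
    static unsigned wanted_tag_bytes(unsigned bio_sectors,
                                     unsigned log2_sectors_per_block,
                                     unsigned tag_size)
    {
        return (bio_sectors >> log2_sectors_per_block) * tag_size;
    }
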
2005 if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
2008 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2009 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2010 bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
2019 struct dm_integrity_c *ic = dio->ic;
2039 struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
2050 __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2055 js = access_journal_data(ic, journal_section, journal_entry);
2063 } while (++s < ic->sectors_per_block);
2065 if (ic->internal_hash) {
2068 integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
2069 if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
2079 if (!ic->internal_hash) {
2081 unsigned int tag_todo = ic->tag_size;
2082 char *tag_ptr = journal_entry_tag(ic, je);
2108 js = access_journal_data(ic, journal_section, journal_entry);
2109 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
2114 } while (++s < ic->sectors_per_block);
2116 if (ic->internal_hash) {
2117 unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
2119 if (unlikely(digest_size > ic->tag_size)) {
2122 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
2123 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
2125 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
2130 logical_sector += ic->sectors_per_block;
2133 if (unlikely(journal_entry == ic->journal_section_entries)) {
2136 wraparound_section(ic, &journal_section);
2139 bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
2140 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
2149 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
2150 wake_up(&ic->copy_to_journal_wait);
2151 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2152 queue_work(ic->commit_wq, &ic->commit_work);
2154 schedule_autocommit(ic);
2156 remove_range(ic, &dio->range);
2162 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2163 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2172 struct dm_integrity_c *ic = dio->ic;
2178 bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2180 if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2185 queue_work(ic->offload_wq, &dio->work);
2190 spin_lock_irq(&ic->endio_wait.lock);
2192 if (unlikely(dm_integrity_failed(ic))) {
2193 spin_unlock_irq(&ic->endio_wait.lock);
2194 do_endio(ic, bio);
2199 if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2205 (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2209 sleep_on_endio_wait(ic);
2212 range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2213 ic->free_sectors -= range_sectors;
2214 journal_section = ic->free_section;
2215 journal_entry = ic->free_section_entry;
2217 next_entry = ic->free_section_entry + range_sectors;
2218 ic->free_section_entry = next_entry % ic->journal_section_entries;
2219 ic->free_section += next_entry / ic->journal_section_entries;
2220 ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2221 wraparound_section(ic, &ic->free_section);
2223 pos = journal_section * ic->journal_section_entries + journal_entry;
2230 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2232 if (unlikely(pos >= ic->journal_entries))
2235 je = access_journal_entry(ic, ws, we);
2239 if (unlikely(we == ic->journal_section_entries)) {
2242 wraparound_section(ic, &ws);
2244 } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2246 spin_unlock_irq(&ic->endio_wait.lock);
2251 journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2259 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2260 if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2267 if (unlikely(!add_new_range(ic, &dio->range, true))) {
2275 spin_unlock_irq(&ic->endio_wait.lock);
2277 queue_work(ic->wait_wq, &dio->work);
2281 dio->range.n_sectors = ic->sectors_per_block;
2282 wait_and_add_new_range(ic, &dio->range);
2292 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2294 remove_range_unlocked(ic, &dio->range);
2299 if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2303 new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2306 remove_range_unlocked(ic, &dio->range);
2307 spin_unlock_irq(&ic->endio_wait.lock);
2308 queue_work(ic->commit_wq, &ic->commit_work);
2309 flush_workqueue(ic->commit_wq);
2310 queue_work(ic->writer_wq, &ic->writer_work);
2311 flush_workqueue(ic->writer_wq);
2316 spin_unlock_irq(&ic->endio_wait.lock);
2319 journal_section = journal_read_pos / ic->journal_section_entries;
2320 journal_entry = journal_read_pos % ic->journal_section_entries;
2324 if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2325 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2329 bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2333 queue_work(ic->writer_wq, &bbs->work);
2347 bio_set_dev(bio, ic->dev->bdev);
2353 if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2355 dm_integrity_flush_buffers(ic, false);
2369 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2370 dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2372 if (ic->mode == 'B') {
2373 if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2385 queue_work(ic->metadata_wq, &dio->work);
2394 do_endio_flush(ic, dio);
2405 static void pad_uncommitted(struct dm_integrity_c *ic)
2407 if (ic->free_section_entry) {
2408 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2409 ic->free_section_entry = 0;
2410 ic->free_section++;
2411 wraparound_section(ic, &ic->free_section);
2412 ic->n_uncommitted_sections++;
2414 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2415 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2416 ic->journal_section_entries + ic->free_sectors)) {
2420 ic->journal_sections, ic->journal_section_entries,
2421 ic->n_uncommitted_sections, ic->n_committed_sections,
2422 ic->journal_section_entries, ic->free_sectors);
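
The WARN_ON at lines 2414-2416 asserts the ring accounting invariant: every journal entry is either inside an uncommitted section, inside a committed section, or free. Restated as a userspace check:

    #include <assert.h>

    static void check_journal_accounting(unsigned journal_sections,
                                         unsigned journal_section_entries,
                                         unsigned n_uncommitted_sections,
                                         unsigned n_committed_sections,
                                         unsigned free_sectors)
    {
        assert(journal_sections * journal_section_entries ==
               (n_uncommitted_sections + n_committed_sections) *
               journal_section_entries + free_sectors);
    }
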
2428 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2433 del_timer(&ic->autocommit_timer);
2435 spin_lock_irq(&ic->endio_wait.lock);
2436 flushes = bio_list_get(&ic->flush_bio_list);
2437 if (unlikely(ic->mode != 'J')) {
2438 spin_unlock_irq(&ic->endio_wait.lock);
2439 dm_integrity_flush_buffers(ic, true);
2443 pad_uncommitted(ic);
2444 commit_start = ic->uncommitted_section;
2445 commit_sections = ic->n_uncommitted_sections;
2446 spin_unlock_irq(&ic->endio_wait.lock);
2451 ic->wrote_to_journal = true;
2455 for (j = 0; j < ic->journal_section_entries; j++) {
2458 je = access_journal_entry(ic, i, j);
2459 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2461 for (j = 0; j < ic->journal_section_sectors; j++) {
2464 js = access_journal(ic, i, j);
2465 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2468 if (unlikely(i >= ic->journal_sections))
2469 ic->commit_seq = next_commit_seq(ic->commit_seq);
2470 wraparound_section(ic, &i);
2474 write_journal(ic, commit_start, commit_sections);
2476 spin_lock_irq(&ic->endio_wait.lock);
2477 ic->uncommitted_section += commit_sections;
2478 wraparound_section(ic, &ic->uncommitted_section);
2479 ic->n_uncommitted_sections -= commit_sections;
2480 ic->n_committed_sections += commit_sections;
2481 spin_unlock_irq(&ic->endio_wait.lock);
2483 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2484 queue_work(ic->writer_wq, &ic->writer_work);
2491 do_endio(ic, flushes);
2500 struct dm_integrity_c *ic = comp->ic;
2502 remove_range(ic, &io->range);
2503 mempool_free(io, &ic->journal_io_mempool);
2505 dm_integrity_io_error(ic, "copying from journal", -EIO);
2509 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2517 } while (++s < ic->sectors_per_block);
2520 static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
2529 comp.ic = ic;
2534 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2538 rw_section_mac(ic, i, false);
2539 for (j = 0; j < ic->journal_section_entries; j++) {
2540 struct journal_entry *je = access_journal_entry(ic, i, j);
2552 if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
2553 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2554 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2556 if (unlikely(sec >= ic->provided_data_sectors)) {
2561 get_area_and_offset(ic, sec, &area, &offset);
2562 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2563 for (k = j + 1; k < ic->journal_section_entries; k++) {
2564 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2571 if (unlikely(sec2 >= ic->provided_data_sectors))
2573 get_area_and_offset(ic, sec2, &area2, &offset2);
2574 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2576 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2580 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2583 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2585 spin_lock_irq(&ic->endio_wait.lock);
2586 add_new_range_and_wait(ic, &io->range);
2589 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2592 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2593 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2596 remove_journal_node(ic, &section_node[j]);
2598 sec += ic->sectors_per_block;
2599 offset += ic->sectors_per_block;
2601 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2602 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2605 remove_journal_node(ic, &section_node[k - 1]);
2609 remove_range_unlocked(ic, &io->range);
2610 spin_unlock_irq(&ic->endio_wait.lock);
2611 mempool_free(io, &ic->journal_io_mempool);
2615 remove_journal_node(ic, &section_node[l]);
2617 spin_unlock_irq(&ic->endio_wait.lock);
2619 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2622 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2628 ic->internal_hash) {
2631 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2632 (char *)access_journal_data(ic, i, l), test_tag);
2633 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
2634 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2635 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
2640 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2641 ic->tag_size, TAG_WRITE);
2643 dm_integrity_io_error(ic, "reading tags", r);
2647 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2648 (k - j) << ic->sb->log2_sectors_per_block,
2649 get_data_sector(ic, area, offset),
2656 dm_bufio_write_dirty_buffers_async(ic->bufio);
2663 dm_integrity_flush_buffers(ic, true);
2668 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2672 spin_lock_irq(&ic->endio_wait.lock);
2673 write_start = ic->committed_section;
2674 write_sections = ic->n_committed_sections;
2675 spin_unlock_irq(&ic->endio_wait.lock);
2680 do_journal_write(ic, write_start, write_sections, false);
2682 spin_lock_irq(&ic->endio_wait.lock);
2684 ic->committed_section += write_sections;
2685 wraparound_section(ic, &ic->committed_section);
2686 ic->n_committed_sections -= write_sections;
2688 prev_free_sectors = ic->free_sectors;
2689 ic->free_sectors += write_sections * ic->journal_section_entries;
2691 wake_up_locked(&ic->endio_wait);
2693 spin_unlock_irq(&ic->endio_wait.lock);
2696 static void recalc_write_super(struct dm_integrity_c *ic)
2700 dm_integrity_flush_buffers(ic, false);
2701 if (dm_integrity_failed(ic))
2704 r = sync_rw_sb(ic, REQ_OP_WRITE);
2706 dm_integrity_io_error(ic, "writing superblock", r);
2711 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2733 if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
2738 recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
2739 if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
2740 recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
2748 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2750 spin_lock_irq(&ic->endio_wait.lock);
2754 if (unlikely(dm_post_suspending(ic->ti)))
2757 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2758 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2759 if (ic->mode == 'B') {
2760 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2762 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2767 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2768 range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
2769 if (!ic->meta_dev)
2770 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
2772 add_new_range_and_wait(ic, &range);
2773 spin_unlock_irq(&ic->endio_wait.lock);
2777 if (ic->mode == 'B') {
2778 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2781 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2782 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2783 logical_sector += ic->sectors_per_block;
2784 n_sectors -= ic->sectors_per_block;
2787 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2788 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2789 n_sectors -= ic->sectors_per_block;
2792 get_area_and_offset(ic, logical_sector, &area, &offset);
2798 recalc_write_super(ic);
2799 if (ic->mode == 'B')
2800 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2805 if (unlikely(dm_integrity_failed(ic)))
2812 io_req.client = ic->io;
2813 io_loc.bdev = ic->dev->bdev;
2814 io_loc.sector = get_data_sector(ic, area, offset);
2819 dm_integrity_io_error(ic, "reading data", r);
2824 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2825 integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
2826 t += ic->tag_size;
2829 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2831 r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
2833 dm_integrity_io_error(ic, "writing tags", r);
2837 if (ic->mode == 'B') {
2841 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2842 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2844 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2845 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2846 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
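
In the recalculation path (lines 2841-2846), the cleared recalc_bitmap span is first snapped to whole bitmap bits: both boundaries round down by log2_sectors_per_block + log2_blocks_per_bitmap_bit, so only fully recalculated bit ranges are cleared. A sketch of the rounding:

    #include <stdint.h>

    static void bit_aligned_span(uint64_t sector, uint64_t n_sectors,
                                 unsigned log2_sectors_per_bit,
                                 uint64_t *start, uint64_t *end)
    {
        *start = (sector >> log2_sectors_per_bit) << log2_sectors_per_bit;
        *end   = ((sector + n_sectors) >> log2_sectors_per_bit)
                 << log2_sectors_per_bit;
    }
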
2852 spin_lock_irq(&ic->endio_wait.lock);
2853 remove_range_unlocked(ic, &range);
2854 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2858 remove_range(ic, &range);
2862 spin_unlock_irq(&ic->endio_wait.lock);
2864 recalc_write_super(ic);
2874 struct dm_integrity_c *ic = bbs->ic;
2891 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2893 remove_range(ic, &dio->range);
2895 queue_work(ic->offload_wq, &dio->work);
2897 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2906 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
2913 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2916 remove_range(ic, &dio->range);
2918 queue_work(ic->offload_wq, &dio->work);
2921 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2926 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2931 dm_integrity_flush_buffers(ic, false);
2934 range.n_sectors = ic->provided_data_sectors;
2936 spin_lock_irq(&ic->endio_wait.lock);
2937 add_new_range_and_wait(ic, &range);
2938 spin_unlock_irq(&ic->endio_wait.lock);
2940 dm_integrity_flush_buffers(ic, true);
2942 limit = ic->provided_data_sectors;
2943 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2944 limit = le64_to_cpu(ic->sb->recalc_sector)
2945 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2946 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2949 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2950 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2952 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
2953 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2955 spin_lock_irq(&ic->endio_wait.lock);
2956 remove_range_unlocked(ic, &range);
2957 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2959 spin_unlock_irq(&ic->endio_wait.lock);
2960 spin_lock_irq(&ic->endio_wait.lock);
2962 spin_unlock_irq(&ic->endio_wait.lock);
2966 static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
2976 wraparound_section(ic, &i);
2977 for (j = 0; j < ic->journal_section_sectors; j++) {
2978 struct journal_sector *js = access_journal(ic, i, j);
2982 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2984 for (j = 0; j < ic->journal_section_entries; j++) {
2985 struct journal_entry *je = access_journal_entry(ic, i, j);
2991 write_journal(ic, start_section, n_sections);
2994 static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
2999 if (dm_integrity_commit_id(ic, i, j, k) == id)
3002 dm_integrity_io_error(ic, "journal commit id", -EIO);
3006 static void replay_journal(struct dm_integrity_c *ic)
3016 if (ic->mode == 'R')
3019 if (ic->journal_uptodate)
3025 if (!ic->just_formatted) {
3027 rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
3028 if (ic->journal_io)
3029 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
3030 if (ic->journal_io) {
3033 crypt_comp.ic = ic;
3036 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
3039 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
3042 if (dm_integrity_failed(ic))
3048 for (i = 0; i < ic->journal_sections; i++) {
3049 for (j = 0; j < ic->journal_section_sectors; j++) {
3051 struct journal_sector *js = access_journal(ic, i, j);
3053 k = find_commit_seq(ic, i, j, js->commit_id);
3060 for (j = 0; j < ic->journal_section_entries; j++) {
3061 struct journal_entry *je = access_journal_entry(ic, i, j);
3080 dm_integrity_io_error(ic, "journal commit ids", -EIO);
3095 if (unlikely(write_start >= ic->journal_sections))
3097 wraparound_section(ic, &write_start);
3100 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
3101 for (j = 0; j < ic->journal_section_sectors; j++) {
3102 struct journal_sector *js = access_journal(ic, i, j);
3104 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
3111 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
3116 if (unlikely(i >= ic->journal_sections))
3118 wraparound_section(ic, &i);
3125 do_journal_write(ic, write_start, write_sections, true);
3128 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3130 ic->commit_seq = want_commit_seq;
3131 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3141 init_journal(ic, s, 1, erase_seq);
3143 wraparound_section(ic, &s);
3144 if (ic->journal_sections >= 2) {
3145 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3146 s += ic->journal_sections - 2;
3147 wraparound_section(ic, &s);
3148 init_journal(ic, s, 1, erase_seq);
3152 ic->commit_seq = next_commit_seq(erase_seq);
3155 ic->committed_section = continue_section;
3156 ic->n_committed_sections = 0;
3158 ic->uncommitted_section = continue_section;
3159 ic->n_uncommitted_sections = 0;
3161 ic->free_section = continue_section;
3162 ic->free_section_entry = 0;
3163 ic->free_sectors = ic->journal_entries;
3165 ic->journal_tree_root = RB_ROOT;
3166 for (i = 0; i < ic->journal_entries; i++)
3167 init_journal_node(&ic->journal_tree[i]);
3170 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3174 if (ic->mode == 'B') {
3175 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3176 ic->synchronous_mode = 1;
3178 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3179 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3180 flush_workqueue(ic->commit_wq);
3186 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3190 dm_integrity_enter_synchronous_mode(ic);
3197 struct dm_integrity_c *ic = ti->private;
3200 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3202 del_timer_sync(&ic->autocommit_timer);
3204 if (ic->recalc_wq)
3205 drain_workqueue(ic->recalc_wq);
3207 if (ic->mode == 'B')
3208 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3210 queue_work(ic->commit_wq, &ic->commit_work);
3211 drain_workqueue(ic->commit_wq);
3213 if (ic->mode == 'J') {
3214 queue_work(ic->writer_wq, &ic->writer_work);
3215 drain_workqueue(ic->writer_wq);
3216 dm_integrity_flush_buffers(ic, true);
3217 if (ic->wrote_to_journal) {
3218 init_journal(ic, ic->free_section,
3219 ic->journal_sections - ic->free_section, ic->commit_seq);
3220 if (ic->free_section) {
3221 init_journal(ic, 0, ic->free_section,
3222 next_commit_seq(ic->commit_seq));
3227 if (ic->mode == 'B') {
3228 dm_integrity_flush_buffers(ic, true);
3231 init_journal(ic, 0, ic->journal_sections, 0);
3232 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3233 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3235 dm_integrity_io_error(ic, "writing superblock", r);
3239 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3241 ic->journal_uptodate = true;
3246 struct dm_integrity_c *ic = ti->private;
3247 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3252 ic->wrote_to_journal = false;
3254 if (ic->provided_data_sectors != old_provided_data_sectors) {
3255 if (ic->provided_data_sectors > old_provided_data_sectors &&
3256 ic->mode == 'B' &&
3257 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3258 rw_journal_sectors(ic, REQ_OP_READ, 0,
3259 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3260 block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3261 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3262 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3263 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3266 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3267 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3269 dm_integrity_io_error(ic, "writing superblock", r);
3272 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3274 rw_journal_sectors(ic, REQ_OP_READ, 0,
3275 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3276 if (ic->mode == 'B') {
3277 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3278 !ic->reset_recalculate_flag) {
3279 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3280 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3281 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3283 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3284 ic->sb->recalc_sector = cpu_to_le64(0);
3288 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3289 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3290 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3291 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3292 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3293 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3294 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3295 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3296 ic->sb->recalc_sector = cpu_to_le64(0);
3299 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3300 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3301 ic->reset_recalculate_flag) {
3302 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3303 ic->sb->recalc_sector = cpu_to_le64(0);
3305 init_journal(ic, 0, ic->journal_sections, 0);
3306 replay_journal(ic);
3307 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3309 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3311 dm_integrity_io_error(ic, "writing superblock", r);
3313 replay_journal(ic);
3314 if (ic->reset_recalculate_flag) {
3315 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3316 ic->sb->recalc_sector = cpu_to_le64(0);
3318 if (ic->mode == 'B') {
3319 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3320 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3321 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
3323 dm_integrity_io_error(ic, "writing superblock", r);
3325 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3326 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3327 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3328 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3329 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3330 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3331 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3332 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3333 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3334 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3335 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3337 rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
3338 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3342 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3343 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3344 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3346 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3347 if (recalc_pos < ic->provided_data_sectors) {
3348 queue_work(ic->recalc_wq, &ic->recalc_work);
3349 } else if (recalc_pos > ic->provided_data_sectors) {
3350 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3351 recalc_write_super(ic);
3355 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3356 ic->reboot_notifier.next = NULL;
3357 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
3358 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3362 dm_integrity_enter_synchronous_mode(ic);
3369 struct dm_integrity_c *ic = ti->private;
3376 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3377 ic->provided_data_sectors);
3378 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3379 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3385 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3387 watermark_percentage += ic->journal_entries / 2;
3388 do_div(watermark_percentage, ic->journal_entries);
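
Status reporting (lines 3385-3388) converts free_sectors_threshold back into the journal_watermark percentage, rounding to nearest (do_div divides the 64-bit value in place). Equivalent userspace arithmetic:

    #include <stdint.h>

    static unsigned watermark_percent(unsigned journal_entries,
                                      unsigned free_sectors_threshold)
    {
        uint64_t pct = (uint64_t)(journal_entries - free_sectors_threshold) * 100;
        pct += journal_entries / 2;        /* round to nearest */
        return (unsigned)(pct / journal_entries);
    }
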
3390 arg_count += !!ic->meta_dev;
3391 arg_count += ic->sectors_per_block != 1;
3392 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3393 arg_count += ic->reset_recalculate_flag;
3394 arg_count += ic->discard;
3395 arg_count += ic->mode == 'J';
3396 arg_count += ic->mode == 'J';
3397 arg_count += ic->mode == 'B';
3398 arg_count += ic->mode == 'B';
3399 arg_count += !!ic->internal_hash_alg.alg_string;
3400 arg_count += !!ic->journal_crypt_alg.alg_string;
3401 arg_count += !!ic->journal_mac_alg.alg_string;
3402 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3403 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3404 arg_count += ic->legacy_recalculate;
3405 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3406 ic->tag_size, ic->mode, arg_count);
3407 if (ic->meta_dev)
3408 DMEMIT(" meta_device:%s", ic->meta_dev->name);
3409 if (ic->sectors_per_block != 1)
3410 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3411 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3413 if (ic->reset_recalculate_flag)
3415 if (ic->discard)
3417 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3418 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3419 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3420 if (ic->mode == 'J') {
3422 DMEMIT(" commit_time:%u", ic->autocommit_msec);
3424 if (ic->mode == 'B') {
3425 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3426 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3428 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3430 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3432 if (ic->legacy_recalculate)
3437 if (ic->a.alg_string) { \
3438 DMEMIT(" %s:%s", n, ic->a.alg_string); \
3439 if (ic->a.key_string) \
3440 DMEMIT(":%s", ic->a.key_string);\
3451 ic->dev->name, ic->start, ic->tag_size, ic->mode);
3453 if (ic->meta_dev)
3454 DMEMIT(",meta_device=%s", ic->meta_dev->name);
3455 if (ic->sectors_per_block != 1)
3456 DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
3458 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
3460 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
3462 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
3464 ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
3465 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
3467 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
3468 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
3469 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
3478 struct dm_integrity_c *ic = ti->private;
3480 if (!ic->meta_dev)
3481 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3483 return fn(ti, ic->dev, 0, ti->len, data);
3488 struct dm_integrity_c *ic = ti->private;
3490 if (ic->sectors_per_block > 1) {
3491 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3492 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3493 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3495 limits->discard_granularity = ic->sectors_per_block << SECTOR_SHIFT;
3500 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3504 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3505 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3508 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3510 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3511 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3512 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3513 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
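
calculate_journal_section_size() (lines 3500-3513) derives the section geometry from the entry size: entries per 512-byte metadata sector, times the 8 metadata sectors per section, plus the data sectors those entries describe. A sketch, taking the usable bytes per metadata sector as a parameter (the kernel subtracts the commit id, and the MAC when a journal MAC is configured):

    static void journal_geometry(unsigned journal_entry_size,
                                 unsigned sector_space, /* usable bytes */
                                 unsigned log2_sectors_per_block,
                                 unsigned *entries_per_section,
                                 unsigned *sectors_per_section)
    {
        unsigned entries_per_sector = sector_space / journal_entry_size;
        *entries_per_section = entries_per_sector * 8; /* JOURNAL_BLOCK_SECTORS */
        *sectors_per_section =
            (*entries_per_section << log2_sectors_per_block) + 8;
    }
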
3516 static int calculate_device_limits(struct dm_integrity_c *ic)
3520 calculate_journal_section_size(ic);
3521 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3522 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3524 ic->initial_sectors = initial_sectors;
3526 if (!ic->meta_dev) {
3531 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3535 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3537 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3538 ic->log2_metadata_run = __ffs(ic->metadata_run);
3540 ic->log2_metadata_run = -1;
3542 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3543 last_sector = get_data_sector(ic, last_area, last_offset);
3544 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3547 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3549 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3550 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3551 meta_size <<= ic->log2_buffer_sectors;
3552 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3553 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3555 ic->metadata_run = 1;
3556 ic->log2_metadata_run = 0;
3562 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3564 if (!ic->meta_dev) {
3567 ic->provided_data_sectors = 0;
3568 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3569 __u64 prev_data_sectors = ic->provided_data_sectors;
3571 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3572 if (calculate_device_limits(ic))
3573 ic->provided_data_sectors = prev_data_sectors;
3576 ic->provided_data_sectors = ic->data_device_sectors;
3577 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
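
get_provided_data_sectors() (lines 3562-3577) sizes the data area on a single-device layout with a greedy bit-by-bit search: propose each capacity bit from high to low and keep it only if calculate_device_limits() still fits. A sketch with a fits() callback standing in for that check:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t max_data_sectors(bool (*fits)(uint64_t), int top_bit)
    {
        uint64_t sectors = 0;
        for (int bit = top_bit; bit >= 3; bit--) {
            uint64_t candidate = sectors | ((uint64_t)1 << bit);
            if (fits(candidate))
                sectors = candidate;   /* keep the bit, refine lower ones */
        }
        return sectors;
    }
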
3581 static int initialize_superblock(struct dm_integrity_c *ic,
3587 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3588 memcpy(ic->sb->magic, SB_MAGIC, 8);
3589 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3590 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3591 if (ic->journal_mac_alg.alg_string)
3592 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3594 calculate_journal_section_size(ic);
3595 journal_sections = journal_sectors / ic->journal_section_sectors;
3599 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3600 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3601 get_random_bytes(ic->sb->salt, SALT_SIZE);
3604 if (!ic->meta_dev) {
3605 if (ic->fix_padding)
3606 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3607 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3610 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3611 ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3612 ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3614 get_provided_data_sectors(ic);
3615 if (!ic->provided_data_sectors)
3618 ic->sb->log2_interleave_sectors = 0;
3620 get_provided_data_sectors(ic);
3621 if (!ic->provided_data_sectors)
3625 ic->sb->journal_sections = cpu_to_le32(0);
3627 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3632 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3633 if (calculate_device_limits(ic))
3634 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3637 if (!le32_to_cpu(ic->sb->journal_sections)) {
3638 if (ic->log2_buffer_sectors > 3) {
3639 ic->log2_buffer_sectors--;
3646 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3648 sb_set_version(ic);
3653 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3660 bi.tuple_size = ic->tag_size;
3662 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
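
dm_integrity_set() (3653-3662) advertises an integrity profile to the block layer: one tuple of tag_size bytes per protection interval, where the interval is the integrity block size. A quick standalone check of how interval_exp at 3662 comes out:

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
	unsigned sectors_per_block = 8;   /* e.g. 4096-byte integrity blocks */
	unsigned log2_spb = __builtin_ctz(sectors_per_block);   /* __ffs() */
	unsigned interval_exp = log2_spb + SECTOR_SHIFT;

	/* one tag covers each (1 << interval_exp)-byte interval */
	printf("interval = %u bytes\n", 1u << interval_exp);    /* 4096 */
	return 0;
}
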
3702 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3706 for (i = 0; i < ic->journal_sections; i++)
3711 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3717 sl = kvmalloc_array(ic->journal_sections,
3723 for (i = 0; i < ic->journal_sections; i++) {
3730 page_list_location(ic, i, 0, &start_index, &start_offset);
3731 page_list_location(ic, i, ic->journal_section_sectors - 1,
3739 dm_integrity_free_journal_scatterlist(ic, sl);
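
dm_integrity_alloc_journal_scatterlist() (3711-3739) builds one scatterlist per journal section so each section can be transformed as a unit; page_list_location() itself is not captured in this listing. A standalone model of that mapping, assuming the straightforward byte-offset split of a (section, sector) pair onto a contiguous page list:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SIZE 4096u

static void page_list_location_model(uint32_t section, uint32_t section_sectors,
				     uint32_t sector_in_section,
				     uint32_t *pl_index, uint32_t *pl_offset)
{
	uint64_t byte = ((uint64_t)section * section_sectors + sector_in_section)
			<< SECTOR_SHIFT;

	*pl_index = byte / PAGE_SIZE;    /* which page in the page list */
	*pl_offset = byte % PAGE_SIZE;   /* offset within that page */
}

int main(void)
{
	uint32_t idx, off;

	/* last sector of section 3, with 10-sector sections */
	page_list_location_model(3, 10, 9, &idx, &off);
	printf("page %u offset %u\n", idx, off);
	return 0;
}
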
3831 static int create_journal(struct dm_integrity_c *ic, char **error)
3839 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3840 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3841 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3842 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3844 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3852 ic->journal_pages = journal_pages;
3854 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3855 if (!ic->journal) {
3860 if (ic->journal_crypt_alg.alg_string) {
3864 comp.ic = ic;
3865 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3866 if (IS_ERR(ic->journal_crypt)) {
3868 r = PTR_ERR(ic->journal_crypt);
3869 ic->journal_crypt = NULL;
3872 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3873 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3875 if (ic->journal_crypt_alg.key) {
3876 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3877 ic->journal_crypt_alg.key_size);
3884 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3886 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3887 if (!ic->journal_io) {
3896 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3910 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3911 if (!ic->journal_xor) {
3917 sg = kvmalloc_array(ic->journal_pages + 1,
3925 sg_init_table(sg, ic->journal_pages + 1);
3926 for (i = 0; i < ic->journal_pages; i++) {
3927 char *va = lowmem_page_address(ic->journal_xor[i].page);
3932 sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
3935 PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
3941 r = dm_integrity_failed(ic);
3946 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3948 crypto_free_skcipher(ic->journal_crypt);
3949 ic->journal_crypt = NULL;
3953 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3974 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3975 if (!ic->journal_scatterlist) {
3980 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3981 if (!ic->journal_io_scatterlist) {
3986 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3989 if (!ic->sk_requests) {
3994 for (i = 0; i < ic->journal_sections; i++) {
4010 r = dm_integrity_failed(ic);
4016 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
4031 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
4032 ic->sk_requests[i] = section_req;
4043 if (ic->commit_ids[j] == ic->commit_ids[i]) {
4044 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
4048 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
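
The four commit IDs seeded at 3839-3842 tag journal sections so replay can tell which generation a section belongs to, and they must stay pairwise distinct even after the journal encryption pass may have transformed them. The fix-up loop at 4043-4044 bumps any collider and re-checks; condensed into a standalone form:

#include <stdint.h>
#include <stdio.h>

#define N_COMMIT_IDS 4

static void dedup_commit_ids(uint64_t id[N_COMMIT_IDS])
{
	for (unsigned i = 0; i < N_COMMIT_IDS; i++) {
retest:
		for (unsigned j = 0; j < i; j++) {
			if (id[j] == id[i]) {
				id[i]++;   /* bump, then re-check against all earlier IDs */
				goto retest;
			}
		}
	}
}

int main(void)
{
	uint64_t id[N_COMMIT_IDS] = { 1, 1, 2, 2 };

	dedup_commit_ids(id);
	printf("%llu %llu %llu %llu\n",
	       (unsigned long long)id[0], (unsigned long long)id[1],
	       (unsigned long long)id[2], (unsigned long long)id[3]); /* 1 2 3 4 */
	return 0;
}
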
4051 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
4057 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
4058 if (!ic->journal_tree) {
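
The journal tree sized at 4051 keeps one node per journal entry so lookups of "does this sector currently have a copy in the journal?" stay logarithmic. A standalone sketch of the sizing with an explicit overflow check (the kernel has its own guard near this line that the listing does not capture); journal_node_model is a hypothetical stand-in for struct journal_node:

#include <stdint.h>
#include <stdlib.h>

struct journal_node_model {
	void *left, *right, *parent;   /* rb-tree linkage stand-in */
	uint64_t sector;
};

static void *alloc_journal_tree(uint64_t entries)
{
	uint64_t size = entries * sizeof(struct journal_node_model);

	if (entries && size / entries != sizeof(struct journal_node_model))
		return NULL;   /* multiplication overflowed */
	return malloc(size);
}
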
4096 struct dm_integrity_c *ic;
4120 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
4121 if (!ic) {
4125 ti->private = ic;
4127 ic->ti = ti;
4129 ic->in_progress = RB_ROOT;
4130 INIT_LIST_HEAD(&ic->wait_list);
4131 init_waitqueue_head(&ic->endio_wait);
4132 bio_list_init(&ic->flush_bio_list);
4133 init_waitqueue_head(&ic->copy_to_journal_wait);
4134 init_completion(&ic->crypto_backoff);
4135 atomic64_set(&ic->number_of_mismatches, 0);
4136 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
4138 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
4149 ic->start = start;
4152 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
4161 ic->mode = argv[3][0];
4173 ic->sectors_per_block = 1;
4203 if (ic->meta_dev) {
4204 dm_put_device(ti, ic->meta_dev);
4205 ic->meta_dev = NULL;
4208 dm_table_get_mode(ti->table), &ic->meta_dev);
4221 ic->sectors_per_block = val >> SECTOR_SHIFT;
4230 ic->bitmap_flush_interval = msecs_to_jiffies(val);
4232 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4237 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4242 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4247 ic->recalculate_flag = true;
4249 ic->recalculate_flag = true;
4250 ic->reset_recalculate_flag = true;
4252 ic->discard = true;
4254 ic->fix_padding = true;
4256 ic->fix_hmac = true;
4258 ic->legacy_recalculate = true;
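
The constructor's option parsing (4203-4258) follows the usual dm-target pattern: positional arguments first, then "name:value" strings, with bare keywords simply setting flags. The strict-number idiom visible at 4152 relies on sscanf's trailing "%c" only matching when junk follows the number:

#include <stdio.h>

static int parse_u32_strict(const char *s, unsigned *out)
{
	char dummy;

	/* "%u%c" yields 2 on trailing junk ("512x"), 0 or EOF on garbage,
	 * and exactly 1 only for a clean number */
	if (sscanf(s, "%u%c", out, &dummy) != 1)
		return -1;
	return 0;
}

int main(void)
{
	unsigned v;

	printf("%d\n", parse_u32_strict("512", &v));    /* 0  */
	printf("%d\n", parse_u32_strict("512x", &v));   /* -1 */
	return 0;
}
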
4266 ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
4267 if (!ic->meta_dev)
4268 ic->meta_device_sectors = ic->data_device_sectors;
4270 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
4274 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4279 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
4281 r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
4286 r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4291 if (!ic->tag_size) {
4292 if (!ic->internal_hash) {
4297 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4299 if (ic->tag_size > MAX_TAG_SIZE) {
4304 if (!(ic->tag_size & (ic->tag_size - 1)))
4305 ic->log2_tag_size = __ffs(ic->tag_size);
4307 ic->log2_tag_size = -1;
4309 if (ic->mode == 'B' && !ic->internal_hash) {
4315 if (ic->discard && !ic->internal_hash) {
4321 ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4322 ic->autocommit_msec = sync_msec;
4323 timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4325 ic->io = dm_io_client_create();
4326 if (IS_ERR(ic->io)) {
4327 r = PTR_ERR(ic->io);
4328 ic->io = NULL;
4333 r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4339 r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
4345 ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4347 if (!ic->metadata_wq) {
4357 ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
4358 if (!ic->wait_wq) {
4364 ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4366 if (!ic->offload_wq) {
4372 ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4373 if (!ic->commit_wq) {
4378 INIT_WORK(&ic->commit_work, integrity_commit);
4380 if (ic->mode == 'J' || ic->mode == 'B') {
4381 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4382 if (!ic->writer_wq) {
4387 INIT_WORK(&ic->writer_work, integrity_writer);
4390 ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4391 if (!ic->sb) {
4397 r = sync_rw_sb(ic, REQ_OP_READ);
4403 if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4404 if (ic->mode != 'R') {
4405 if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4412 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4417 if (ic->mode != 'R')
4421 if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
4426 if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4431 if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4436 if (!le32_to_cpu(ic->sb->journal_sections)) {
4442 if (!ic->meta_dev) {
4443 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4444 ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4450 if (ic->sb->log2_interleave_sectors) {
4456 if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
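
On an existing device, the superblock read back at 4397 is validated field by field (4421-4456) before anything trusts it: version range, tag size, block size, journal size, interleave bounds, and that the journal-MAC flag matches what the table line requested. Condensed into a standalone validator over a simplified superblock model (field names and numeric bounds are stand-ins):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct sb_model {
	uint8_t version, log2_sectors_per_block, log2_interleave_sectors;
	uint16_t integrity_tag_size;
	uint32_t journal_sections;
	bool have_journal_mac;
};

static const char *validate_sb(const struct sb_model *sb, uint16_t tag_size,
			       uint8_t log2_spb, bool mac_requested,
			       bool has_meta_dev)
{
	if (!sb->version || sb->version > 5)
		return "unknown version";
	if (sb->integrity_tag_size != tag_size)
		return "tag size does not match the table";
	if (sb->log2_sectors_per_block != log2_spb)
		return "block size does not match the table";
	if (!sb->journal_sections)
		return "corrupted superblock (journal size)";
	if (!has_meta_dev && (sb->log2_interleave_sectors < 3 ||
			      sb->log2_interleave_sectors > 31))
		return "invalid interleave sectors";
	if (sb->have_journal_mac != mac_requested)
		return "journal mac mismatch";
	return NULL;   /* acceptable */
}
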
4462 get_provided_data_sectors(ic);
4463 if (!ic->provided_data_sectors) {
4470 r = calculate_device_limits(ic);
4472 if (ic->meta_dev) {
4473 if (ic->log2_buffer_sectors > 3) {
4474 ic->log2_buffer_sectors--;
4484 if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4485 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4487 bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4490 while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4493 log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4494 ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4496 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4498 n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4500 ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
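
Bitmap mode must fit one dirty bit for every block-sized region into the journal area, so 4487-4500 widen the per-bit granularity until the bit count fits, then round the bits up to whole bitmap blocks. A standalone model (BITMAP_BLOCK_SIZE is an illustrative stand-in for the kernel constant):

#include <stdint.h>

#define BITMAP_BLOCK_SIZE 4096   /* stand-in value */

static unsigned pick_log2_sectors_per_bit(uint64_t provided_data_sectors,
					  uint64_t bits_in_journal,
					  unsigned min_log2)
{
	unsigned lg = min_log2;

	/* widen the granularity until the bitmap fits, as at line 4490 */
	while (((provided_data_sectors + ((uint64_t)1 << lg) - 1) >> lg)
	       > bits_in_journal)
		lg++;
	return lg;
}

static uint64_t n_bitmap_blocks(uint64_t n_bits)
{
	/* DIV_ROUND_UP(n_bits, BITMAP_BLOCK_SIZE * 8), as at line 4500 */
	return (n_bits + BITMAP_BLOCK_SIZE * 8 - 1) / (BITMAP_BLOCK_SIZE * 8);
}
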
4502 if (!ic->meta_dev)
4503 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4505 if (ti->len > ic->provided_data_sectors) {
4512 threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4515 ic->free_sectors_threshold = threshold;
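
The journal watermark (4512-4515) converts a "start writeback at N% full" percentage into an absolute free-entry threshold. The intervening arithmetic is not fully captured in this listing; the sketch below assumes round-to-nearest division by 100:

#include <stdint.h>

static uint64_t watermark_threshold(uint64_t journal_entries,
				    unsigned watermark_pct)
{
	uint64_t t = journal_entries * (100 - watermark_pct);

	return (t + 50) / 100;   /* assumed rounding; cf. line 4512 */
}
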
4518 DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4519 DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size);
4520 DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4521 DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
4522 DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
4523 DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
4524 DEBUG_print(" journal_entries %u\n", ic->journal_entries);
4525 DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4526 DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
4527 DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
4528 DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
4529 DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
4530 DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4531 DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4534 if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4535 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4536 ic->sb->recalc_sector = cpu_to_le64(0);
4539 if (ic->internal_hash) {
4540 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4541 if (!ic->recalc_wq) {
4546 INIT_WORK(&ic->recalc_work, integrity_recalc);
4548 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4555 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4556 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4557 dm_integrity_disable_recalculate(ic)) {
4563 ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4564 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
4565 if (IS_ERR(ic->bufio)) {
4566 r = PTR_ERR(ic->bufio);
4568 ic->bufio = NULL;
4571 dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4573 if (ic->mode != 'R') {
4574 r = create_journal(ic, &ti->error);
4580 if (ic->mode == 'B') {
4582 unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4584 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4585 if (!ic->recalc_bitmap) {
4589 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4590 if (!ic->may_write_bitmap) {
4594 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4595 if (!ic->bbs) {
4599 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4600 for (i = 0; i < ic->n_bitmap_blocks; i++) {
4601 struct bitmap_block_status *bbs = &ic->bbs[i];
4605 bbs->ic = ic;
4614 bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4619 init_journal(ic, 0, ic->journal_sections, 0);
4620 r = dm_integrity_failed(ic);
4625 r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
4630 ic->just_formatted = true;
4633 if (!ic->meta_dev) {
4634 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4638 if (ic->mode == 'B') {
4641 max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4652 if (!ic->internal_hash)
4653 dm_integrity_set(ti, ic);
4657 if (ic->discard)
4671 struct dm_integrity_c *ic = ti->private;
4673 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4674 BUG_ON(!list_empty(&ic->wait_list));
4676 if (ic->mode == 'B')
4677 cancel_delayed_work_sync(&ic->bitmap_flush_work);
4678 if (ic->metadata_wq)
4679 destroy_workqueue(ic->metadata_wq);
4680 if (ic->wait_wq)
4681 destroy_workqueue(ic->wait_wq);
4682 if (ic->offload_wq)
4683 destroy_workqueue(ic->offload_wq);
4684 if (ic->commit_wq)
4685 destroy_workqueue(ic->commit_wq);
4686 if (ic->writer_wq)
4687 destroy_workqueue(ic->writer_wq);
4688 if (ic->recalc_wq)
4689 destroy_workqueue(ic->recalc_wq);
4690 kvfree(ic->bbs);
4691 if (ic->bufio)
4692 dm_bufio_client_destroy(ic->bufio);
4693 mempool_exit(&ic->recheck_pool);
4694 mempool_exit(&ic->journal_io_mempool);
4695 if (ic->io)
4696 dm_io_client_destroy(ic->io);
4697 if (ic->dev)
4698 dm_put_device(ti, ic->dev);
4699 if (ic->meta_dev)
4700 dm_put_device(ti, ic->meta_dev);
4701 dm_integrity_free_page_list(ic->journal);
4702 dm_integrity_free_page_list(ic->journal_io);
4703 dm_integrity_free_page_list(ic->journal_xor);
4704 dm_integrity_free_page_list(ic->recalc_bitmap);
4705 dm_integrity_free_page_list(ic->may_write_bitmap);
4706 if (ic->journal_scatterlist)
4707 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4708 if (ic->journal_io_scatterlist)
4709 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4710 if (ic->sk_requests) {
4713 for (i = 0; i < ic->journal_sections; i++) {
4716 req = ic->sk_requests[i];
4722 kvfree(ic->sk_requests);
4724 kvfree(ic->journal_tree);
4725 if (ic->sb)
4726 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4728 if (ic->internal_hash)
4729 crypto_free_shash(ic->internal_hash);
4730 free_alg(&ic->internal_hash_alg);
4732 if (ic->journal_crypt)
4733 crypto_free_skcipher(ic->journal_crypt);
4734 free_alg(&ic->journal_crypt_alg);
4736 if (ic->journal_mac)
4737 crypto_free_shash(ic->journal_mac);
4738 free_alg(&ic->journal_mac_alg);
4740 kfree(ic);
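
The destructor (4671-4740) doubles as the error-unwind path for the constructor, which can fail after any allocation: every resource is either tested before release or released through a NULL-tolerant helper (kvfree, dm_integrity_free_page_list), and teardown runs roughly in reverse order of setup, with the workqueues destroyed before the structures their work items touch are freed. A condensed standalone model of the pattern:

#include <stdlib.h>

struct model_ctx {
	void *wq;     /* stands in for a workqueue */
	void *pool;   /* stands in for a mempool / page list */
	void *sb;
};

static void model_dtr(struct model_ctx *c)
{
	if (c->wq)
		free(c->wq);   /* explicit test, like the destroy_workqueue() calls */
	free(c->pool);         /* free(NULL) is a no-op, like kvfree() */
	free(c->sb);
	free(c);
}
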