Lines matching defs:cc (struct crypt_config *) in drivers/md/dm-crypt.c

76 	struct crypt_config *cc;
104 int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
106 void (*dtr)(struct crypt_config *cc);
107 int (*init)(struct crypt_config *cc);
108 int (*wipe)(struct crypt_config *cc);
109 int (*generator)(struct crypt_config *cc, u8 *iv,
111 int (*post)(struct crypt_config *cc, u8 *iv,
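The six function-pointer fragments above (source lines 104-111) are the members of the per-IV-mode ops table, struct crypt_iv_operations; a reconstructed sketch follows, with the truncated argument lists completed from the generator/post call sites listed further down (those completions are assumptions, not part of this listing):

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv,
			 struct dm_crypt_request *dmreq);
	int (*post)(struct crypt_config *cc, u8 *iv,
		    struct dm_crypt_request *dmreq);
};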
247 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
250 static bool crypt_integrity_aead(struct crypt_config *cc);
255 static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
257 return cc->cipher_tfm.tfms[0];
260 static struct crypto_aead *any_tfm_aead(struct crypt_config *cc)
262 return cc->cipher_tfm.tfms_aead[0];
321 static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
324 memset(iv, 0, cc->iv_size);
330 static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
333 memset(iv, 0, cc->iv_size);
339 static int crypt_iv_plain64be_gen(struct crypt_config *cc, u8 *iv,
342 memset(iv, 0, cc->iv_size);
344 *(__be64 *)&iv[cc->iv_size - sizeof(u64)] = cpu_to_be64(dmreq->iv_sector);
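For orientation, a minimal reconstruction of the plain64 generator implied by the fragments above: the IV is zeroed and the 64-bit sector number stored little-endian at its start, while plain64be (source line 344) stores it big-endian in the last 8 bytes instead. The body beyond the listed memset is an assumption:

static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				struct dm_crypt_request *dmreq)
{
	/* zero the IV, then place the request's sector number little-endian */
	memset(iv, 0, cc->iv_size);
	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);

	return 0;
}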
349 static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
356 memset(iv, 0, cc->iv_size);
362 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
368 if (crypt_integrity_aead(cc))
369 bs = crypto_aead_blocksize(any_tfm_aead(cc));
371 bs = crypto_skcipher_blocksize(any_tfm(cc));
388 cc->iv_gen_private.benbi.shift = 9 - log;
393 static void crypt_iv_benbi_dtr(struct crypt_config *cc)
397 static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
402 memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
404 val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
405 put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
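A summary note on the benbi fragments above, reconstructing the intent from source lines 388 and 402-405:

/*
 * benbi: the IV carries a big-endian, 1-based count of cipher blocks.
 * With shift = 9 - log2(cipher block size), the value
 * ((iv_sector << shift) + 1) indexes the first cipher block of the
 * sector; it is stored unaligned in the last 8 bytes of the IV, and the
 * leading bytes stay zero.
 */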
410 static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
413 memset(iv, 0, cc->iv_size);
418 static void crypt_iv_lmk_dtr(struct crypt_config *cc)
420 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
430 static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
433 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
435 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
448 if (cc->key_parts == cc->tfms_count) {
455 crypt_iv_lmk_dtr(cc);
463 static int crypt_iv_lmk_init(struct crypt_config *cc)
465 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
466 int subkey_size = cc->key_size / cc->key_parts;
470 memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
476 static int crypt_iv_lmk_wipe(struct crypt_config *cc)
478 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
486 static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
490 struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
529 memcpy(iv, &md5state.hash, cc->iv_size);
534 static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
542 sg = crypt_get_sg_data(cc, dmreq->sg_in);
544 r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
547 memset(iv, 0, cc->iv_size);
552 static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
562 sg = crypt_get_sg_data(cc, dmreq->sg_out);
564 r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
568 crypto_xor(dst + sg->offset, iv, cc->iv_size);
574 static void crypt_iv_tcw_dtr(struct crypt_config *cc)
576 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
588 static int crypt_iv_tcw_ctr(struct crypt_config *cc, struct dm_target *ti,
591 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
593 if (cc->sector_size != (1 << SECTOR_SHIFT)) {
598 if (cc->key_size <= (cc->iv_size + TCW_WHITENING_SIZE)) {
610 tcw->iv_seed = kzalloc(cc->iv_size, GFP_KERNEL);
613 crypt_iv_tcw_dtr(cc);
621 static int crypt_iv_tcw_init(struct crypt_config *cc)
623 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
624 int key_offset = cc->key_size - cc->iv_size - TCW_WHITENING_SIZE;
626 memcpy(tcw->iv_seed, &cc->key[key_offset], cc->iv_size);
627 memcpy(tcw->whitening, &cc->key[key_offset + cc->iv_size],
633 static int crypt_iv_tcw_wipe(struct crypt_config *cc)
635 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
637 memset(tcw->iv_seed, 0, cc->iv_size);
643 static int crypt_iv_tcw_whitening(struct crypt_config *cc,
647 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
675 static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
679 struct iv_tcw_private *tcw = &cc->iv_gen_private.tcw;
686 sg = crypt_get_sg_data(cc, dmreq->sg_in);
688 r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
694 if (cc->iv_size > 8)
696 cc->iv_size - 8);
701 static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
712 sg = crypt_get_sg_data(cc, dmreq->sg_out);
714 r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
720 static int crypt_iv_random_gen(struct crypt_config *cc, u8 *iv,
724 get_random_bytes(iv, cc->iv_size);
728 static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
731 if (crypt_integrity_aead(cc)) {
736 if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
744 static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
747 struct crypto_skcipher *tfm = any_tfm(cc);
758 req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
765 memset(buf, 0, cc->iv_size);
766 *(__le64 *)buf = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
768 sg_init_one(&src, page_address(ZERO_PAGE(0)), cc->iv_size);
769 sg_init_one(&dst, iv, cc->iv_size);
770 skcipher_request_set_crypt(req, &src, &dst, cc->iv_size, buf);
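Reading source lines 758-770 together: EBOIV derives the sector IV by encrypting one zero block with the data cipher, using the little-endian byte offset (iv_sector * sector_size) as the chaining IV. A summary note, with the CBC equivalence added as an observation rather than something shown in this listing:

/*
 * EBOIV: iv = E_key(zero block) chained with IV = little-endian byte
 * offset of the sector.  For a CBC transform this equals E_key(offset),
 * because CBC XORs the all-zero plaintext block with the chaining IV
 * before encrypting it.
 */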
778 static void crypt_iv_elephant_dtr(struct crypt_config *cc)
780 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
786 static int crypt_iv_elephant_ctr(struct crypt_config *cc, struct dm_target *ti,
789 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
800 r = crypt_iv_eboiv_ctr(cc, ti, NULL);
802 crypt_iv_elephant_dtr(cc);
950 static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *dmreq)
952 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
968 *(__le64 *)es = cpu_to_le64(dmreq->iv_sector * cc->sector_size);
986 sg = crypt_get_sg_data(cc, dmreq->sg_out);
992 sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
994 memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
999 diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
1000 diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1001 diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1002 diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
1005 for (i = 0; i < (cc->sector_size / 32); i++)
1009 diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
1010 diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1011 diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
1012 diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
1023 static int crypt_iv_elephant_gen(struct crypt_config *cc, u8 *iv,
1029 r = crypt_iv_elephant(cc, dmreq);
1034 return crypt_iv_eboiv_gen(cc, iv, dmreq);
1037 static int crypt_iv_elephant_post(struct crypt_config *cc, u8 *iv,
1041 return crypt_iv_elephant(cc, dmreq);
1046 static int crypt_iv_elephant_init(struct crypt_config *cc)
1048 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1049 int key_offset = cc->key_size - cc->key_extra_size;
1051 return crypto_skcipher_setkey(elephant->tfm, &cc->key[key_offset], cc->key_extra_size);
1054 static int crypt_iv_elephant_wipe(struct crypt_config *cc)
1056 struct iv_elephant_private *elephant = &cc->iv_gen_private.elephant;
1059 memset(key, 0, cc->key_extra_size);
1060 return crypto_skcipher_setkey(elephant->tfm, key, cc->key_extra_size);
1128 static bool crypt_integrity_aead(struct crypt_config *cc)
1130 return test_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
1133 static bool crypt_integrity_hmac(struct crypt_config *cc)
1135 return crypt_integrity_aead(cc) && cc->key_mac_size;
1139 static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
1142 if (unlikely(crypt_integrity_aead(cc)))
1154 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1161 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1163 bip->bip_iter.bi_sector = io->cc->start + io->sector;
1173 static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
1176 struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
1185 if (bi->tag_size != cc->on_disk_tag_size ||
1186 bi->tuple_size != cc->on_disk_tag_size) {
1190 if (1 << bi->interval_exp != cc->sector_size) {
1195 if (crypt_integrity_aead(cc)) {
1196 cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
1198 cc->integrity_tag_size, cc->integrity_iv_size);
1200 if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
1204 } else if (cc->integrity_iv_size)
1206 cc->integrity_iv_size);
1208 if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
1220 static void crypt_convert_init(struct crypt_config *cc,
1231 ctx->cc_sector = sector + cc->iv_offset;
1235 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
1238 return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
1241 static void *req_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq)
1243 return (void *)((char *)dmreq - cc->dmreq_start);
1246 static u8 *iv_of_dmreq(struct crypt_config *cc,
1249 if (crypt_integrity_aead(cc))
1251 crypto_aead_alignmask(any_tfm_aead(cc)) + 1);
1254 crypto_skcipher_alignmask(any_tfm(cc)) + 1);
1257 static u8 *org_iv_of_dmreq(struct crypt_config *cc,
1260 return iv_of_dmreq(cc, dmreq) + cc->iv_size;
1263 static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
1266 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
1271 static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
1274 u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
1275 cc->iv_size + sizeof(uint64_t);
1280 static void *tag_from_dmreq(struct crypt_config *cc,
1286 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1287 cc->on_disk_tag_size];
1290 static void *iv_tag_from_dmreq(struct crypt_config *cc,
1293 return tag_from_dmreq(cc, dmreq) + cc->integrity_tag_size;
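The offset helpers above walk a single scratch allocation from cc->req_pool; a comment sketch of the layout they imply (the alignment padding between the crypto request and struct dm_crypt_request comes from the cc->dmreq_start / iv_size_padding setup at source lines 3291-3318 and is only summarised here):

/*
 * One cc->req_pool element, as implied by iv_of_dmreq() and friends:
 *
 *   offset 0                              crypto request (+ reqsize)
 *   cc->dmreq_start                       struct dm_crypt_request
 *   iv_of_dmreq()                         iv          (cc->iv_size)
 *   iv_of_dmreq() + cc->iv_size           org_iv      (cc->iv_size)
 *   iv_of_dmreq() + 2 * cc->iv_size       org_sector  (__le64)
 *   iv_of_dmreq() + 2 * cc->iv_size + 8   org_tag     (unsigned int)
 */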
1296 static int crypt_convert_block_aead(struct crypt_config *cc,
1308 BUG_ON(cc->integrity_iv_size && cc->integrity_iv_size != cc->iv_size);
1311 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1314 dmreq = dmreq_of_req(cc, req);
1316 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1317 dmreq->iv_sector >>= cc->sector_shift;
1320 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1322 sector = org_sector_of_dmreq(cc, dmreq);
1323 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1325 iv = iv_of_dmreq(cc, dmreq);
1326 org_iv = org_iv_of_dmreq(cc, dmreq);
1327 tag = tag_from_dmreq(cc, dmreq);
1328 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1337 sg_set_buf(&dmreq->sg_in[1], org_iv, cc->iv_size);
1338 sg_set_page(&dmreq->sg_in[2], bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1339 sg_set_buf(&dmreq->sg_in[3], tag, cc->integrity_tag_size);
1343 sg_set_buf(&dmreq->sg_out[1], org_iv, cc->iv_size);
1344 sg_set_page(&dmreq->sg_out[2], bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1345 sg_set_buf(&dmreq->sg_out[3], tag, cc->integrity_tag_size);
1347 if (cc->iv_gen_ops) {
1349 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1350 memcpy(org_iv, tag_iv, cc->iv_size);
1352 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1356 if (cc->integrity_iv_size)
1357 memcpy(tag_iv, org_iv, cc->iv_size);
1360 memcpy(iv, org_iv, cc->iv_size);
1363 aead_request_set_ad(req, sizeof(uint64_t) + cc->iv_size);
1366 cc->sector_size, iv);
1368 if (cc->integrity_tag_size + cc->integrity_iv_size != cc->on_disk_tag_size)
1369 memset(tag + cc->integrity_tag_size + cc->integrity_iv_size, 0,
1370 cc->on_disk_tag_size - (cc->integrity_tag_size + cc->integrity_iv_size));
1373 cc->sector_size + cc->integrity_tag_size, iv);
1389 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1390 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1392 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1393 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
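The sg_set_* calls above build four-entry scatterlists for the AEAD path; the sg[0] entries do not reference cc and therefore do not appear in this cc-only listing, but from the aead_request_set_ad() size at source line 1363 they can be assumed to carry the 64-bit original sector. A layout note:

/*
 * AEAD scatterlist layout (sg_in and sg_out are built the same way):
 *
 *   sg[0]  original sector (u64)        \  associated data only
 *   sg[1]  org_iv  (cc->iv_size)        /  (set_ad = sizeof(u64) + iv_size)
 *   sg[2]  payload (cc->sector_size)       encrypted and authenticated
 *   sg[3]  tag (cc->integrity_tag_size)    authentication tag
 */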
1398 static int crypt_convert_block_skcipher(struct crypt_config *cc,
1412 if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
1415 dmreq = dmreq_of_req(cc, req);
1417 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
1418 dmreq->iv_sector >>= cc->sector_shift;
1421 *org_tag_of_dmreq(cc, dmreq) = tag_offset;
1423 iv = iv_of_dmreq(cc, dmreq);
1424 org_iv = org_iv_of_dmreq(cc, dmreq);
1425 tag_iv = iv_tag_from_dmreq(cc, dmreq);
1427 sector = org_sector_of_dmreq(cc, dmreq);
1428 *sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
1435 sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
1438 sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
1440 if (cc->iv_gen_ops) {
1442 if (cc->integrity_iv_size && bio_data_dir(ctx->bio_in) != WRITE) {
1443 memcpy(org_iv, tag_iv, cc->integrity_iv_size);
1445 r = cc->iv_gen_ops->generator(cc, org_iv, dmreq);
1449 if (test_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags))
1452 if (cc->integrity_iv_size)
1453 memcpy(tag_iv, org_iv, cc->integrity_iv_size);
1456 memcpy(iv, org_iv, cc->iv_size);
1459 skcipher_request_set_crypt(req, sg_in, sg_out, cc->sector_size, iv);
1466 if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
1467 r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
1469 bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
1470 bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
1477 static int crypt_alloc_req_skcipher(struct crypt_config *cc,
1480 unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
1483 ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1488 skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]);
1496 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
1501 static int crypt_alloc_req_aead(struct crypt_config *cc,
1505 ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
1510 aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]);
1518 kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
1523 static int crypt_alloc_req(struct crypt_config *cc,
1526 if (crypt_integrity_aead(cc))
1527 return crypt_alloc_req_aead(cc, ctx);
1529 return crypt_alloc_req_skcipher(cc, ctx);
1532 static void crypt_free_req_skcipher(struct crypt_config *cc,
1535 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1538 mempool_free(req, &cc->req_pool);
1541 static void crypt_free_req_aead(struct crypt_config *cc,
1544 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1547 mempool_free(req, &cc->req_pool);
1550 static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_bio)
1552 if (crypt_integrity_aead(cc))
1553 crypt_free_req_aead(cc, req, base_bio);
1555 crypt_free_req_skcipher(cc, req, base_bio);
1561 static blk_status_t crypt_convert(struct crypt_config *cc,
1565 unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
1578 r = crypt_alloc_req(cc, ctx);
1586 if (crypt_integrity_aead(cc))
1587 r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
1589 r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
1655 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
1679 struct crypt_config *cc = io->cc;
1688 mutex_lock(&cc->bio_alloc_lock);
1690 clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
1691 GFP_NOIO, &cc->bs);
1705 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) +
1712 percpu_counter_add(&cc->n_allocated_pages, 1 << order);
1719 pages = mempool_alloc(&cc->page_pool, gfp_mask);
1721 crypt_free_buffer_pages(cc, clone);
1736 crypt_free_buffer_pages(cc, clone);
1742 mutex_unlock(&cc->bio_alloc_lock);
1747 static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
1754 percpu_counter_sub(&cc->n_allocated_pages,
1758 mempool_free(&fi.folio->page, &cc->page_pool);
1764 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1767 io->cc = cc;
1792 struct crypt_config *cc = io->cc;
1800 cc->on_disk_tag_size && bio_data_dir(base_bio) == READ) {
1809 crypt_free_req(cc, io->ctx.r.req, base_bio);
1812 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1841 struct crypt_config *cc = io->cc;
1854 crypt_free_buffer_pages(cc, clone);
1873 struct crypt_config *cc = io->cc;
1885 clone->bi_iter.bi_sector = cc->start + io->sector;
1886 crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
1898 clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
1906 clone->bi_iter.bi_sector = cc->start + io->sector;
1930 struct crypt_config *cc = io->cc;
1933 queue_work(cc->io_queue, &io->work);
1947 struct crypt_config *cc = data;
1954 spin_lock_irq(&cc->write_thread_lock);
1957 if (!RB_EMPTY_ROOT(&cc->write_tree))
1962 spin_unlock_irq(&cc->write_thread_lock);
1971 spin_lock_irq(&cc->write_thread_lock);
1975 write_tree = cc->write_tree;
1976 cc->write_tree = RB_ROOT;
1977 spin_unlock_irq(&cc->write_thread_lock);
2000 struct crypt_config *cc = io->cc;
2006 crypt_free_buffer_pages(cc, clone);
2015 clone->bi_iter.bi_sector = cc->start + io->sector;
2017 if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
2018 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
2023 spin_lock_irqsave(&cc->write_thread_lock, flags);
2024 if (RB_EMPTY_ROOT(&cc->write_tree))
2025 wake_up_process(cc->write_thread);
2026 rbp = &cc->write_tree.rb_node;
2037 rb_insert_color(&io->rb_node, &cc->write_tree);
2038 spin_unlock_irqrestore(&cc->write_thread_lock, flags);
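A summary of the write submission path visible at source lines 2000-2038: unless DM_CRYPT_NO_OFFLOAD or DM_CRYPT_NO_WRITE_WORKQUEUE short-circuits it, the encrypted clone is queued on an rbtree and later submitted by the dmcrypt_write kthread; the sort key is taken to be the io sector, which this listing only shows indirectly.

/*
 * Deferred write path: kcryptd_crypt_write_io_submit() inserts the io
 * into cc->write_tree under cc->write_thread_lock and wakes
 * cc->write_thread if the tree was empty; dmcrypt_write() later detaches
 * the whole tree and submits the bios in sorted order.
 */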
2041 static bool kcryptd_crypt_write_inline(struct crypt_config *cc,
2045 if (!test_bit(DM_CRYPT_WRITE_INLINE, &cc->flags))
2065 struct crypt_config *cc = io->cc;
2074 r = crypt_convert(cc, &io->ctx, true, false);
2078 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2095 struct crypt_config *cc = io->cc;
2106 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
2117 if (crypt_integrity_aead(cc)) {
2126 r = crypt_convert(cc, ctx,
2127 test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true);
2135 queue_work(cc->crypt_queue, &io->work);
2141 if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) {
2164 crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
2173 struct crypt_config *cc = io->cc;
2179 r = crypt_convert(cc, &io->ctx, true, false);
2191 struct crypt_config *cc = io->cc;
2197 io->ctx.cc_sector = io->sector + cc->iv_offset;
2198 r = crypt_convert(cc, &io->ctx,
2199 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2201 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2204 r = crypt_convert(cc, &io->ctx,
2205 test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true);
2213 queue_work(cc->crypt_queue, &io->work);
2230 struct crypt_config *cc = io->cc;
2242 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
2243 error = cc->iv_gen_ops->post(cc, org_iv_of_dmreq(cc, dmreq), dmreq);
2246 sector_t s = le64_to_cpu(*org_sector_of_dmreq(cc, dmreq));
2259 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2273 if (kcryptd_crypt_write_inline(cc, ctx)) {
2293 struct crypt_config *cc = io->cc;
2295 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2296 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2313 queue_work(cc->crypt_queue, &io->work);
2316 static void crypt_free_tfms_aead(struct crypt_config *cc)
2318 if (!cc->cipher_tfm.tfms_aead)
2321 if (cc->cipher_tfm.tfms_aead[0] && !IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2322 crypto_free_aead(cc->cipher_tfm.tfms_aead[0]);
2323 cc->cipher_tfm.tfms_aead[0] = NULL;
2326 kfree(cc->cipher_tfm.tfms_aead);
2327 cc->cipher_tfm.tfms_aead = NULL;
2330 static void crypt_free_tfms_skcipher(struct crypt_config *cc)
2334 if (!cc->cipher_tfm.tfms)
2337 for (i = 0; i < cc->tfms_count; i++)
2338 if (cc->cipher_tfm.tfms[i] && !IS_ERR(cc->cipher_tfm.tfms[i])) {
2339 crypto_free_skcipher(cc->cipher_tfm.tfms[i]);
2340 cc->cipher_tfm.tfms[i] = NULL;
2343 kfree(cc->cipher_tfm.tfms);
2344 cc->cipher_tfm.tfms = NULL;
2347 static void crypt_free_tfms(struct crypt_config *cc)
2349 if (crypt_integrity_aead(cc))
2350 crypt_free_tfms_aead(cc);
2352 crypt_free_tfms_skcipher(cc);
2355 static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
2360 cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
2363 if (!cc->cipher_tfm.tfms)
2366 for (i = 0; i < cc->tfms_count; i++) {
2367 cc->cipher_tfm.tfms[i] = crypto_alloc_skcipher(ciphermode, 0,
2369 if (IS_ERR(cc->cipher_tfm.tfms[i])) {
2370 err = PTR_ERR(cc->cipher_tfm.tfms[i]);
2371 crypt_free_tfms(cc);
2382 crypto_skcipher_alg(any_tfm(cc))->base.cra_driver_name);
2386 static int crypt_alloc_tfms_aead(struct crypt_config *cc, char *ciphermode)
2390 cc->cipher_tfm.tfms = kmalloc(sizeof(struct crypto_aead *), GFP_KERNEL);
2391 if (!cc->cipher_tfm.tfms)
2394 cc->cipher_tfm.tfms_aead[0] = crypto_alloc_aead(ciphermode, 0,
2396 if (IS_ERR(cc->cipher_tfm.tfms_aead[0])) {
2397 err = PTR_ERR(cc->cipher_tfm.tfms_aead[0]);
2398 crypt_free_tfms(cc);
2403 crypto_aead_alg(any_tfm_aead(cc))->base.cra_driver_name);
2407 static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
2409 if (crypt_integrity_aead(cc))
2410 return crypt_alloc_tfms_aead(cc, ciphermode);
2412 return crypt_alloc_tfms_skcipher(cc, ciphermode);
2415 static unsigned int crypt_subkey_size(struct crypt_config *cc)
2417 return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
2420 static unsigned int crypt_authenckey_size(struct crypt_config *cc)
2422 return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
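A hypothetical worked example for the two sizing helpers above (the numbers are chosen for illustration and are not taken from this listing):

/*
 * With key_size = 128 bytes, key_extra_size = 0 and tfms_count = 2:
 *
 *   crypt_subkey_size()     = (128 - 0) >> ilog2(2) = 64 bytes per tfm
 *   crypt_authenckey_size() = 64 +
 *                 RTA_SPACE(sizeof(struct crypto_authenc_key_param))
 *
 * i.e. the authenc key buffer is one subkey plus the rtattr parameter
 * block that crypt_copy_authenckey() lays out for the AEAD setkey format.
 */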
2428 * This function converts cc->key to this special format.
2447 static int crypt_setkey(struct crypt_config *cc)
2453 subkey_size = crypt_subkey_size(cc);
2455 if (crypt_integrity_hmac(cc)) {
2456 if (subkey_size < cc->key_mac_size)
2459 crypt_copy_authenckey(cc->authenc_key, cc->key,
2460 subkey_size - cc->key_mac_size,
2461 cc->key_mac_size);
2464 for (i = 0; i < cc->tfms_count; i++) {
2465 if (crypt_integrity_hmac(cc))
2466 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2467 cc->authenc_key, crypt_authenckey_size(cc));
2468 else if (crypt_integrity_aead(cc))
2469 r = crypto_aead_setkey(cc->cipher_tfm.tfms_aead[i],
2470 cc->key + (i * subkey_size),
2473 r = crypto_skcipher_setkey(cc->cipher_tfm.tfms[i],
2474 cc->key + (i * subkey_size),
2480 if (crypt_integrity_hmac(cc))
2481 memzero_explicit(cc->authenc_key, crypt_authenckey_size(cc));
2496 static int set_key_user(struct crypt_config *cc, struct key *key)
2504 if (cc->key_size != ukp->datalen)
2507 memcpy(cc->key, ukp->data, cc->key_size);
2512 static int set_key_encrypted(struct crypt_config *cc, struct key *key)
2520 if (cc->key_size != ekp->decrypted_datalen)
2523 memcpy(cc->key, ekp->decrypted_data, cc->key_size);
2528 static int set_key_trusted(struct crypt_config *cc, struct key *key)
2536 if (cc->key_size != tkp->key_len)
2539 memcpy(cc->key, tkp->key, cc->key_size);
2544 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2550 int (*set_key)(struct crypt_config *cc, struct key *key);
2596 ret = set_key(cc, key);
2608 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2610 ret = crypt_setkey(cc);
2613 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2614 kfree_sensitive(cc->key_string);
2615 cc->key_string = new_key_string;
2647 static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string)
2659 static int crypt_set_key(struct crypt_config *cc, char *key)
2665 if (!cc->key_size && strcmp(key, "-"))
2670 r = crypt_set_keyring_key(cc, key + 1);
2675 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2678 kfree_sensitive(cc->key_string);
2679 cc->key_string = NULL;
2682 if (cc->key_size && hex2bin(cc->key, key, cc->key_size) < 0)
2685 r = crypt_setkey(cc);
2687 set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2696 static int crypt_wipe_key(struct crypt_config *cc)
2700 clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
2701 get_random_bytes(&cc->key, cc->key_size);
2704 if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
2705 r = cc->iv_gen_ops->wipe(cc);
2710 kfree_sensitive(cc->key_string);
2711 cc->key_string = NULL;
2712 r = crypt_setkey(cc);
2713 memset(&cc->key, 0, cc->key_size * sizeof(u8));
2733 struct crypt_config *cc = pool_data;
2741 if (unlikely(percpu_counter_read_positive(&cc->n_allocated_pages) >= dm_crypt_pages_per_client) &&
2747 percpu_counter_add(&cc->n_allocated_pages, 1);
2754 struct crypt_config *cc = pool_data;
2757 percpu_counter_sub(&cc->n_allocated_pages, 1);
2762 struct crypt_config *cc = ti->private;
2766 if (!cc)
2769 if (cc->write_thread)
2770 kthread_stop(cc->write_thread);
2772 if (cc->io_queue)
2773 destroy_workqueue(cc->io_queue);
2774 if (cc->crypt_queue)
2775 destroy_workqueue(cc->crypt_queue);
2777 if (cc->workqueue_id)
2778 ida_free(&workqueue_ida, cc->workqueue_id);
2780 crypt_free_tfms(cc);
2782 bioset_exit(&cc->bs);
2784 mempool_exit(&cc->page_pool);
2785 mempool_exit(&cc->req_pool);
2786 mempool_exit(&cc->tag_pool);
2788 WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2789 percpu_counter_destroy(&cc->n_allocated_pages);
2791 if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2792 cc->iv_gen_ops->dtr(cc);
2794 if (cc->dev)
2795 dm_put_device(ti, cc->dev);
2797 kfree_sensitive(cc->cipher_string);
2798 kfree_sensitive(cc->key_string);
2799 kfree_sensitive(cc->cipher_auth);
2800 kfree_sensitive(cc->authenc_key);
2802 mutex_destroy(&cc->bio_alloc_lock);
2805 kfree_sensitive(cc);
2818 struct crypt_config *cc = ti->private;
2820 if (crypt_integrity_aead(cc))
2821 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2823 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
2825 if (cc->iv_size)
2827 cc->iv_size = max(cc->iv_size,
2836 cc->iv_gen_ops = NULL;
2838 cc->iv_gen_ops = &crypt_iv_plain_ops;
2840 cc->iv_gen_ops = &crypt_iv_plain64_ops;
2842 cc->iv_gen_ops = &crypt_iv_plain64be_ops;
2844 cc->iv_gen_ops = &crypt_iv_essiv_ops;
2846 cc->iv_gen_ops = &crypt_iv_benbi_ops;
2848 cc->iv_gen_ops = &crypt_iv_null_ops;
2850 cc->iv_gen_ops = &crypt_iv_eboiv_ops;
2852 cc->iv_gen_ops = &crypt_iv_elephant_ops;
2853 cc->key_parts = 2;
2854 cc->key_extra_size = cc->key_size / 2;
2855 if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
2857 set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
2859 cc->iv_gen_ops = &crypt_iv_lmk_ops;
2866 if (cc->key_size % cc->key_parts) {
2867 cc->key_parts++;
2868 cc->key_extra_size = cc->key_size / cc->key_parts;
2871 cc->iv_gen_ops = &crypt_iv_tcw_ops;
2872 cc->key_parts += 2; /* IV + whitening */
2873 cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
2875 cc->iv_gen_ops = &crypt_iv_random_ops;
2877 cc->integrity_iv_size = cc->iv_size;
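The assignments above select cc->iv_gen_ops per IV mode; the mode strings are compared on lines that never mention cc and so are absent from this listing, so the mapping below is reconstructed from the IV implementations named elsewhere in it and should be read as an assumption:

/*
 * ivmode        cc->iv_gen_ops
 * ------------  -----------------------
 * (none)        NULL
 * "plain"       crypt_iv_plain_ops
 * "plain64"     crypt_iv_plain64_ops
 * "plain64be"   crypt_iv_plain64be_ops
 * "essiv"       crypt_iv_essiv_ops
 * "benbi"       crypt_iv_benbi_ops
 * "null"        crypt_iv_null_ops
 * "eboiv"       crypt_iv_eboiv_ops
 * "elephant"    crypt_iv_elephant_ops
 * "lmk"         crypt_iv_lmk_ops
 * "tcw"         crypt_iv_tcw_ops
 * "random"      crypt_iv_random_ops
 */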
2891 static int crypt_ctr_auth_cipher(struct crypt_config *cc, char *cipher_api)
2914 cc->key_mac_size = crypto_ahash_digestsize(mac);
2917 cc->authenc_key = kmalloc(crypt_authenckey_size(cc), GFP_KERNEL);
2918 if (!cc->authenc_key)
2927 struct crypt_config *cc = ti->private;
2931 cc->tfms_count = 1;
2955 if (crypt_integrity_aead(cc)) {
2956 ret = crypt_ctr_auth_cipher(cc, cipher_api);
2964 cc->tfms_count = 64;
2980 cc->key_parts = cc->tfms_count;
2983 ret = crypt_alloc_tfms(cc, cipher_api);
2989 if (crypt_integrity_aead(cc))
2990 cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
2992 cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
3000 struct crypt_config *cc = ti->private;
3006 if (strchr(cipher_in, '(') || crypt_integrity_aead(cc)) {
3020 cc->tfms_count = 1;
3021 else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
3022 !is_power_of_2(cc->tfms_count)) {
3026 cc->key_parts = cc->tfms_count;
3068 ret = crypt_alloc_tfms(cc, cipher_api);
3084 struct crypt_config *cc = ti->private;
3088 cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
3089 if (!cc->cipher_string) {
3107 ret = crypt_set_key(cc, key);
3114 if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
3115 ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
3123 if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
3124 ret = cc->iv_gen_ops->init(cc);
3132 if (cc->key_string)
3133 memset(cc->key, 0, cc->key_size * sizeof(u8));
3140 struct crypt_config *cc = ti->private;
3169 set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3171 set_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
3174 set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3176 set_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3178 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3184 cc->on_disk_tag_size = val;
3187 set_bit(CRYPT_MODE_INTEGRITY_AEAD, &cc->cipher_flags);
3193 cc->cipher_auth = kstrdup(sval, GFP_KERNEL);
3194 if (!cc->cipher_auth)
3196 } else if (sscanf(opt_string, "sector_size:%hu%c", &cc->sector_size, &dummy) == 1) {
3197 if (cc->sector_size < (1 << SECTOR_SHIFT) ||
3198 cc->sector_size > 4096 ||
3199 (cc->sector_size & (cc->sector_size - 1))) {
3203 if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
3207 cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
3209 set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3223 struct crypt_config *cc = ti->private;
3225 return dm_report_zones(cc->dev->bdev, cc->start,
3226 cc->start + dm_target_offset(ti, args->next_sector),
3239 struct crypt_config *cc;
3260 cc = kzalloc(struct_size(cc, key, key_size), GFP_KERNEL);
3261 if (!cc) {
3265 cc->key_size = key_size;
3266 cc->sector_size = (1 << SECTOR_SHIFT);
3267 cc->sector_shift = 0;
3269 ti->private = cc;
3276 ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3291 if (crypt_integrity_aead(cc)) {
3292 cc->dmreq_start = sizeof(struct aead_request);
3293 cc->dmreq_start += crypto_aead_reqsize(any_tfm_aead(cc));
3294 align_mask = crypto_aead_alignmask(any_tfm_aead(cc));
3296 cc->dmreq_start = sizeof(struct skcipher_request);
3297 cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
3298 align_mask = crypto_skcipher_alignmask(any_tfm(cc));
3300 cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
3304 iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
3317 iv_size_padding + cc->iv_size +
3318 cc->iv_size +
3322 ret = mempool_init_kmalloc_pool(&cc->req_pool, MIN_IOS, cc->dmreq_start + additional_req_size);
3328 cc->per_bio_data_size = ti->per_io_data_size =
3329 ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3332 ret = mempool_init(&cc->page_pool, BIO_MAX_VECS, crypt_page_alloc, crypt_page_free, cc);
3338 ret = bioset_init(&cc->bs, MIN_IOS, 0, BIOSET_NEED_BVECS);
3344 mutex_init(&cc->bio_alloc_lock);
3348 (tmpll & ((cc->sector_size >> SECTOR_SHIFT) - 1))) {
3352 cc->iv_offset = tmpll;
3354 ret = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev);
3365 cc->start = tmpll;
3367 if (bdev_is_zoned(cc->dev->bdev)) {
3373 set_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3374 set_bit(DM_CRYPT_WRITE_INLINE, &cc->flags);
3391 if (crypt_integrity_aead(cc) || cc->integrity_iv_size) {
3392 ret = crypt_integrity_ctr(cc, ti);
3396 cc->tag_pool_max_sectors = POOL_ENTRY_SIZE / cc->on_disk_tag_size;
3397 if (!cc->tag_pool_max_sectors)
3398 cc->tag_pool_max_sectors = 1;
3400 ret = mempool_init_kmalloc_pool(&cc->tag_pool, MIN_IOS,
3401 cc->tag_pool_max_sectors * cc->on_disk_tag_size);
3407 cc->tag_pool_max_sectors <<= cc->sector_shift;
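A hypothetical sizing example for the tag pool above; POOL_ENTRY_SIZE is not part of this listing, so 512 bytes is assumed purely for illustration:

/*
 * Assuming POOL_ENTRY_SIZE == 512 and on_disk_tag_size == 16:
 *   tag_pool_max_sectors = 512 / 16 = 32           (source line 3396)
 *   mempool element size = 32 * 16  = 512 bytes    (source line 3401)
 * and with 4096-byte sectors (sector_shift == 3):
 *   tag_pool_max_sectors <<= 3  ->  256 sectors    (source line 3407)
 */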
3416 cc->workqueue_id = wq_id;
3420 if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
3423 cc->io_queue = alloc_workqueue("kcryptd_io-%s-%d", common_wq_flags, 1, devname, wq_id);
3424 if (!cc->io_queue) {
3429 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) {
3430 cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
3438 cc->crypt_queue = alloc_workqueue("kcryptd-%s-%d",
3442 if (!cc->crypt_queue) {
3447 spin_lock_init(&cc->write_thread_lock);
3448 cc->write_tree = RB_ROOT;
3450 cc->write_thread = kthread_run(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
3451 if (IS_ERR(cc->write_thread)) {
3452 ret = PTR_ERR(cc->write_thread);
3453 cc->write_thread = NULL;
3457 if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
3458 set_user_nice(cc->write_thread, MIN_NICE);
3476 struct crypt_config *cc = ti->private;
3485 bio_set_dev(bio, cc->dev->bdev);
3487 bio->bi_iter.bi_sector = cc->start +
3496 (bio_data_dir(bio) == WRITE || cc->on_disk_tag_size))
3503 if (unlikely((bio->bi_iter.bi_sector & ((cc->sector_size >> SECTOR_SHIFT) - 1)) != 0))
3506 if (unlikely(bio->bi_iter.bi_size & (cc->sector_size - 1)))
3509 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3510 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3512 if (cc->on_disk_tag_size) {
3513 unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
3521 if (bio_sectors(bio) > cc->tag_pool_max_sectors)
3522 dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
3523 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3528 if (crypt_integrity_aead(cc))
3550 struct crypt_config *cc = ti->private;
3560 DMEMIT("%s ", cc->cipher_string);
3562 if (cc->key_size > 0) {
3563 if (cc->key_string)
3564 DMEMIT(":%u:%s", cc->key_size, cc->key_string);
3566 for (i = 0; i < cc->key_size; i++) {
3567 DMEMIT("%c%c", hex2asc(cc->key[i] >> 4),
3568 hex2asc(cc->key[i] & 0xf));
3574 DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
3575 cc->dev->name, (unsigned long long)cc->start);
3578 num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
3579 num_feature_args += test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags);
3580 num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
3581 num_feature_args += test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags);
3582 num_feature_args += test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags);
3583 num_feature_args += cc->sector_size != (1 << SECTOR_SHIFT);
3584 num_feature_args += test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
3585 if (cc->on_disk_tag_size)
3591 if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
3593 if (test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags))
3595 if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
3597 if (test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags))
3599 if (test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))
3601 if (cc->on_disk_tag_size)
3602 DMEMIT(" integrity:%u:%s", cc->on_disk_tag_size, cc->cipher_auth);
3603 if (cc->sector_size != (1 << SECTOR_SHIFT))
3604 DMEMIT(" sector_size:%d", cc->sector_size);
3605 if (test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags))
3613 DMEMIT(",same_cpu_crypt=%c", test_bit(DM_CRYPT_SAME_CPU, &cc->flags) ? 'y' : 'n');
3614 DMEMIT(",high_priority=%c", test_bit(DM_CRYPT_HIGH_PRIORITY, &cc->flags) ? 'y' : 'n');
3615 DMEMIT(",submit_from_crypt_cpus=%c", test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags) ?
3617 DMEMIT(",no_read_workqueue=%c", test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags) ?
3619 DMEMIT(",no_write_workqueue=%c", test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags) ?
3621 DMEMIT(",iv_large_sectors=%c", test_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags) ?
3624 if (cc->on_disk_tag_size)
3626 cc->on_disk_tag_size, cc->cipher_auth);
3627 if (cc->sector_size != (1 << SECTOR_SHIFT))
3628 DMEMIT(",sector_size=%d", cc->sector_size);
3629 if (cc->cipher_string)
3630 DMEMIT(",cipher_string=%s", cc->cipher_string);
3632 DMEMIT(",key_size=%u", cc->key_size);
3633 DMEMIT(",key_parts=%u", cc->key_parts);
3634 DMEMIT(",key_extra_size=%u", cc->key_extra_size);
3635 DMEMIT(",key_mac_size=%u", cc->key_mac_size);
3643 struct crypt_config *cc = ti->private;
3645 set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3650 struct crypt_config *cc = ti->private;
3652 if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
3662 struct crypt_config *cc = ti->private;
3664 clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
3674 struct crypt_config *cc = ti->private;
3681 if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
3688 if (key_size < 0 || cc->key_size != key_size) {
3693 ret = crypt_set_key(cc, argv[2]);
3696 if (cc->iv_gen_ops && cc->iv_gen_ops->init)
3697 ret = cc->iv_gen_ops->init(cc);
3699 if (cc->key_string)
3700 memset(cc->key, 0, cc->key_size * sizeof(u8));
3704 return crypt_wipe_key(cc);
3715 struct crypt_config *cc = ti->private;
3717 return fn(ti, cc->dev, cc->start, ti->len, data);
3722 struct crypt_config *cc = ti->private;
3725 max_t(unsigned int, limits->logical_block_size, cc->sector_size);
3727 max_t(unsigned int, limits->physical_block_size, cc->sector_size);
3728 limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);