Lines Matching defs:io
(Matches for the io identifier, i.e. struct dm_crypt_io *io, in drivers/md/dm-crypt.c; the leading number on each entry is the line number in that file.)

246 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
1148 static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
1154 if (!bio_sectors(bio) || !io->cc->on_disk_tag_size)
1161 tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
1163 bip->bip_iter.bi_sector = io->cc->start + io->sector;
1165 ret = bio_integrity_add_page(bio, virt_to_page(io->integrity_metadata),
1166 tag_len, offset_in_page(io->integrity_metadata));
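
The tag length computed at line 1161 is the per-sector on-disk tag size multiplied by the number of crypt sectors in the bio: bio_sectors() counts 512-byte sectors, and cc->sector_shift converts that count to crypt-sector units. A minimal userspace sketch of that arithmetic; the constants below are illustrative stand-ins for cc->on_disk_tag_size and cc->sector_shift, not dm-crypt defaults:

#include <stdio.h>

/* Illustrative stand-ins for cc->on_disk_tag_size and cc->sector_shift. */
#define ON_DISK_TAG_SIZE   16u  /* bytes of integrity tag per crypt sector     */
#define SECTOR_SHIFT_EXTRA  3u  /* 0 for 512-byte sectors, 3 for 4 KiB sectors */

/* bio_sectors() counts 512-byte sectors; shifting by the extra sector shift
 * yields the number of crypt sectors, each of which carries one tag. */
static unsigned int tag_len_for(unsigned int bio_512_sectors)
{
        return ON_DISK_TAG_SIZE * (bio_512_sectors >> SECTOR_SHIFT_EXTRA);
}

int main(void)
{
        /* A 64 KiB bio: 128 x 512-byte sectors = 16 x 4 KiB crypt sectors. */
        printf("tag_len = %u bytes\n", tag_len_for(128));   /* prints 256 */
        return 0;
}
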
1284 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
1286 return &io->integrity_metadata[*org_tag_of_dmreq(cc, dmreq) *
1535 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1537 if ((struct skcipher_request *)(io + 1) != req)
1544 struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
1546 if ((struct aead_request *)(io + 1) != req)
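
The (io + 1) comparison at lines 1537 and 1546 works because the crypto request normally lives in the same per-bio allocation, directly after struct dm_crypt_io (see also lines 3529/3531); only requests that were instead taken from the request mempool get freed back to it. A small userspace illustration of that layout trick; the struct names are invented for the example, and the alignment padding that the kernel accounts for in per_bio_data_size is ignored here:

#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins: a per-bio header followed, in the same allocation,
 * by space for one crypto request (the "inline" request). */
struct fake_io  { int error; };
struct fake_req { char blob[64]; };

int main(void)
{
        /* One allocation holding the io header plus the inline request. */
        struct fake_io *io = malloc(sizeof(*io) + sizeof(struct fake_req));
        struct fake_req *inline_req = (struct fake_req *)(io + 1);

        /* A request allocated elsewhere (dm-crypt takes these from a mempool). */
        struct fake_req *pool_req = malloc(sizeof(*pool_req));

        /* Mirror of the dm-crypt check: a request is freed back to the pool
         * only if it is NOT the inline one sitting right after the io. */
        printf("inline request is separate: %s\n",
               inline_req != (struct fake_req *)(io + 1) ? "yes" : "no");
        printf("pool request is separate:   %s\n",
               pool_req   != (struct fake_req *)(io + 1) ? "yes" : "no");

        free(pool_req);
        free(io);
        return 0;
}
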
1677 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
1679 struct crypt_config *cc = io->cc;
1690 clone = bio_alloc_bioset(cc->dev->bdev, nr_iovecs, io->base_bio->bi_opf,
1692 clone->bi_private = io;
1694 clone->bi_ioprio = io->base_bio->bi_ioprio;
1735 if (dm_crypt_integrity_io_alloc(io, clone)) {
1764 static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
1767 io->cc = cc;
1768 io->base_bio = bio;
1769 io->sector = sector;
1770 io->error = 0;
1771 io->ctx.aead_recheck = false;
1772 io->ctx.aead_failed = false;
1773 io->ctx.r.req = NULL;
1774 io->integrity_metadata = NULL;
1775 io->integrity_metadata_from_pool = false;
1776 atomic_set(&io->io_pending, 0);
1779 static void crypt_inc_pending(struct dm_crypt_io *io)
1781 atomic_inc(&io->io_pending);
1784 static void kcryptd_queue_read(struct dm_crypt_io *io);
1790 static void crypt_dec_pending(struct dm_crypt_io *io)
1792 struct crypt_config *cc = io->cc;
1793 struct bio *base_bio = io->base_bio;
1794 blk_status_t error = io->error;
1796 if (!atomic_dec_and_test(&io->io_pending))
1799 if (likely(!io->ctx.aead_recheck) && unlikely(io->ctx.aead_failed) &&
1801 io->ctx.aead_recheck = true;
1802 io->ctx.aead_failed = false;
1803 io->error = 0;
1804 kcryptd_queue_read(io);
1808 if (io->ctx.r.req)
1809 crypt_free_req(cc, io->ctx.r.req, base_bio);
1811 if (unlikely(io->integrity_metadata_from_pool))
1812 mempool_free(io->integrity_metadata, &io->cc->tag_pool);
1814 kfree(io->integrity_metadata);
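
crypt_inc_pending() and crypt_dec_pending() (lines 1779-1814) implement a per-io reference count: every in-flight piece of work holds a reference, and whoever drops the last one completes the base bio (or, when an AEAD read failed verification, requeues it for a recheck). A minimal C11 sketch of the same atomic pattern, with invented names:

#include <stdatomic.h>
#include <stdio.h>

struct fake_io {
        atomic_int io_pending;  /* plays the role of io->io_pending */
        int error;              /* plays the role of io->error      */
};

static void inc_pending(struct fake_io *io)
{
        atomic_fetch_add(&io->io_pending, 1);
}

/* Returns 1 when this call dropped the last reference, mirroring
 * atomic_dec_and_test(): only that caller may complete the io. */
static int dec_pending(struct fake_io *io)
{
        if (atomic_fetch_sub(&io->io_pending, 1) != 1)
                return 0;
        printf("last reference gone, completing io (error=%d)\n", io->error);
        return 1;
}

int main(void)
{
        struct fake_io io = { .io_pending = 0, .error = 0 };

        inc_pending(&io);       /* submitter's own reference        */
        inc_pending(&io);       /* e.g. a clone bio now in flight   */
        dec_pending(&io);       /* clone completes: not the last    */
        dec_pending(&io);       /* submitter drops its ref: done    */
        return 0;
}
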
1840 struct dm_crypt_io *io = clone->bi_private;
1841 struct crypt_config *cc = io->cc;
1845 if (io->ctx.aead_recheck && !error) {
1846 kcryptd_queue_crypt(io);
1853 if (rw == WRITE || io->ctx.aead_recheck)
1859 kcryptd_queue_crypt(io);
1864 io->error = error;
1866 crypt_dec_pending(io);
1871 static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1873 struct crypt_config *cc = io->cc;
1876 if (io->ctx.aead_recheck) {
1879 crypt_inc_pending(io);
1880 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
1882 crypt_dec_pending(io);
1885 clone->bi_iter.bi_sector = cc->start + io->sector;
1886 crypt_convert_init(cc, &io->ctx, clone, clone, io->sector);
1887 io->saved_bi_iter = clone->bi_iter;
1888 dm_submit_bio_remap(io->base_bio, clone);
1898 clone = bio_alloc_clone(cc->dev->bdev, io->base_bio, gfp, &cc->bs);
1901 clone->bi_private = io;
1904 crypt_inc_pending(io);
1906 clone->bi_iter.bi_sector = cc->start + io->sector;
1908 if (dm_crypt_integrity_io_alloc(io, clone)) {
1909 crypt_dec_pending(io);
1914 dm_submit_bio_remap(io->base_bio, clone);
1920 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
1922 crypt_inc_pending(io);
1923 if (kcryptd_io_read(io, GFP_NOIO))
1924 io->error = BLK_STS_RESOURCE;
1925 crypt_dec_pending(io);
1928 static void kcryptd_queue_read(struct dm_crypt_io *io)
1930 struct crypt_config *cc = io->cc;
1932 INIT_WORK(&io->work, kcryptd_io_read_work);
1933 queue_work(cc->io_queue, &io->work);
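
kcryptd_queue_read() (lines 1928-1933) defers the read to cc->io_queue so it is reissued from process context, using the work_struct embedded in the io and container_of() in the handler (line 1920). A hedged kernel-module-style sketch of the same INIT_WORK()/queue_work() pattern; the workqueue name, handler, and payload are made up for illustration:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_io {
        struct work_struct work;        /* like io->work in struct dm_crypt_io */
        int payload;
};

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
        /* Recover the containing io, as kcryptd_io_read_work() does. */
        struct demo_io *io = container_of(work, struct demo_io, work);

        pr_info("handling io payload %d in process context\n", io->payload);
        kfree(io);
}

static int __init demo_init(void)
{
        struct demo_io *io;

        demo_wq = alloc_workqueue("demo_io_queue", WQ_MEM_RECLAIM, 1);
        if (!demo_wq)
                return -ENOMEM;

        io = kzalloc(sizeof(*io), GFP_KERNEL);
        if (!io) {
                destroy_workqueue(demo_wq);
                return -ENOMEM;
        }
        io->payload = 42;

        /* Same shape as kcryptd_queue_read(): bind the handler, then queue. */
        INIT_WORK(&io->work, demo_work_fn);
        queue_work(demo_wq, &io->work);
        return 0;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(demo_wq);     /* drains pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
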
1936 static void kcryptd_io_write(struct dm_crypt_io *io)
1938 struct bio *clone = io->ctx.bio_out;
1940 dm_submit_bio_remap(io->base_bio, clone);
1948 struct dm_crypt_io *io;
1987 io = crypt_io_from_node(rb_first(&write_tree));
1988 rb_erase(&io->rb_node, &write_tree);
1989 kcryptd_io_write(io);
1997 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1999 struct bio *clone = io->ctx.bio_out;
2000 struct crypt_config *cc = io->cc;
2005 if (unlikely(io->error)) {
2008 crypt_dec_pending(io);
2013 BUG_ON(io->ctx.iter_out.bi_size);
2015 clone->bi_iter.bi_sector = cc->start + io->sector;
2019 dm_submit_bio_remap(io->base_bio, clone);
2028 sector = io->sector;
2036 rb_link_node(&io->rb_node, parent, rbp);
2037 rb_insert_color(&io->rb_node, &cc->write_tree);
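
When writes are handed to the dedicated writer thread, kcryptd_crypt_write_io_submit() (lines 2028-2037) inserts the finished io into cc->write_tree keyed by io->sector, and dmcrypt_write() (lines 1987-1989) pops entries lowest-sector-first so submission to the device stays roughly sequential. A sketch of that pattern with the kernel rbtree API; struct and function names are simplified stand-ins:

#include <linux/rbtree.h>
#include <linux/types.h>

struct demo_io {
        struct rb_node rb_node; /* like io->rb_node */
        sector_t sector;        /* like io->sector  */
};

/* Insert keyed by sector, as kcryptd_crypt_write_io_submit() does. */
static void demo_insert(struct rb_root *tree, struct demo_io *io)
{
        struct rb_node **rbp = &tree->rb_node, *parent = NULL;

        while (*rbp) {
                parent = *rbp;
                if (io->sector < rb_entry(parent, struct demo_io, rb_node)->sector)
                        rbp = &(*rbp)->rb_left;
                else
                        rbp = &(*rbp)->rb_right;
        }
        rb_link_node(&io->rb_node, parent, rbp);
        rb_insert_color(&io->rb_node, tree);
}

/* Pop the lowest-sector io, as the dmcrypt_write() loop does. */
static struct demo_io *demo_pop_first(struct rb_root *tree)
{
        struct rb_node *first = rb_first(tree);

        if (!first)
                return NULL;
        rb_erase(first, tree);
        return rb_entry(first, struct demo_io, rb_node);
}
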
2064 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2065 struct crypt_config *cc = io->cc;
2066 struct convert_context *ctx = &io->ctx;
2068 sector_t sector = io->sector;
2074 r = crypt_convert(cc, &io->ctx, true, false);
2076 io->error = r;
2084 /* Encryption was already finished, submit io now */
2086 kcryptd_crypt_write_io_submit(io, 0);
2087 io->sector = sector;
2090 crypt_dec_pending(io);
2093 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
2095 struct crypt_config *cc = io->cc;
2096 struct convert_context *ctx = &io->ctx;
2099 sector_t sector = io->sector;
2103 * Prevent io from disappearing until this function completes.
2105 crypt_inc_pending(io);
2106 crypt_convert_init(cc, ctx, NULL, io->base_bio, sector);
2108 clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
2110 io->error = BLK_STS_IOERR;
2114 io->ctx.bio_out = clone;
2115 io->ctx.iter_out = clone->bi_iter;
2118 bio_copy_data(clone, io->base_bio);
2119 io->ctx.bio_in = clone;
2120 io->ctx.iter_in = clone->bi_iter;
2125 crypt_inc_pending(io);
2134 INIT_WORK(&io->work, kcryptd_crypt_write_continue);
2135 queue_work(cc->crypt_queue, &io->work);
2139 io->error = r;
2147 /* Encryption was already finished, submit io now */
2149 kcryptd_crypt_write_io_submit(io, 0);
2150 io->sector = sector;
2154 crypt_dec_pending(io);
2157 static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
2159 if (io->ctx.aead_recheck) {
2160 if (!io->error) {
2161 io->ctx.bio_in->bi_iter = io->saved_bi_iter;
2162 bio_copy_data(io->base_bio, io->ctx.bio_in);
2164 crypt_free_buffer_pages(io->cc, io->ctx.bio_in);
2165 bio_put(io->ctx.bio_in);
2167 crypt_dec_pending(io);
2172 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2173 struct crypt_config *cc = io->cc;
2176 wait_for_completion(&io->ctx.restart);
2177 reinit_completion(&io->ctx.restart);
2179 r = crypt_convert(cc, &io->ctx, true, false);
2181 io->error = r;
2183 if (atomic_dec_and_test(&io->ctx.cc_pending))
2184 kcryptd_crypt_read_done(io);
2186 crypt_dec_pending(io);
2189 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
2191 struct crypt_config *cc = io->cc;
2194 crypt_inc_pending(io);
2196 if (io->ctx.aead_recheck) {
2197 io->ctx.cc_sector = io->sector + cc->iv_offset;
2198 r = crypt_convert(cc, &io->ctx,
2201 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
2202 io->sector);
2204 r = crypt_convert(cc, &io->ctx,
2212 INIT_WORK(&io->work, kcryptd_crypt_read_continue);
2213 queue_work(cc->crypt_queue, &io->work);
2217 io->error = r;
2219 if (atomic_dec_and_test(&io->ctx.cc_pending))
2220 kcryptd_crypt_read_done(io);
2222 crypt_dec_pending(io);
2229 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
2230 struct crypt_config *cc = io->cc;
2255 io->error = BLK_STS_PROTECTION;
2257 io->error = BLK_STS_IOERR;
2259 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
2268 if (bio_data_dir(io->base_bio) == READ) {
2269 kcryptd_crypt_read_done(io);
2278 kcryptd_crypt_write_io_submit(io, 1);
2283 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
2285 if (bio_data_dir(io->base_bio) == READ)
2286 kcryptd_crypt_read_convert(io);
2288 kcryptd_crypt_write_convert(io);
2291 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
2293 struct crypt_config *cc = io->cc;
2295 if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
2296 (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
2303 INIT_WORK(&io->work, kcryptd_crypt);
2304 queue_work(system_bh_wq, &io->work);
2307 kcryptd_crypt(&io->work);
2312 INIT_WORK(&io->work, kcryptd_crypt);
2313 queue_work(cc->crypt_queue, &io->work);
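
Lines 2291-2313 show the dispatch decision in kcryptd_queue_crypt(): if the no-workqueue flag for the bio's direction is set, the conversion runs inline (or via system_bh_wq when the caller cannot sleep), otherwise it is queued on cc->crypt_queue. A condensed reconstruction of that logic; the in_hardirq()/irqs_disabled() test is taken from the upstream source rather than from the matches above, so treat this as a sketch, not a verbatim copy:

static void queue_crypt_sketch(struct dm_crypt_io *io)
{
        struct crypt_config *cc = io->cc;

        if ((bio_data_dir(io->base_bio) == READ &&
             test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) ||
            (bio_data_dir(io->base_bio) == WRITE &&
             test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) {
                if (in_hardirq() || irqs_disabled()) {
                        /* Cannot run the cipher here; punt to softirq context. */
                        INIT_WORK(&io->work, kcryptd_crypt);
                        queue_work(system_bh_wq, &io->work);
                } else {
                        /* Run synchronously in the caller's context. */
                        kcryptd_crypt(&io->work);
                }
                return;
        }

        /* Default path: the encryption workqueue. */
        INIT_WORK(&io->work, kcryptd_crypt);
        queue_work(cc->crypt_queue, &io->work);
}
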
3425 ti->error = "Couldn't create kcryptd io queue";
3475 struct dm_crypt_io *io;
3509 io = dm_per_bio_data(bio, cc->per_bio_data_size);
3510 crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
3516 io->integrity_metadata = NULL;
3518 io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3520 if (unlikely(!io->integrity_metadata)) {
3523 io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
3524 io->integrity_metadata_from_pool = true;
3529 io->ctx.r.req_aead = (struct aead_request *)(io + 1);
3531 io->ctx.r.req = (struct skcipher_request *)(io + 1);
3533 if (bio_data_dir(io->base_bio) == READ) {
3534 if (kcryptd_io_read(io, CRYPT_MAP_READ_GFP))
3535 kcryptd_queue_read(io);
3537 kcryptd_queue_crypt(io);
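
Finally, the integrity-tag buffer set up in crypt_map() (lines 3516-3524) is allocated opportunistically with kmalloc() using flags that fail fast instead of triggering reclaim, then falls back to a guaranteed mempool allocation, recording which path was taken so crypt_dec_pending() can free it correctly (lines 1811-1814). A minimal userspace analogue of that fallback; the one-slot "pool" here is invented for the example:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define POOL_SLOT_SIZE 4096

/* A single preallocated emergency buffer standing in for cc->tag_pool. */
static unsigned char emergency_slot[POOL_SLOT_SIZE];
static bool emergency_in_use;

struct tag_buf {
        void *data;
        bool from_pool;         /* like io->integrity_metadata_from_pool */
};

static struct tag_buf tag_buf_alloc(size_t len)
{
        struct tag_buf b = { .data = malloc(len), .from_pool = false };

        if (!b.data && len <= POOL_SLOT_SIZE && !emergency_in_use) {
                /* Opportunistic allocation failed: fall back to the pool. */
                emergency_in_use = true;
                b.data = emergency_slot;
                b.from_pool = true;
        }
        return b;
}

static void tag_buf_free(struct tag_buf *b)
{
        if (b->from_pool)
                emergency_in_use = false;       /* hand the slot back */
        else
                free(b->data);
        b->data = NULL;
}

int main(void)
{
        struct tag_buf b = tag_buf_alloc(256);

        printf("allocated %s\n", b.from_pool ? "from the pool" : "with malloc");
        tag_buf_free(&b);
        return 0;
}
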