Search restricted to netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/md/ (the matched lines below are from dm-crypt.c)

Lines matching defs:io (the per-request state variable io, of type struct dm_crypt_io):

150 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
518 struct dm_crypt_io *io = bio->bi_private;
519 struct crypt_config *cc = io->target->private;
530 static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
533 struct crypt_config *cc = io->target->private;
544 clone_init(io, clone);
597 struct dm_crypt_io *io;
599 io = mempool_alloc(cc->io_pool, GFP_NOIO);
600 io->target = ti;
601 io->base_bio = bio;
602 io->sector = sector;
603 io->error = 0;
604 io->base_io = NULL;
605 atomic_set(&io->pending, 0);
607 return io;
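
The allocator above draws per-request state from a dedicated mempool with
GFP_NOIO, so an allocation made while servicing I/O can neither fail outright
nor recurse into the block layer to reclaim memory. A minimal reconstruction
from the matched lines; the signature is inferred from the call at line 1287,
and cc = ti->private is inferred from the use of cc->io_pool at line 599:

    static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
                                              struct bio *bio, sector_t sector)
    {
            struct crypt_config *cc = ti->private;   /* inferred */
            struct dm_crypt_io *io;

            /* GFP_NOIO: may sleep, but never issues I/O to satisfy the request */
            io = mempool_alloc(cc->io_pool, GFP_NOIO);
            io->target = ti;
            io->base_bio = bio;       /* the bio handed to us by device-mapper */
            io->sector = sector;      /* first sector, relative to the target */
            io->error = 0;
            io->base_io = NULL;       /* set only for fragments of a split write */
            atomic_set(&io->pending, 0);

            return io;
    }
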
610 static void crypt_inc_pending(struct dm_crypt_io *io)
612 atomic_inc(&io->pending);
620 static void crypt_dec_pending(struct dm_crypt_io *io)
622 struct crypt_config *cc = io->target->private;
623 struct bio *base_bio = io->base_bio;
624 struct dm_crypt_io *base_io = io->base_io;
625 int error = io->error;
627 if (!atomic_dec_and_test(&io->pending))
630 mempool_free(io, cc->io_pool);
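
crypt_inc_pending()/crypt_dec_pending() implement a plain atomic reference
count on the in-flight request: every asynchronous sub-operation holds a
reference, and the last drop releases the state. The tail of
crypt_dec_pending() is not among the matched lines; the sketch below fills it
in along the lines of mainline dm-crypt (the bio_endio() call and the error
propagation to a chained base_io are inferred, not shown above):

    static void crypt_dec_pending(struct dm_crypt_io *io)
    {
            struct crypt_config *cc = io->target->private;
            struct bio *base_bio = io->base_bio;
            struct dm_crypt_io *base_io = io->base_io;
            int error = io->error;

            if (!atomic_dec_and_test(&io->pending))
                    return;                       /* others still hold references */

            mempool_free(io, cc->io_pool);

            if (likely(!base_io))
                    bio_endio(base_bio, error);   /* inferred: complete original bio */
            else {
                    /* inferred: fragment of a split write, hand error up the chain */
                    if (error && !base_io->error)
                            base_io->error = error;
                    crypt_dec_pending(base_io);
            }
    }
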
657 struct dm_crypt_io *io = clone->bi_private;
658 struct crypt_config *cc = io->target->private;
673 kcryptd_queue_crypt(io);
678 io->error = error;
680 crypt_dec_pending(io);
683 static void clone_init(struct dm_crypt_io *io, struct bio *clone)
685 struct crypt_config *cc = io->target->private;
687 clone->bi_private = io;
690 clone->bi_rw = io->base_bio->bi_rw;
694 static void kcryptd_io_read(struct dm_crypt_io *io)
696 struct crypt_config *cc = io->target->private;
697 struct bio *base_bio = io->base_bio;
700 crypt_inc_pending(io);
709 io->error = -ENOMEM;
710 crypt_dec_pending(io);
714 clone_init(io, clone);
718 clone->bi_sector = cc->start + io->sector;
725 static void kcryptd_io_write(struct dm_crypt_io *io)
727 struct bio *clone = io->ctx.bio_out;
733 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
735 if (bio_data_dir(io->base_bio) == READ)
736 kcryptd_io_read(io);
738 kcryptd_io_write(io);
741 static void kcryptd_queue_io(struct dm_crypt_io *io)
743 struct crypt_config *cc = io->target->private;
745 INIT_WORK(&io->work, kcryptd_io);
746 queue_work(cc->io_queue, &io->work);
749 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
752 struct bio *clone = io->ctx.bio_out;
753 struct crypt_config *cc = io->target->private;
758 io->error = -EIO;
759 crypt_dec_pending(io);
764 BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
766 clone->bi_sector = cc->start + io->sector;
769 kcryptd_queue_io(io);
774 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
776 struct crypt_config *cc = io->target->private;
781 unsigned remaining = io->base_bio->bi_size;
782 sector_t sector = io->sector;
786 * Prevent io from disappearing until this function completes.
788 crypt_inc_pending(io);
789 crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
796 clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
798 io->error = -ENOMEM;
802 io->ctx.bio_out = clone;
803 io->ctx.idx_out = 0;
808 crypt_inc_pending(io);
809 r = crypt_convert(cc, &io->ctx);
810 crypt_finished = atomic_dec_and_test(&io->ctx.pending);
812 /* Encryption was already finished, submit io now */
814 kcryptd_crypt_write_io_submit(io, r, 0);
823 io->sector = sector;
828 * But don't wait if split was due to the io size restriction
838 new_io = crypt_io_alloc(io->target, io->base_bio,
842 io->base_bio, sector);
843 new_io->ctx.idx_in = io->ctx.idx_in;
844 new_io->ctx.offset_in = io->ctx.offset_in;
850 if (!io->base_io)
851 new_io->base_io = io;
853 new_io->base_io = io->base_io;
854 crypt_inc_pending(io->base_io);
855 crypt_dec_pending(io);
858 io = new_io;
862 crypt_dec_pending(io);
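
The write path above (lines 774-862) encrypts into freshly allocated pages
and may have to split a large bio when crypt_alloc_buffer() cannot cover the
whole remaining size. Each follow-up fragment gets its own dm_crypt_io whose
base_io points at the original request, so the original bio completes only
after every fragment drops its reference. The chaining step, assembled from
the matched lines (loop framing paraphrased, not a self-contained function):

    /* inside the write loop, after submitting the current fragment */
    new_io = crypt_io_alloc(io->target, io->base_bio, sector);
    crypt_convert_init(cc, &new_io->ctx, NULL, io->base_bio, sector);
    new_io->ctx.idx_in = io->ctx.idx_in;       /* resume where we stopped */
    new_io->ctx.offset_in = io->ctx.offset_in;

    if (!io->base_io)
            new_io->base_io = io;              /* first split: chain to original */
    else {
            new_io->base_io = io->base_io;     /* later splits: share the original */
            crypt_inc_pending(io->base_io);
    }

    crypt_dec_pending(io);                     /* this fragment is now on its own */
    io = new_io;
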
865 static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
868 io->error = -EIO;
870 crypt_dec_pending(io);
873 static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
875 struct crypt_config *cc = io->target->private;
878 crypt_inc_pending(io);
880 crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
881 io->sector);
883 r = crypt_convert(cc, &io->ctx);
885 if (atomic_dec_and_test(&io->ctx.pending))
886 kcryptd_crypt_read_done(io, r);
888 crypt_dec_pending(io);
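
The read path is simpler: the ciphertext has already been read into the
original bio's pages (see kcryptd_io_read at line 694), so decryption runs in
place with input and output both set to base_bio. Reconstructed from the
matched lines:

    static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
    {
            struct crypt_config *cc = io->target->private;
            int r = 0;

            crypt_inc_pending(io);

            /* in place: source and destination are the same bio */
            crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
                               io->sector);

            r = crypt_convert(cc, &io->ctx);

            /* synchronous completion; async completion arrives via the ctx callback */
            if (atomic_dec_and_test(&io->ctx.pending))
                    kcryptd_crypt_read_done(io, r);

            crypt_dec_pending(io);
    }
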
896 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
897 struct crypt_config *cc = io->target->private;
909 if (bio_data_dir(io->base_bio) == READ)
910 kcryptd_crypt_read_done(io, error);
912 kcryptd_crypt_write_io_submit(io, error, 1);
917 struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
919 if (bio_data_dir(io->base_bio) == READ)
920 kcryptd_crypt_read_convert(io);
922 kcryptd_crypt_write_convert(io);
925 static void kcryptd_queue_crypt(struct dm_crypt_io *io)
927 struct crypt_config *cc = io->target->private;
929 INIT_WORK(&io->work, kcryptd_crypt);
930 queue_work(cc->crypt_queue, &io->work);
1206 ti->error = "Cannot allocate crypt io mempool";
1257 ti->error = "Couldn't create kcryptd io queue";
1278 struct dm_crypt_io *io;
1287 io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));
1289 if (bio_data_dir(io->base_bio) == READ)
1290 kcryptd_queue_io(io);
1292 kcryptd_queue_crypt(io);
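
Lines 1287-1292 are the target's map entry point: READs are queued for I/O
first (read ciphertext, then decrypt when the clone completes), while WRITEs
go straight to the crypt queue (encrypt into new pages, then submit). A
sketch of the enclosing function; the 2.6.36-era signature and the
DM_MAPIO_SUBMITTED return are inferred rather than shown, and barrier
special-casing is omitted:

    static int crypt_map(struct dm_target *ti, struct bio *bio,
                         union map_info *map_context)
    {
            struct dm_crypt_io *io;

            io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

            if (bio_data_dir(io->base_bio) == READ)
                    kcryptd_queue_io(io);    /* read first, decrypt on completion */
            else
                    kcryptd_queue_crypt(io); /* encrypt first, write out after */

            return DM_MAPIO_SUBMITTED;       /* we own the bio from here on */
    }
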