Lines Matching defs:bio (block/blk-map.c)

8 #include <linux/bio.h>
41 * bio_copy_from_iter - copy all pages from iov_iter to bio
42 * @bio: The &struct bio which describes the I/O as destination
45 * Copy all pages from iov_iter to bio.
48 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
53 bio_for_each_segment_all(bvec, bio, iter_all) {
72 * bio_copy_to_iter - copy all pages from bio to iov_iter
73 * @bio: The &struct bio which describes the I/O as source
76 * Copy all pages from bio to iov_iter.
79 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
84 bio_for_each_segment_all(bvec, bio, iter_all) {
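
Both helpers walk every segment of an already-built bio and move data between the bio's pages and the caller's iov_iter (bio_copy_to_iter() deliberately takes its iov_iter by value so it advances a private copy). A minimal sketch of that per-segment pattern, assuming copy_page_from_iter() from <linux/uio.h>; this illustrates the loop, it is not the verbatim function body:

/* Illustration: copy user data into each segment of the bio.  A short
 * copy while the iterator still has data left is reported as -EFAULT. */
static int copy_iter_into_bio(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page, bvec->bv_offset,
					  bvec->bv_len, iter);
		if (!iov_iter_count(iter))
			break;
		if (ret < bvec->bv_len)
			return -EFAULT;
	}
	return 0;
}
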
103 * bio_uncopy_user - finish previously mapped bio
104 * @bio: bio being terminated
109 static int bio_uncopy_user(struct bio *bio)
111 struct bio_map_data *bmd = bio->bi_private;
122 else if (bio_data_dir(bio) == READ)
123 ret = bio_copy_to_iter(bio, bmd->iter);
125 bio_free_pages(bio);
136 struct bio *bio;
157 bio = bio_kmalloc(nr_pages, gfp_mask);
158 if (!bio)
160 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
192 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
203 map_data->offset += bio->bi_iter.bi_size;
210 ret = bio_copy_from_iter(bio, iter);
218 ret = bio_copy_from_iter(bio, &iter2);
223 zero_fill_bio(bio);
224 iov_iter_advance(iter, bio->bi_iter.bi_size);
227 bio->bi_private = bmd;
229 ret = blk_rq_append_bio(rq, bio);
235 bio_free_pages(bio);
236 bio_uninit(bio);
237 kfree(bio);
243 static void blk_mq_map_bio_put(struct bio *bio)
245 if (bio->bi_opf & REQ_ALLOC_CACHE) {
246 bio_put(bio);
248 bio_uninit(bio);
249 kfree(bio);
253 static struct bio *blk_rq_map_bio_alloc(struct request *rq,
256 struct bio *bio;
259 bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
261 if (!bio)
264 bio = bio_kmalloc(nr_vecs, gfp_mask);
265 if (!bio)
267 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
269 return bio;
278 struct bio *bio;
285 bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
286 if (bio == NULL)
292 bio_set_flag(bio, BIO_PAGE_PINNED);
324 if (!bio_add_hw_page(rq->q, bio, page, n, offs,
329 bio_release_page(bio, page);
335 * release the pages we didn't map into the bio, if any
338 bio_release_page(bio, pages[j++]);
341 /* couldn't stuff something into bio? */
348 ret = blk_rq_append_bio(rq, bio);
354 bio_release_pages(bio, false);
355 blk_mq_map_bio_put(bio);
359 static void bio_invalidate_vmalloc_pages(struct bio *bio)
362 if (bio->bi_private && !op_is_write(bio_op(bio))) {
365 for (i = 0; i < bio->bi_vcnt; i++)
366 len += bio->bi_io_vec[i].bv_len;
367 invalidate_kernel_vmap_range(bio->bi_private, len);
372 static void bio_map_kern_endio(struct bio *bio)
374 bio_invalidate_vmalloc_pages(bio);
375 bio_uninit(bio);
376 kfree(bio);
380 * bio_map_kern - map kernel address into bio
381 * @q: the struct request_queue for the bio
384 * @gfp_mask: allocation flags for bio allocation
386 * Map the kernel address into a bio suitable for io to a block
389 static struct bio *bio_map_kern(struct request_queue *q, void *data,
399 struct bio *bio;
401 bio = bio_kmalloc(nr_pages, gfp_mask);
402 if (!bio)
404 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
408 bio->bi_private = data;
425 if (bio_add_pc_page(q, bio, page, bytes,
428 bio_uninit(bio);
429 kfree(bio);
438 bio->bi_end_io = bio_map_kern_endio;
439 return bio;
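
bio_map_kern() turns a kernel-virtual buffer into (page, offset, length) tuples and feeds them to bio_add_pc_page(), which enforces the queue's segment limits. A hedged sketch of that page walk for a directly-mapped (non-vmalloc) buffer; the real function also handles vmalloc memory through vmalloc_to_page() plus the kernel vmap flush/invalidate seen above, and add_kernel_buffer() is an illustrative name, not a kernel helper:

/* Illustration only: add a linearly-mapped kernel buffer to a bio,
 * one page-sized chunk at a time, the way bio_map_kern() does above. */
static int add_kernel_buffer(struct request_queue *q, struct bio *bio,
			     void *data, unsigned int len)
{
	unsigned long kaddr = (unsigned long)data;

	while (len) {
		unsigned int offset = offset_in_page(kaddr);
		unsigned int bytes = min_t(unsigned int, PAGE_SIZE - offset, len);

		if (bio_add_pc_page(q, bio, virt_to_page(kaddr),
				    bytes, offset) < bytes)
			return -EINVAL;		/* hit a queue limit */

		kaddr += bytes;
		len -= bytes;
	}
	return 0;
}
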
442 static void bio_copy_kern_endio(struct bio *bio)
444 bio_free_pages(bio);
445 bio_uninit(bio);
446 kfree(bio);
449 static void bio_copy_kern_endio_read(struct bio *bio)
451 char *p = bio->bi_private;
455 bio_for_each_segment_all(bvec, bio, iter_all) {
460 bio_copy_kern_endio(bio);
464 * bio_copy_kern - copy kernel address into bio
465 * @q: the struct request_queue for the bio
468 * @gfp_mask: allocation flags for bio and page allocation
471 * copy the kernel address into a bio suitable for io to a block
474 static struct bio *bio_copy_kern(struct request_queue *q, void *data,
480 struct bio *bio;
491 bio = bio_kmalloc(nr_pages, gfp_mask);
492 if (!bio)
494 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
510 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
518 bio->bi_end_io = bio_copy_kern_endio_read;
519 bio->bi_private = data;
521 bio->bi_end_io = bio_copy_kern_endio;
524 return bio;
527 bio_free_pages(bio);
528 bio_uninit(bio);
529 kfree(bio);
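
bio_copy_kern() bounces the data through freshly allocated pages instead of mapping the caller's buffer directly; the original buffer pointer is stashed in bio->bi_private, and on a read the completion handler copies every segment back into it. A sketch of that copy-back, assuming memcpy_from_bvec() from <linux/bvec.h> (compare the bio_copy_kern_endio_read() fragments above; the function name here is illustrative):

/* Illustration of the read-direction copy-back: walk the bounce pages
 * and copy each segment back into the original kernel buffer. */
static void copy_back_to_kernel_buffer(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}
}
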
534 * Append a bio to a passthrough request. Only works if the bio can be merged
537 int blk_rq_append_bio(struct request *rq, struct bio *bio)
543 bio_for_each_bvec(bv, bio, iter)
546 if (!rq->bio) {
547 blk_rq_bio_prep(rq, bio, nr_segs);
549 if (!ll_back_merge_fn(rq, bio, nr_segs))
551 rq->biotail->bi_next = bio;
552 rq->biotail = bio;
553 rq->__data_len += (bio)->bi_iter.bi_size;
554 bio_crypt_free_ctx(bio);
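
blk_rq_append_bio() either becomes the request's first bio (blk_rq_bio_prep()) or is back-merged behind rq->biotail, after which the request's data length grows by the bio's size. A hedged sketch of an assumed passthrough caller that owns a pre-built bio; attach_payload() is an illustration, not a real kernel helper:

/* Make the bio's operation match the request (the same fix-up that
 * blk_rq_map_kern() applies below) and append it.  On failure the bio is
 * not linked into rq and the caller still owns it. */
static int attach_payload(struct request *rq, struct bio *bio)
{
	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	return blk_rq_append_bio(rq, bio);
}
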
561 /* Prepare bio for passthrough IO given ITER_BVEC iter */
570 struct bio *bio;
579 bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
580 if (bio == NULL)
583 bio_iov_bvec_set(bio, (struct iov_iter *)iter);
584 blk_rq_bio_prep(rq, bio, nr_segs);
596 blk_mq_map_bio_put(bio);
613 blk_mq_map_bio_put(bio);
638 struct bio *bio = NULL;
673 if (!bio)
674 bio = rq->bio;
680 blk_rq_unmap_user(bio);
682 rq->bio = NULL;
739 * @bio: start of bio list
743 * supply the original rq->bio from the blk_rq_map_user() return, since
744 * the I/O completion may have changed rq->bio.
746 int blk_rq_unmap_user(struct bio *bio)
748 struct bio *next_bio;
751 while (bio) {
752 if (bio->bi_private) {
753 ret2 = bio_uncopy_user(bio);
757 bio_release_pages(bio, bio_data_dir(bio) == READ);
760 next_bio = bio;
761 bio = bio->bi_next;
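
The kernel-doc fragment above is the key usage rule: save rq->bio immediately after mapping, because by completion time rq->bio may have changed, and blk_rq_unmap_user() needs the original head of the bio list to copy data back (copy path) or release the pinned pages (direct-map path). A hedged sketch of that pairing in an assumed ioctl-style caller, relying on the usual <linux/blkdev.h>/<linux/blk-mq.h> declarations; do_user_passthrough() and its minimal error handling are illustrative only:

/* Map a user buffer, execute the passthrough request synchronously,
 * then unmap using the bio pointer saved before submission. */
static int do_user_passthrough(struct request_queue *q, struct request *rq,
			       void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;
	bio = rq->bio;			/* remember the original head */

	blk_execute_rq(rq, false);	/* real callers also check the status */

	return blk_rq_unmap_user(bio);	/* copy back and/or release pages */
}
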
787 struct bio *bio;
797 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
799 bio = bio_map_kern(q, kbuf, len, gfp_mask);
801 if (IS_ERR(bio))
802 return PTR_ERR(bio);
804 bio->bi_opf &= ~REQ_OP_MASK;
805 bio->bi_opf |= req_op(rq);
807 ret = blk_rq_append_bio(rq, bio);
809 bio_uninit(bio);
810 kfree(bio);
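
blk_rq_map_kern() picks between the two helpers above: a buffer that fails the queue's alignment check (or lives on the stack) is bounced via bio_copy_kern(), anything else is mapped directly with bio_map_kern(); either way the resulting bio's op is rewritten to match the request before blk_rq_append_bio(). A minimal sketch of an assumed SCSI/NVMe-passthrough-style caller; map_kernel_payload() is illustrative, not a kernel helper:

/* Attach a kernel buffer to an already-allocated passthrough request;
 * the copy-vs-map decision happens inside blk_rq_map_kern(). */
static int map_kernel_payload(struct request *rq, void *kbuf, unsigned int len)
{
	if (!len)
		return 0;
	return blk_rq_map_kern(rq->q, rq, kbuf, len, GFP_NOIO);
}
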