Lines matching "bio" in fs/f2fs/data.c

16 #include <linux/bio.h>
111 struct bio *bio;
118 * bio that belong to a compressed cluster yet.
125 * Update and unlock a bio's pages, and free the bio.
127 * This marks pages up-to-date only if there was no error in the bio (I/O error,
128 * decryption error, or verity error), as indicated by bio->bi_status.
132 * cluster basis rather than a per-bio basis. Instead, we only must do two
136 * release the bio's reference to the decompress_io_ctx of the page's cluster.
138 static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
142 struct bio_post_read_ctx *ctx = bio->bi_private;
144 bio_for_each_segment_all(bv, bio, iter_all) {
155 if (bio->bi_status)
165 bio_put(bio);
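
The lines above show f2fs_finish_read_bio() walking every page of a completed read bio and using bio->bi_status to decide whether each page may be marked up-to-date. Below is a minimal sketch of that completion pattern; the helper name is illustrative, and it omits the compressed-cluster handling and the bio_post_read_ctx release that the real function also performs.

    #include <linux/bio.h>
    #include <linux/pagemap.h>

    /* Sketch: unlock every page of a finished read bio, then drop the bio. */
    static void finish_read_bio_sketch(struct bio *bio)
    {
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
            struct page *page = bv->bv_page;

            if (bio->bi_status)              /* I/O, decryption, or verity error */
                ClearPageUptodate(page);
            else
                SetPageUptodate(page);
            unlock_page(page);
        }
        bio_put(bio);                        /* drop the completion's reference */
    }
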
172 struct bio *bio = ctx->bio;
183 bio->bi_private = NULL;
186 * Verify the bio's pages with fs-verity. Exclude compressed pages,
193 bio_for_each_segment_all(bv, bio, iter_all) {
198 bio->bi_status = BLK_STS_IOERR;
203 fsverity_verify_bio(bio);
206 f2fs_finish_read_bio(bio, true);
210 * If the bio's data needs to be verified with fs-verity, then enqueue the
211 * verity work for the bio. Otherwise finish the bio now.
218 static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
220 struct bio_post_read_ctx *ctx = bio->bi_private;
226 f2fs_finish_read_bio(bio, in_task);
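
Lines 210-218 capture the rule that fs-verity verification is not run in the read completion path itself but queued to a workqueue. The sketch below illustrates that hand-off under simplified assumptions: the context structure, the STEP_VERITY_SKETCH flag, and the workqueue argument are placeholders, and it reuses finish_read_bio_sketch() from the sketch above.

    #include <linux/workqueue.h>
    #include <linux/fsverity.h>

    #define STEP_VERITY_SKETCH BIT(0)        /* illustrative flag, not the f2fs value */

    struct post_read_ctx_sketch {
        struct bio *bio;
        struct work_struct work;
        unsigned int enabled_steps;
    };

    /* Runs in process context, where fsverity_verify_bio() is allowed to sleep. */
    static void verity_work_sketch(struct work_struct *work)
    {
        struct post_read_ctx_sketch *ctx =
            container_of(work, struct post_read_ctx_sketch, work);

        fsverity_verify_bio(ctx->bio);
        finish_read_bio_sketch(ctx->bio);
    }

    /* Sketch: queue verity work if the bio needs it, otherwise finish it now. */
    static void verify_and_finish_sketch(struct bio *bio, struct workqueue_struct *wq)
    {
        struct post_read_ctx_sketch *ctx = bio->bi_private;

        if (ctx && (ctx->enabled_steps & STEP_VERITY_SKETCH)) {
            INIT_WORK(&ctx->work, verity_work_sketch);
            queue_work(wq, &ctx->work);
            return;
        }
        finish_read_bio_sketch(bio);
    }

Deferring to a dedicated workqueue matters because verifying data pages can itself trigger reads of verity metadata pages, which must not be serviced on the same decryption/decompression workqueue.
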
232 * remaining page was read by @ctx->bio.
234 * Note that a bio may span clusters (even a mix of compressed and uncompressed
236 * that the bio includes at least one compressed page. The actual decompression
237 * is done on a per-cluster basis, not a per-bio basis.
247 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
262 * Optimization: if all the bio's pages are compressed, then scheduling
263 * the per-bio verity work is unnecessary, as verity will be fully
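
Lines 262-263 describe an optimization: when every page in the bio belongs to a compressed cluster, scheduling the per-bio verity work is unnecessary because verity runs at the cluster level. A small sketch of that check, assuming a predicate like f2fs_is_compressed_page() (provided by fs/f2fs/compress.c) to classify pages:

    /* Sketch: true if every page of the bio is a compressed-cluster page,
     * in which case no separate per-bio verity pass is needed. */
    static bool all_pages_compressed_sketch(struct bio *bio)
    {
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
            if (!f2fs_is_compressed_page(bv->bv_page))
                return false;
        }
        return true;
    }
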
274 struct bio *bio = ctx->bio;
276 if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
277 f2fs_finish_read_bio(bio, true);
284 f2fs_verify_and_finish_bio(bio, true);
287 static void f2fs_read_end_io(struct bio *bio)
289 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
293 iostat_update_and_unbind_ctx(bio);
294 ctx = bio->bi_private;
297 bio->bi_status = BLK_STS_IOERR;
299 if (bio->bi_status) {
300 f2fs_finish_read_bio(bio, intask);
322 f2fs_verify_and_finish_bio(bio, intask);
325 static void f2fs_write_end_io(struct bio *bio)
331 iostat_update_and_unbind_ctx(bio);
332 sbi = bio->bi_private;
335 bio->bi_status = BLK_STS_IOERR;
337 bio_for_each_segment_all(bvec, bio, iter_all) {
345 f2fs_compress_write_end_io(bio, page);
350 if (unlikely(bio->bi_status)) {
370 bio_put(bio);
374 static void f2fs_zone_write_end_io(struct bio *bio)
376 struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
378 bio->bi_private = io->bi_private;
380 f2fs_write_end_io(bio);
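
Lines 374-380 show the zoned-write completion interposing on the bio: it restores the bi_private it had saved, signals a waiter, and then chains to the normal write completion. A sketch of that interposition pattern; the wrapper structure and field names are illustrative.

    #include <linux/completion.h>

    struct zone_io_sketch {
        void *saved_private;             /* the original bio->bi_private */
        struct completion zone_wait;     /* signalled once this zone write completes */
    };

    /* Sketch: restore the bio, wake the waiter, then run the normal completion. */
    static void zone_write_end_io_sketch(struct bio *bio)
    {
        struct zone_io_sketch *io = bio->bi_private;

        bio->bi_private = io->saved_private;
        complete(&io->zone_wait);
        f2fs_write_end_io(bio);          /* the regular handler from line 325 */
    }
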
450 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
455 struct bio *bio;
458 bio = bio_alloc_bioset(bdev, npages,
461 bio->bi_iter.bi_sector = sector;
463 bio->bi_end_io = f2fs_read_end_io;
464 bio->bi_private = NULL;
466 bio->bi_end_io = f2fs_write_end_io;
467 bio->bi_private = sbi;
469 iostat_alloc_and_bind_ctx(sbi, bio, NULL);
472 wbc_init_bio(fio->io_wbc, bio);
474 return bio;
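
__bio_alloc() at lines 450-474 pulls a bio from f2fs's private bio_set, points it at the computed device sector, and wires up the completion handler and private data before any pages are attached. A minimal sketch of the write-side allocation, with the device, sector, and bio_set passed in as placeholders:

    #include <linux/bio.h>

    /* Sketch: allocate a write bio aimed at a given sector of bdev. */
    static struct bio *alloc_write_bio_sketch(struct block_device *bdev,
                                              sector_t sector, unsigned int npages,
                                              struct bio_set *bs, void *private)
    {
        struct bio *bio;

        bio = bio_alloc_bioset(bdev, npages, REQ_OP_WRITE, GFP_NOIO, bs);
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = f2fs_write_end_io;  /* completion handler from line 325 */
        bio->bi_private = private;           /* e.g. the sbi, as at line 467 */
        return bio;
    }

Allocating from a dedicated bio_set (f2fs_bioset in the listing) keeps forward progress under memory pressure, since the set is backed by a mempool.
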
477 static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
487 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
490 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
499 return !bio_has_crypt_ctx(bio);
501 return fscrypt_mergeable_bio(bio, inode, next_idx);
504 void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
507 WARN_ON_ONCE(!is_read_io(bio_op(bio)));
508 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
510 iostat_update_submit_ctx(bio, type);
511 submit_bio(bio);
514 static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
517 WARN_ON_ONCE(is_read_io(bio_op(bio)));
522 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
523 iostat_update_submit_ctx(bio, type);
524 submit_bio(bio);
531 if (!io->bio)
535 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
536 f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
538 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
539 f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
541 io->bio = NULL;
544 static bool __has_merged_page(struct bio *bio, struct inode *inode,
550 if (!bio)
556 bio_for_each_segment_all(bvec, bio, iter_all) {
598 sbi->write_io[i][j].bio = NULL;
622 if (!io->bio)
628 io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
630 io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
650 ret = __has_merged_page(io->bio, inode, page, ino);
687 struct bio *bio;
698 /* Allocate a new bio */
699 bio = __bio_alloc(fio, 1);
701 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
704 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
705 bio_put(bio);
715 if (is_read_io(bio_op(bio)))
716 f2fs_submit_read_bio(fio->sbi, bio, fio->type);
718 f2fs_submit_write_bio(fio->sbi, bio, fio->type);
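
f2fs_submit_page_bio() at lines 687-718 builds a one-page bio and treats a short bio_add_page() as a hard failure. The add-and-submit step in isolation (the -EFAULT return mirrors the listing's error convention; the helper name is illustrative):

    /* Sketch: attach a single page to a freshly allocated bio and submit it. */
    static int submit_one_page_sketch(struct bio *bio, struct page *page)
    {
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
            bio_put(bio);                /* nothing queued yet; drop our reference */
            return -EFAULT;
        }
        submit_bio(bio);
        return 0;
    }
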
722 static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
726 bio->bi_iter.bi_size >= sbi->max_io_bytes))
730 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
741 static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
747 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
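
page_is_mergeable() and io_is_mergeable() (lines 722-747) decide whether the next block can be appended to the bio already under construction: the bio must stay under the optional max_io_bytes cap, the new block must be physically contiguous with the last one, and both must map to the same block device. A condensed sketch of that test, with block-address types simplified:

    /* Sketch: can cur_blkaddr be appended to a bio whose last block is last_blkaddr? */
    static bool blocks_mergeable_sketch(struct bio *bio, struct block_device *target_bdev,
                                        unsigned int max_io_bytes,
                                        u64 last_blkaddr, u64 cur_blkaddr)
    {
        if (max_io_bytes && bio->bi_iter.bi_size >= max_io_bytes)
            return false;                        /* bio already at the configured cap */
        if (last_blkaddr + 1 != cur_blkaddr)
            return false;                        /* not physically contiguous */
        return bio->bi_bdev == target_bdev;      /* must target the same device */
    }
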
752 static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
759 be->bio = bio;
760 bio_get(bio);
762 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
776 static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
791 if (be->bio != *bio)
796 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
799 if (f2fs_crypt_mergeable_bio(*bio,
802 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
808 /* page can't be merged into bio; submit the bio */
810 f2fs_submit_write_bio(sbi, *bio, DATA);
817 bio_put(*bio);
818 *bio = NULL;
825 struct bio **bio, struct page *page)
829 struct bio *target = bio ? *bio : NULL;
844 found = (target == be->bio);
846 found = __has_merged_page(be->bio, NULL,
861 found = (target == be->bio);
863 found = __has_merged_page(be->bio, NULL,
866 target = be->bio;
876 if (bio && *bio) {
877 bio_put(*bio);
878 *bio = NULL;
884 struct bio *bio = *fio->bio;
894 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
896 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
898 if (!bio) {
899 bio = __bio_alloc(fio, BIO_MAX_VECS);
900 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
903 add_bio_entry(fio->sbi, bio, page, fio->temp);
905 if (add_ipu_page(fio, &bio, page))
915 *fio->bio = bio;
988 if (io->bio &&
989 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
991 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
995 if (io->bio == NULL) {
996 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
997 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1002 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1016 bio_get(io->bio);
1018 io->bi_private = io->bio->bi_private;
1019 io->bio->bi_private = io;
1020 io->bio->bi_end_io = f2fs_zone_write_end_io;
1021 io->zone_pending_bio = io->bio;
1034 static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1039 struct bio *bio;
1045 bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
1048 if (!bio)
1050 bio->bi_iter.bi_sector = sector;
1051 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1052 bio->bi_end_io = f2fs_read_end_io;
1070 ctx->bio = bio;
1075 bio->bi_private = ctx;
1077 iostat_alloc_and_bind_ctx(sbi, bio, ctx);
1079 return bio;
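
f2fs_grab_read_bio() (lines 1034-1079) allocates the read bio and, when decryption, decompression, or verity work will be needed after the read, attaches a context from bio_post_read_ctx_pool as bi_private. A sketch of that attach step, reusing the post_read_ctx_sketch structure from the earlier sketch; the bio_set and mempool are passed in as placeholders.

    #include <linux/mempool.h>

    /* Sketch: allocate a read bio and hang a post-read context off it. */
    static struct bio *grab_read_bio_sketch(struct block_device *bdev, sector_t sector,
                                            unsigned int nr_pages, struct bio_set *bs,
                                            mempool_t *ctx_pool,
                                            unsigned int post_read_steps)
    {
        struct bio *bio;
        struct post_read_ctx_sketch *ctx;

        bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages), REQ_OP_READ,
                               GFP_KERNEL, bs);
        if (!bio)
            return ERR_PTR(-ENOMEM);
        bio->bi_iter.bi_sector = sector;
        bio->bi_end_io = f2fs_read_end_io;       /* completion handler from line 287 */

        if (post_read_steps) {
            /* GFP_NOFS: must not recurse into the filesystem during the read */
            ctx = mempool_alloc(ctx_pool, GFP_NOFS);
            ctx->bio = bio;
            ctx->enabled_steps = post_read_steps;
            bio->bi_private = ctx;
        }
        return bio;
    }
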
1088 struct bio *bio;
1090 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1092 if (IS_ERR(bio))
1093 return PTR_ERR(bio);
1098 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1099 iostat_update_and_unbind_ctx(bio);
1100 if (bio->bi_private)
1101 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1102 bio_put(bio);
1107 f2fs_submit_read_bio(sbi, bio, DATA);
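
Lines 1098-1102 show the cleanup order when a page cannot be added to such a bio: the post-read context goes back to its mempool before the bio itself is dropped, so nothing is leaked. A small sketch of that ownership rule (iostat unbinding from line 1099 omitted; names illustrative):

    /* Sketch: add a page to a read bio that may carry a post-read context;
     * on failure the context must be returned to its mempool before bio_put(). */
    static int add_page_or_cleanup_sketch(struct bio *bio, struct page *page,
                                          mempool_t *ctx_pool)
    {
        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
            if (bio->bi_private)
                mempool_free(bio->bi_private, ctx_pool);
            bio_put(bio);
            return -EFAULT;
        }
        return 0;
    }
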
2048 struct bio **bio_ret,
2052 struct bio *bio = *bio_ret;
2116 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2118 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2120 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2121 bio = NULL;
2123 if (bio == NULL) {
2124 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2127 if (IS_ERR(bio)) {
2128 ret = PTR_ERR(bio);
2129 bio = NULL;
2140 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2148 *bio_ret = bio;
2153 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2160 struct bio *bio = *bio_ret;
2268 if (bio && (!page_is_mergeable(sbi, bio,
2270 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2272 f2fs_submit_read_bio(sbi, bio, DATA);
2273 bio = NULL;
2276 if (!bio) {
2277 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2280 if (IS_ERR(bio)) {
2281 ret = PTR_ERR(bio);
2289 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2292 ctx = get_post_read_ctx(bio);
2304 *bio_ret = bio;
2317 *bio_ret = bio;
2329 struct bio *bio = NULL;
2368 ret = f2fs_read_multi_pages(&cc, &bio,
2405 &bio, &last_block_in_bio, rac);
2423 ret = f2fs_read_multi_pages(&cc, &bio,
2432 if (bio)
2433 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
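
f2fs_read_single_page() and f2fs_mpage_readpages() (lines 2048-2433) drive the readahead loop: the cached bio is reused while blocks remain mergeable, re-allocated when it fills up or merging fails, and whatever bio is left over at the end of the pass is submitted so no page stays locked. A sketch of that overall shape, assuming physically contiguous blocks (the real code recomputes the target sector per page from the block map), with the helper name illustrative:

    /* Sketch: build read bios across a run of pages, flushing and re-allocating
     * as needed, and submit the leftover bio once the loop ends. */
    static void readahead_loop_sketch(struct page **pages, unsigned int nr,
                                      struct block_device *bdev, sector_t first_sector)
    {
        struct bio *bio = NULL;
        unsigned int i;

        for (i = 0; i < nr; i++) {
    retry:
            if (!bio) {
                bio = bio_alloc(bdev, bio_max_segs(nr - i), REQ_OP_READ, GFP_KERNEL);
                bio->bi_iter.bi_sector = first_sector +
                        ((sector_t)i << (PAGE_SHIFT - SECTOR_SHIFT));
            }
            if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE) {
                submit_bio(bio);         /* full: flush and retry this page */
                bio = NULL;
                goto retry;
            }
        }
        if (bio)                         /* leftover, partially filled bio */
            submit_bio(bio);
    }

The trailing submit is the same pattern the listing shows on both paths: lines 2432-2433 flush the cached read bio after readahead, and lines 3205-3207 flush the cached IPU write bio after the writeback loop.
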
2733 struct bio **bio,
2765 .bio = bio,
2881 if (bio && *bio)
2882 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2942 struct bio *bio = NULL;
3134 &submitted, &bio, &last_block,
3205 /* submit cached bio of IPU write */
3206 if (bio)
3207 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);