Lines matching refs:bio in block/bio.c

7 #include <linux/bio.h>
31 struct bio *free_list;
32 struct bio *free_list_irq;
67 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
92 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
114 return bs->front_pad + sizeof(struct bio) + bs->back_pad;
143 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
213 void bio_uninit(struct bio *bio)
216 if (bio->bi_blkg) {
217 blkg_put(bio->bi_blkg);
218 bio->bi_blkg = NULL;
221 if (bio_integrity(bio))
222 bio_integrity_free(bio);
224 bio_crypt_free_ctx(bio);
228 static void bio_free(struct bio *bio)
230 struct bio_set *bs = bio->bi_pool;
231 void *p = bio;
235 bio_uninit(bio);
236 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
241 * Users of this function have their own bio allocation. Subsequently,
243 * when IO has completed, or when the bio is released.
245 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
248 bio->bi_next = NULL;
249 bio->bi_bdev = bdev;
250 bio->bi_opf = opf;
251 bio->bi_flags = 0;
252 bio->bi_ioprio = 0;
253 bio->bi_write_hint = 0;
254 bio->bi_status = 0;
255 bio->bi_iter.bi_sector = 0;
256 bio->bi_iter.bi_size = 0;
257 bio->bi_iter.bi_idx = 0;
258 bio->bi_iter.bi_bvec_done = 0;
259 bio->bi_end_io = NULL;
260 bio->bi_private = NULL;
262 bio->bi_blkg = NULL;
263 bio->bi_issue.value = 0;
265 bio_associate_blkg(bio);
267 bio->bi_iocost_cost = 0;
271 bio->bi_crypt_context = NULL;
274 bio->bi_integrity = NULL;
276 bio->bi_vcnt = 0;
278 atomic_set(&bio->__bi_remaining, 1);
279 atomic_set(&bio->__bi_cnt, 1);
280 bio->bi_cookie = BLK_QC_T_NONE;
282 bio->bi_max_vecs = max_vecs;
283 bio->bi_io_vec = table;
284 bio->bi_pool = NULL;
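
The bio_init() path above (source lines 245-284) sets up a caller-owned bio against a caller-provided bio_vec table. A minimal sketch of how that is typically used, assuming a hypothetical synchronous one-page read; the names my_read_one_page, my_bdev and my_page are illustrative and not from bio.c:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical helper: read one page synchronously using a
 * caller-owned, on-stack bio initialized with bio_init().
 */
static int my_read_one_page(struct block_device *my_bdev,
                            struct page *my_page, sector_t sector)
{
        struct bio_vec my_bvec;         /* table handed to bio_init() */
        struct bio my_bio;              /* not allocated from any bio_set */
        int ret;

        bio_init(&my_bio, my_bdev, &my_bvec, 1, REQ_OP_READ);
        my_bio.bi_iter.bi_sector = sector;
        __bio_add_page(&my_bio, my_page, PAGE_SIZE, 0);

        ret = submit_bio_wait(&my_bio);

        /* Users of bio_init() must pair it with bio_uninit(). */
        bio_uninit(&my_bio);
        return ret;
}

Because the bio lives on the stack, the sketch waits for completion before returning; an asynchronous bi_end_io would outlive the stack frame.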
289 * bio_reset - reinitialize a bio
290 * @bio: bio to reset
291 * @bdev: block device to use the bio for
292 * @opf: operation and flags for bio
295 * After calling bio_reset(), @bio will be in the same state as a freshly
296 * allocated bio returned by bio_alloc_bioset() - the only fields that are
298 * comment in struct bio.
300 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
302 bio_uninit(bio);
303 memset(bio, 0, BIO_RESET_BYTES);
304 atomic_set(&bio->__bi_remaining, 1);
305 bio->bi_bdev = bdev;
306 if (bio->bi_bdev)
307 bio_associate_blkg(bio);
308 bio->bi_opf = opf;
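
bio_reset() (source line 300) lets a driver recycle a long-lived bio embedded in its own structure instead of freeing and reallocating it. A hedged sketch, with my_request and my_recycle_as_write as hypothetical names:

#include <linux/bio.h>

struct my_request {
        struct bio_vec  bvec;
        struct bio      bio;    /* previously set up with bio_init() */
};

static void my_recycle_as_write(struct my_request *req,
                                struct block_device *my_bdev,
                                struct page *my_page, sector_t sector)
{
        /*
         * Everything below BIO_RESET_BYTES is cleared; bi_max_vecs and
         * bi_io_vec are preserved, so the bvec table installed by the
         * earlier bio_init() stays usable.
         */
        bio_reset(&req->bio, my_bdev, REQ_OP_WRITE);
        req->bio.bi_iter.bi_sector = sector;
        __bio_add_page(&req->bio, my_page, PAGE_SIZE, 0);
}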
312 static struct bio *__bio_chain_endio(struct bio *bio)
314 struct bio *parent = bio->bi_private;
316 if (bio->bi_status && !parent->bi_status)
317 parent->bi_status = bio->bi_status;
318 bio_put(bio);
322 static void bio_chain_endio(struct bio *bio)
324 bio_endio(__bio_chain_endio(bio));
328 * bio_chain - chain bio completions
329 * @bio: the target bio
330 * @parent: the parent bio of @bio
332 * The caller won't have a bi_end_io called when @bio completes - instead,
333 * @parent's bi_end_io won't be called until both @parent and @bio have
334 * completed; the chained bio will also be freed when it completes.
336 * The caller must not set bi_private or bi_end_io in @bio.
338 void bio_chain(struct bio *bio, struct bio *parent)
340 BUG_ON(bio->bi_private || bio->bi_end_io);
342 bio->bi_private = parent;
343 bio->bi_end_io = bio_chain_endio;
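
bio_chain() (source line 338) ties a child bio's completion to its parent, so the parent's bi_end_io only runs once both have finished and the child frees itself on completion. A sketch of the usual "submit a child now, keep working on the parent" pattern; my_send_chained_child and my_bdev are hypothetical:

#include <linux/bio.h>

static void my_send_chained_child(struct bio *parent,
                                  struct block_device *my_bdev,
                                  sector_t sector, unsigned short nr_vecs)
{
        struct bio *child;

        child = bio_alloc(my_bdev, nr_vecs, parent->bi_opf, GFP_NOIO);
        child->bi_iter.bi_sector = sector;
        /* pages would be added to @child here */

        /* parent's bi_end_io now also waits for @child to complete. */
        bio_chain(child, parent);
        submit_bio(child);
}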
348 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
351 struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp);
353 if (bio) {
354 bio_chain(bio, new);
355 submit_bio(bio);
365 struct bio *bio;
369 bio = bio_list_pop(&bs->rescue_list);
372 if (!bio)
375 submit_bio_noacct(bio);
382 struct bio *bio;
388 * were allocated from this bio_set; otherwise, if there was a bio on
393 * Since bio lists are singly linked, pop them all instead of trying to
400 while ((bio = bio_list_pop(&current->bio_list[0])))
401 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
405 while ((bio = bio_list_pop(&current->bio_list[1])))
406 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
432 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
437 struct bio *bio;
448 bio = cache->free_list;
449 cache->free_list = bio->bi_next;
453 bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);
454 bio->bi_pool = bs;
455 return bio;
459 * bio_alloc_bioset - allocate a bio for I/O
460 * @bdev: block device to allocate the bio for (can be %NULL)
462 * @opf: operation and flags for bio
466 * Allocate a bio from the mempools in @bs.
469 * allocate a bio. This is due to the mempool guarantees. To make this work,
470 * callers must never allocate more than 1 bio at a time from the general pool.
471 * Callers that need to allocate more than 1 bio must always submit the
472 * previously allocated bio for IO before attempting to allocate a new one.
488 * for per-bio allocations.
490 * Returns: Pointer to new bio on success, NULL on failure.
492 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
497 struct bio *bio;
506 bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
508 if (bio)
509 return bio;
511 * No cached bio available, bio returned below marked with
554 bio = p + bs->front_pad;
567 bio_init(bio, bdev, bvl, nr_vecs, opf);
569 bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
571 bio_init(bio, bdev, NULL, 0, opf);
574 bio->bi_pool = bs;
575 return bio;
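
bio_alloc_bioset() (source line 492) allocates from the mempools of a caller-owned bio_set, which is what provides the forward-progress guarantee as long as each bio is submitted before the next one is allocated. A hedged sketch; my_bio_set is assumed to have been initialized with bioset_init() and my_alloc_write_bio is an illustrative name:

#include <linux/bio.h>

static struct bio *my_alloc_write_bio(struct bio_set *my_bio_set,
                                      struct block_device *my_bdev,
                                      sector_t sector)
{
        struct bio *bio;

        /*
         * With a mempool-backed bio_set and __GFP_DIRECT_RECLAIM in the
         * gfp mask this does not fail, but only one unsubmitted bio may
         * be held at a time (see the comment above).
         */
        bio = bio_alloc_bioset(my_bdev, BIO_MAX_VECS, REQ_OP_WRITE,
                               GFP_NOIO, my_bio_set);
        bio->bi_iter.bi_sector = sector;
        return bio;
}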
584 * bio_kmalloc - kmalloc a bio
588 * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized
589 * using bio_init() before use. To free a bio returned from this function use
590 * kfree() after calling bio_uninit(). A bio returned from this function can
597 * Returns: Pointer to new bio on success, NULL on failure.
599 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
601 struct bio *bio;
605 return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
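
bio_kmalloc() (source line 599) only allocates the memory; the caller still has to run bio_init() before use and pair bio_uninit() with kfree() afterwards. A sketch under those rules, with hypothetical helper names:

#include <linux/bio.h>
#include <linux/slab.h>

static struct bio *my_kmalloc_bio(struct block_device *my_bdev,
                                  unsigned short nr_vecs)
{
        struct bio *bio;

        bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
        if (!bio)
                return NULL;
        /* The inline vectors follow the bio; hand them to bio_init(). */
        bio_init(bio, my_bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
        return bio;
}

static void my_free_kmalloc_bio(struct bio *bio)
{
        bio_uninit(bio);        /* required before kfree(), per the comment */
        kfree(bio);
}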
609 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
614 __bio_for_each_segment(bv, bio, iter, start)
620 * bio_truncate - truncate the bio to a new size of @new_size
621 * @bio: the bio to be truncated
622 * @new_size: new size for truncating the bio
625 * Truncate the bio to new size of @new_size. If bio_op(bio) is
627 * be used for handling corner cases, such as bio eod.
629 static void bio_truncate(struct bio *bio, unsigned new_size)
636 if (new_size >= bio->bi_iter.bi_size)
639 if (bio_op(bio) != REQ_OP_READ)
642 bio_for_each_segment(bv, bio, iter) {
660 * fs bio user has to retrieve all pages via bio_for_each_segment_all
663 * It is enough to truncate bio by updating .bi_size since we can make
666 bio->bi_iter.bi_size = new_size;
671 * @bio: bio to truncate
676 * We'll just truncate the bio to the size of the device, and clear the end of
681 void guard_bio_eod(struct bio *bio)
683 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
693 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
696 maxsector -= bio->bi_iter.bi_sector;
697 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
700 bio_truncate(bio, maxsector << 9);
707 struct bio *bio;
709 while ((bio = cache->free_list) != NULL) {
710 cache->free_list = bio->bi_next;
712 bio_free(bio);
760 static inline void bio_put_percpu_cache(struct bio *bio)
764 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
769 bio_uninit(bio);
770 bio->bi_next = cache->free_list;
772 bio->bi_bdev = NULL;
773 cache->free_list = bio;
778 bio_uninit(bio);
779 bio->bi_next = cache->free_list_irq;
780 cache->free_list_irq = bio;
789 bio_free(bio);
793 * bio_put - release a reference to a bio
794 * @bio: bio to release reference to
797 * Put a reference to a &struct bio, either one you have gotten with
798 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
800 void bio_put(struct bio *bio)
802 if (unlikely(bio_flagged(bio, BIO_REFFED))) {
803 BUG_ON(!atomic_read(&bio->__bi_cnt));
804 if (!atomic_dec_and_test(&bio->__bi_cnt))
807 if (bio->bi_opf & REQ_ALLOC_CACHE)
808 bio_put_percpu_cache(bio);
810 bio_free(bio);
814 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
816 bio_set_flag(bio, BIO_CLONED);
817 bio->bi_ioprio = bio_src->bi_ioprio;
818 bio->bi_write_hint = bio_src->bi_write_hint;
819 bio->bi_iter = bio_src->bi_iter;
821 if (bio->bi_bdev) {
822 if (bio->bi_bdev == bio_src->bi_bdev &&
824 bio_set_flag(bio, BIO_REMAPPED);
825 bio_clone_blkg_association(bio, bio_src);
828 if (bio_crypt_clone(bio, bio_src, gfp) < 0)
831 bio_integrity_clone(bio, bio_src, gfp) < 0)
837 * bio_alloc_clone - clone a bio that shares the original bio's biovec
839 * @bio_src: bio to clone from
843 * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
844 * bio, but not the actual data it points to.
846 * The caller must ensure that @bio_src is not freed before the returned bio.
848 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
851 struct bio *bio;
853 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
854 if (!bio)
857 if (__bio_clone(bio, bio_src, gfp) < 0) {
858 bio_put(bio);
861 bio->bi_io_vec = bio_src->bi_io_vec;
863 return bio;
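
bio_alloc_clone() (source line 848) is the stacking-driver pattern: the clone shares @bio_src's biovec, so the source must stay alive until the clone completes. A hedged sketch of redirecting an incoming bio to a lower device; my_remap_bio, my_clone_endio, my_lower_bdev and my_bs are illustrative names:

#include <linux/bio.h>

static void my_clone_endio(struct bio *clone)
{
        struct bio *orig = clone->bi_private;

        orig->bi_status = clone->bi_status;
        bio_put(clone);
        bio_endio(orig);        /* complete the original after the clone */
}

static void my_remap_bio(struct bio *orig,
                         struct block_device *my_lower_bdev,
                         struct bio_set *my_bs)
{
        struct bio *clone;

        clone = bio_alloc_clone(my_lower_bdev, orig, GFP_NOIO, my_bs);
        if (!clone) {
                bio_io_error(orig);
                return;
        }
        clone->bi_private = orig;
        clone->bi_end_io = my_clone_endio;
        submit_bio_noacct(clone);
}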
868 * bio_init_clone - clone a bio that shares the original bio's biovec
870 * @bio: bio to clone into
871 * @bio_src: bio to clone from
874 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
875 * The caller owns the returned bio, but not the actual data it points to.
877 * The caller must ensure that @bio_src is not freed before @bio.
879 int bio_init_clone(struct block_device *bdev, struct bio *bio,
880 struct bio *bio_src, gfp_t gfp)
884 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
885 ret = __bio_clone(bio, bio_src, gfp);
887 bio_uninit(bio);
893 * bio_full - check if the bio is full
894 * @bio: bio to check
897 * Return true if @bio is full and one segment with @len bytes can't be
898 * added to the bio, otherwise return false
900 static inline bool bio_full(struct bio *bio, unsigned len)
902 if (bio->bi_vcnt >= bio->bi_max_vecs)
904 if (bio->bi_iter.bi_size > UINT_MAX - len)
956 * bio_add_hw_page - attempt to add a page to a bio with hw constraints
958 * @bio: destination bio
965 * Add a page to a bio while respecting the hardware max_sectors, max_segment
968 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
974 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
978 if (len > max_size - bio->bi_iter.bi_size)
981 if (bio->bi_vcnt > 0) {
982 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
986 bio->bi_iter.bi_size += len;
990 if (bio->bi_vcnt >=
991 min(bio->bi_max_vecs, queue_max_segments(q)))
1002 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset);
1003 bio->bi_vcnt++;
1004 bio->bi_iter.bi_size += len;
1009 * bio_add_pc_page - attempt to add page to passthrough bio
1011 * @bio: destination bio
1017 * number of reasons, such as the bio being full or target block device
1018 * limitations. The target block device must allow bios up to PAGE_SIZE,
1019 * so it is always possible to add a single page to an empty bio.
1023 int bio_add_pc_page(struct request_queue *q, struct bio *bio,
1027 return bio_add_hw_page(q, bio, page, len, offset,
1033 * bio_add_zone_append_page - attempt to add page to zone-append bio
1034 * @bio: destination bio
1039 * Attempt to add a page to the bio_vec maplist of a bio that will be submitted
1041 * bio being full or the target block device is not a zoned block device or
1043 * allow bios up to PAGE_SIZE, so it is always possible to add a single page
1044 * to an empty bio.
1046 * Returns: number of bytes added to the bio, or 0 in case of a failure.
1048 int bio_add_zone_append_page(struct bio *bio, struct page *page,
1051 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1054 if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND))
1057 if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev)))
1060 return bio_add_hw_page(q, bio, page, len, offset,
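
bio_add_zone_append_page() (source line 1048) is used for REQ_OP_ZONE_APPEND bios, where the device picks the write location and reports it back in bi_iter.bi_sector on completion. A rough sketch, assuming a zoned my_bdev and the hypothetical helper name my_zone_append_page:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int my_zone_append_page(struct block_device *my_bdev,
                               sector_t zone_start, struct page *my_page,
                               sector_t *written_sector)
{
        struct bio *bio;
        int ret;

        bio = bio_alloc(my_bdev, 1, REQ_OP_ZONE_APPEND, GFP_KERNEL);
        bio->bi_iter.bi_sector = zone_start;    /* target zone, not offset */

        if (bio_add_zone_append_page(bio, my_page, PAGE_SIZE, 0) != PAGE_SIZE) {
                bio_put(bio);
                return -EIO;
        }

        ret = submit_bio_wait(bio);
        if (!ret)       /* completion updates bi_sector to the written location */
                *written_sector = bio->bi_iter.bi_sector;
        bio_put(bio);
        return ret;
}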
1066 * __bio_add_page - add page(s) to a bio in a new segment
1067 * @bio: destination bio
1072 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
1073 * that @bio has space for another bvec.
1075 void __bio_add_page(struct bio *bio, struct page *page,
1078 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1079 WARN_ON_ONCE(bio_full(bio, len));
1081 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1082 bio->bi_iter.bi_size += len;
1083 bio->bi_vcnt++;
1088 * bio_add_page - attempt to add page(s) to bio
1089 * @bio: destination bio
1095 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1097 int bio_add_page(struct bio *bio, struct page *page,
1102 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1104 if (bio->bi_iter.bi_size > UINT_MAX - len)
1107 if (bio->bi_vcnt > 0 &&
1108 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1110 bio->bi_iter.bi_size += len;
1114 if (bio->bi_vcnt >= bio->bi_max_vecs)
1116 __bio_add_page(bio, page, len, offset);
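
bio_add_page() (source line 1097) first tries to merge into the last bvec and otherwise appends a new one; it returns the number of bytes added, or 0 when the bio is full or cloned, so callers loop until it refuses. A sketch with a hypothetical helper name:

#include <linux/bio.h>

/* Fill a bio from an array of pages; returns how many pages were added. */
static unsigned int my_fill_bio(struct bio *bio, struct page **pages,
                                unsigned int nr_pages)
{
        unsigned int i;

        for (i = 0; i < nr_pages; i++) {
                if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
                        break;  /* bio is full: submit it and continue later */
        }
        return i;
}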
1121 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1126 __bio_add_page(bio, &folio->page, len, off);
1130 * bio_add_folio - Attempt to add part of a folio to a bio.
1131 * @bio: BIO to add to.
1143 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1148 return bio_add_page(bio, &folio->page, len, off) > 0;
1152 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1156 bio_for_each_folio_all(fi, bio) {
1169 bio_release_page(bio, page++);
1175 void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
1179 WARN_ON_ONCE(bio->bi_max_vecs);
1181 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1182 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1188 bio->bi_vcnt = iter->nr_segs;
1189 bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1190 bio->bi_iter.bi_bvec_done = iter->iov_offset;
1191 bio->bi_iter.bi_size = size;
1192 bio_set_flag(bio, BIO_CLONED);
1195 static int bio_iov_add_page(struct bio *bio, struct page *page,
1200 if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len))
1203 if (bio->bi_vcnt > 0 &&
1204 bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1],
1206 bio->bi_iter.bi_size += len;
1208 bio_release_page(bio, page);
1211 __bio_add_page(bio, page, len, offset);
1215 static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page,
1218 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1221 if (bio_add_hw_page(q, bio, page, len, offset,
1225 bio_release_page(bio, page);
1232 * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1233 * @bio: bio to add pages to
1236 * Extracts pages from *iter and appends them to @bio's bvec array. The pages
1241 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1244 unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1245 unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1246 struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1254 * Move page array up in the allocated memory for the bio vecs as far as
1261 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1268 * result to ensure the bio's total size is correct. The remainder of
1269 * the iov data will be picked up in the next bio iteration.
1272 UINT_MAX - bio->bi_iter.bi_size,
1279 if (bio->bi_bdev) {
1280 size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1);
1294 if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
1295 ret = bio_iov_add_zone_append_page(bio, page, len,
1300 bio_iov_add_page(bio, page, len, offset);
1308 bio_release_page(bio, pages[i++]);
1314 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1315 * @bio: bio to add pages to
1325 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1329 * fit into the bio, or are requested in @iter, whatever is smaller. If
1333 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1337 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1341 bio_iov_bvec_set(bio, iter);
1342 iov_iter_advance(iter, bio->bi_iter.bi_size);
1347 bio_set_flag(bio, BIO_PAGE_PINNED);
1349 ret = __bio_iov_iter_get_pages(bio, iter);
1350 } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1352 return bio->bi_vcnt ? 0 : ret;
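
bio_iov_iter_get_pages() (source line 1333) is the direct-I/O helper: it pins user pages (or references kernel bvecs) from an iov_iter into the bio and advances the iterator, so callers typically loop over fresh bios until the iterator is drained. A rough synchronous sketch; real users such as the blkdev and iomap direct-I/O paths submit asynchronously, and my_sync_dio_write is a hypothetical name:

#include <linux/bio.h>
#include <linux/uio.h>

static int my_sync_dio_write(struct block_device *my_bdev, sector_t sector,
                             struct iov_iter *iter)
{
        int ret = 0;

        while (iov_iter_count(iter)) {
                struct bio *bio;

                bio = bio_alloc(my_bdev,
                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
                                REQ_OP_WRITE, GFP_KERNEL);
                bio->bi_iter.bi_sector = sector;

                ret = bio_iov_iter_get_pages(bio, iter);
                if (ret) {
                        bio_put(bio);
                        break;
                }

                /* record progress before completion consumes bi_iter */
                sector += bio_sectors(bio);

                ret = submit_bio_wait(bio);
                bio_release_pages(bio, false);  /* unpin after completion */
                bio_put(bio);
                if (ret)
                        break;
        }
        return ret;
}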
1356 static void submit_bio_wait_endio(struct bio *bio)
1358 complete(bio->bi_private);
1362 * submit_bio_wait - submit a bio, and wait until it completes
1363 * @bio: The &struct bio which describes the I/O
1369 * result in the bio reference being consumed. The caller must drop the reference
1372 int submit_bio_wait(struct bio *bio)
1375 bio->bi_bdev->bd_disk->lockdep_map);
1377 bio->bi_private = &done;
1378 bio->bi_end_io = submit_bio_wait_endio;
1379 bio->bi_opf |= REQ_SYNC;
1380 submit_bio(bio);
1383 return blk_status_to_errno(bio->bi_status);
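
submit_bio_wait() (source line 1372) installs its own completion as bi_end_io and blocks, which makes it suitable for on-stack bios. A sketch of an empty preflush issued synchronously, mirroring how a cache flush is commonly done; my_issue_flush is a hypothetical name, and REQ_SYNC is added by submit_bio_wait() itself:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int my_issue_flush(struct block_device *my_bdev)
{
        struct bio bio;
        int ret;

        bio_init(&bio, my_bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH);
        ret = submit_bio_wait(&bio);
        bio_uninit(&bio);
        return ret;
}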
1387 void __bio_advance(struct bio *bio, unsigned bytes)
1389 if (bio_integrity(bio))
1390 bio_integrity_advance(bio, bytes);
1392 bio_crypt_advance(bio, bytes);
1393 bio_advance_iter(bio, &bio->bi_iter, bytes);
1397 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1398 struct bio *src, struct bvec_iter *src_iter)
1419 * bio_copy_data - copy contents of data buffers from one bio to another
1420 * @src: source bio
1421 * @dst: destination bio
1426 void bio_copy_data(struct bio *dst, struct bio *src)
1435 void bio_free_pages(struct bio *bio)
1440 bio_for_each_segment_all(bvec, bio, iter_all)
1462 * deferred bio dirtying paths.
1466 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1468 void bio_set_pages_dirty(struct bio *bio)
1472 bio_for_each_folio_all(fi, bio) {
1495 static struct bio *bio_dirty_list;
1502 struct bio *bio, *next;
1509 while ((bio = next) != NULL) {
1510 next = bio->bi_private;
1512 bio_release_pages(bio, true);
1513 bio_put(bio);
1517 void bio_check_pages_dirty(struct bio *bio)
1522 bio_for_each_folio_all(fi, bio) {
1527 bio_release_pages(bio, false);
1528 bio_put(bio);
1532 bio->bi_private = bio_dirty_list;
1533 bio_dirty_list = bio;
1539 static inline bool bio_remaining_done(struct bio *bio)
1545 if (!bio_flagged(bio, BIO_CHAIN))
1548 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1550 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1551 bio_clear_flag(bio, BIO_CHAIN);
1559 * bio_endio - end I/O on a bio
1560 * @bio: bio
1563 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1564 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1565 * bio unless they own it and thus know that it has an end_io function.
1567 * bio_endio() can be called several times on a bio that has been chained
1571 void bio_endio(struct bio *bio)
1574 if (!bio_remaining_done(bio))
1576 if (!bio_integrity_endio(bio))
1579 rq_qos_done_bio(bio);
1581 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1582 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1583 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1594 if (bio->bi_end_io == bio_chain_endio) {
1595 bio = __bio_chain_endio(bio);
1599 blk_throtl_bio_endio(bio);
1601 bio_uninit(bio);
1602 if (bio->bi_end_io)
1603 bio->bi_end_io(bio);
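
bio_endio() (source line 1571) is how a driver finishes a bio it was handed; once all chained pieces are accounted for, the owner's bi_end_io runs. A minimal sketch, with my_complete_request as a hypothetical name:

#include <linux/bio.h>

static void my_complete_request(struct bio *bio, blk_status_t status)
{
        if (status)
                bio->bi_status = status;        /* report the error, if any */
        bio_endio(bio);
}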
1608 * bio_split - split a bio
1609 * @bio: bio to split
1610 * @sectors: number of sectors to split from the front of @bio
1612 * @bs: bio set to allocate from
1614 * Allocates and returns a new bio which represents @sectors from the start of
1615 * @bio, and updates @bio to represent the remaining sectors.
1617 * Unless this is a discard request, the newly allocated bio will point
1618 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1619 * neither @bio nor @bs are freed before the split bio.
1621 struct bio *bio_split(struct bio *bio, int sectors,
1624 struct bio *split;
1627 BUG_ON(sectors >= bio_sectors(bio));
1630 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1633 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1642 bio_advance(bio, split->bi_iter.bi_size);
1644 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
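
bio_split() (source line 1621) carves @sectors off the front into a new bio and advances the original past them; callers normally chain the two and submit the front piece, much like the block layer's own splitting. A hedged sketch; my_split_and_submit is a hypothetical name and my_bs a caller-owned bio_set:

#include <linux/bio.h>

static struct bio *my_split_and_submit(struct bio *bio, int sectors,
                                       struct bio_set *my_bs)
{
        struct bio *split;

        if (sectors >= bio_sectors(bio))
                return bio;     /* nothing to split off */

        split = bio_split(bio, sectors, GFP_NOIO, my_bs);
        bio_chain(split, bio);          /* @bio completes only after @split */
        submit_bio_noacct(split);
        return bio;                     /* now covers the remaining sectors */
}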
1652 * bio_trim - trim a bio
1653 * @bio: bio to trim
1654 * @offset: number of sectors to trim from the front of @bio
1655 * @size: size we want to trim @bio to, in sectors
1660 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1663 offset + size > bio_sectors(bio)))
1667 if (offset == 0 && size == bio->bi_iter.bi_size)
1670 bio_advance(bio, offset << 9);
1671 bio->bi_iter.bi_size = size;
1673 if (bio_integrity(bio))
1674 bio_integrity_trim(bio);
1715 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1716 * @front_pad: Number of bytes to allocate in front of the returned bio
1722 * to ask for a number of bytes to be allocated in front of the bio.
1723 * Front pad allocation is useful for embedding the bio inside
1724 * another structure, to avoid allocating extra data to go with the bio.
1725 * Note that the bio must be embedded at the END of that structure always,
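
The front_pad notes above (source lines 1715-1725) describe how a driver gets its own per-I/O context allocated together with each bio, provided the bio sits at the end of that structure. A sketch of the usual container_of() pattern; struct my_io, my_bio_set and the helper names are hypothetical:

#include <linux/bio.h>

struct my_io {
        void            *my_private;
        struct bio      bio;            /* embedded at the END, as required */
};

static struct bio_set my_bio_set;

static int __init my_init_bioset(void)
{
        /* Reserve room for struct my_io in front of every allocated bio. */
        return bioset_init(&my_bio_set, BIO_POOL_SIZE,
                           offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
}

static struct my_io *my_alloc_io(struct block_device *my_bdev,
                                 unsigned short nr_vecs, blk_opf_t opf)
{
        struct bio *bio;

        bio = bio_alloc_bioset(my_bdev, nr_vecs, opf, GFP_NOIO, &my_bio_set);
        /* Recover the enclosing structure from the bio pointer. */
        return container_of(bio, struct my_io, bio);
}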
1783 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1795 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1800 panic("bio: can't allocate bios\n");
1803 panic("bio: can't create integrity pool\n");